]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.58-201203051840.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.58-201203051840.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index ed78982..bcc432e 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243 +endif
244 +ifdef CONFIG_CHECKER_PLUGIN
245 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247 +endif
248 +endif
249 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252 +ifeq ($(KBUILD_EXTMOD),)
253 +gcc-plugins:
254 + $(Q)$(MAKE) $(build)=tools/gcc
255 +else
256 +gcc-plugins: ;
257 +endif
258 +else
259 +gcc-plugins:
260 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
262 +else
263 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264 +endif
265 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266 +endif
267 +endif
268 +
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272 @@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281 @@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290 @@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294 -$(vmlinux-dirs): prepare scripts
295 +$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299 @@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307 @@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316 @@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320 -modules_prepare: prepare scripts
321 +modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325 @@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329 - Module.symvers Module.markers tags TAGS cscope*
330 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334 @@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338 - -o -name '.*.rej' -o -size 0 \
339 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343 @@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347 + @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351 @@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360 @@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364 -tags TAGS cscope: FORCE
365 +tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369 @@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373 -%.s: %.c prepare scripts FORCE
374 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376 +%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.o: %.c prepare scripts FORCE
381 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 -%.s: %.S prepare scripts FORCE
388 +%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 -%.o: %.S prepare scripts FORCE
391 +%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 @@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399 -%/: prepare scripts FORCE
400 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 +%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406 -%.ko: prepare scripts FORCE
407 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414 index 610dff4..f396854 100644
415 --- a/arch/alpha/include/asm/atomic.h
416 +++ b/arch/alpha/include/asm/atomic.h
417 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421 +#define atomic64_read_unchecked(v) atomic64_read(v)
422 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
427 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
429 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430 +
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
435 index 5c75c1b..c82f878 100644
436 --- a/arch/alpha/include/asm/elf.h
437 +++ b/arch/alpha/include/asm/elf.h
438 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
439
440 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
441
442 +#ifdef CONFIG_PAX_ASLR
443 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
444 +
445 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
446 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
447 +#endif
448 +
449 /* $0 is set by ld.so to a pointer to a function which might be
450 registered using atexit. This provides a mean for the dynamic
451 linker to call DT_FINI functions for shared libraries that have
452 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
453 index 3f0c59f..cf1e100 100644
454 --- a/arch/alpha/include/asm/pgtable.h
455 +++ b/arch/alpha/include/asm/pgtable.h
456 @@ -101,6 +101,17 @@ struct vm_area_struct;
457 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
458 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
459 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
460 +
461 +#ifdef CONFIG_PAX_PAGEEXEC
462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
465 +#else
466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
467 +# define PAGE_COPY_NOEXEC PAGE_COPY
468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
469 +#endif
470 +
471 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
472
473 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
474 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
475 index ebc3c89..20cfa63 100644
476 --- a/arch/alpha/kernel/module.c
477 +++ b/arch/alpha/kernel/module.c
478 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
479
480 /* The small sections were sorted to the end of the segment.
481 The following should definitely cover them. */
482 - gp = (u64)me->module_core + me->core_size - 0x8000;
483 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
484 got = sechdrs[me->arch.gotsecindex].sh_addr;
485
486 for (i = 0; i < n; i++) {
487 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
488 index a94e49c..d71dd44 100644
489 --- a/arch/alpha/kernel/osf_sys.c
490 +++ b/arch/alpha/kernel/osf_sys.c
491 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
492 /* At this point: (!vma || addr < vma->vm_end). */
493 if (limit - len < addr)
494 return -ENOMEM;
495 - if (!vma || addr + len <= vma->vm_start)
496 + if (check_heap_stack_gap(vma, addr, len))
497 return addr;
498 addr = vma->vm_end;
499 vma = vma->vm_next;
500 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
501 merely specific addresses, but regions of memory -- perhaps
502 this feature should be incorporated into all ports? */
503
504 +#ifdef CONFIG_PAX_RANDMMAP
505 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
506 +#endif
507 +
508 if (addr) {
509 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
510 if (addr != (unsigned long) -ENOMEM)
511 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
512 }
513
514 /* Next, try allocating at TASK_UNMAPPED_BASE. */
515 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
516 - len, limit);
517 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
518 +
519 if (addr != (unsigned long) -ENOMEM)
520 return addr;
521
522 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
523 index 00a31de..2ded0f2 100644
524 --- a/arch/alpha/mm/fault.c
525 +++ b/arch/alpha/mm/fault.c
526 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
527 __reload_thread(pcb);
528 }
529
530 +#ifdef CONFIG_PAX_PAGEEXEC
531 +/*
532 + * PaX: decide what to do with offenders (regs->pc = fault address)
533 + *
534 + * returns 1 when task should be killed
535 + * 2 when patched PLT trampoline was detected
536 + * 3 when unpatched PLT trampoline was detected
537 + */
538 +static int pax_handle_fetch_fault(struct pt_regs *regs)
539 +{
540 +
541 +#ifdef CONFIG_PAX_EMUPLT
542 + int err;
543 +
544 + do { /* PaX: patched PLT emulation #1 */
545 + unsigned int ldah, ldq, jmp;
546 +
547 + err = get_user(ldah, (unsigned int *)regs->pc);
548 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
549 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
550 +
551 + if (err)
552 + break;
553 +
554 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
555 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
556 + jmp == 0x6BFB0000U)
557 + {
558 + unsigned long r27, addr;
559 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
560 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
561 +
562 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
563 + err = get_user(r27, (unsigned long *)addr);
564 + if (err)
565 + break;
566 +
567 + regs->r27 = r27;
568 + regs->pc = r27;
569 + return 2;
570 + }
571 + } while (0);
572 +
573 + do { /* PaX: patched PLT emulation #2 */
574 + unsigned int ldah, lda, br;
575 +
576 + err = get_user(ldah, (unsigned int *)regs->pc);
577 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
578 + err |= get_user(br, (unsigned int *)(regs->pc+8));
579 +
580 + if (err)
581 + break;
582 +
583 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
584 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
585 + (br & 0xFFE00000U) == 0xC3E00000U)
586 + {
587 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
588 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
589 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
590 +
591 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
592 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
593 + return 2;
594 + }
595 + } while (0);
596 +
597 + do { /* PaX: unpatched PLT emulation */
598 + unsigned int br;
599 +
600 + err = get_user(br, (unsigned int *)regs->pc);
601 +
602 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
603 + unsigned int br2, ldq, nop, jmp;
604 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
605 +
606 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
607 + err = get_user(br2, (unsigned int *)addr);
608 + err |= get_user(ldq, (unsigned int *)(addr+4));
609 + err |= get_user(nop, (unsigned int *)(addr+8));
610 + err |= get_user(jmp, (unsigned int *)(addr+12));
611 + err |= get_user(resolver, (unsigned long *)(addr+16));
612 +
613 + if (err)
614 + break;
615 +
616 + if (br2 == 0xC3600000U &&
617 + ldq == 0xA77B000CU &&
618 + nop == 0x47FF041FU &&
619 + jmp == 0x6B7B0000U)
620 + {
621 + regs->r28 = regs->pc+4;
622 + regs->r27 = addr+16;
623 + regs->pc = resolver;
624 + return 3;
625 + }
626 + }
627 + } while (0);
628 +#endif
629 +
630 + return 1;
631 +}
632 +
633 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
634 +{
635 + unsigned long i;
636 +
637 + printk(KERN_ERR "PAX: bytes at PC: ");
638 + for (i = 0; i < 5; i++) {
639 + unsigned int c;
640 + if (get_user(c, (unsigned int *)pc+i))
641 + printk(KERN_CONT "???????? ");
642 + else
643 + printk(KERN_CONT "%08x ", c);
644 + }
645 + printk("\n");
646 +}
647 +#endif
648
649 /*
650 * This routine handles page faults. It determines the address,
651 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
652 good_area:
653 si_code = SEGV_ACCERR;
654 if (cause < 0) {
655 - if (!(vma->vm_flags & VM_EXEC))
656 + if (!(vma->vm_flags & VM_EXEC)) {
657 +
658 +#ifdef CONFIG_PAX_PAGEEXEC
659 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
660 + goto bad_area;
661 +
662 + up_read(&mm->mmap_sem);
663 + switch (pax_handle_fetch_fault(regs)) {
664 +
665 +#ifdef CONFIG_PAX_EMUPLT
666 + case 2:
667 + case 3:
668 + return;
669 +#endif
670 +
671 + }
672 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
673 + do_group_exit(SIGKILL);
674 +#else
675 goto bad_area;
676 +#endif
677 +
678 + }
679 } else if (!cause) {
680 /* Allow reads even for write-only mappings */
681 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
682 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
683 index b68faef..6dd1496 100644
684 --- a/arch/arm/Kconfig
685 +++ b/arch/arm/Kconfig
686 @@ -14,6 +14,7 @@ config ARM
687 select SYS_SUPPORTS_APM_EMULATION
688 select HAVE_OPROFILE
689 select HAVE_ARCH_KGDB
690 + select GENERIC_ATOMIC64
691 select HAVE_KPROBES if (!XIP_KERNEL)
692 select HAVE_KRETPROBES if (HAVE_KPROBES)
693 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
694 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
695 index d0daeab..ff286a8 100644
696 --- a/arch/arm/include/asm/atomic.h
697 +++ b/arch/arm/include/asm/atomic.h
698 @@ -15,6 +15,10 @@
699 #include <linux/types.h>
700 #include <asm/system.h>
701
702 +#ifdef CONFIG_GENERIC_ATOMIC64
703 +#include <asm-generic/atomic64.h>
704 +#endif
705 +
706 #define ATOMIC_INIT(i) { (i) }
707
708 #ifdef __KERNEL__
709 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
710 index 6aac3f5..265536b 100644
711 --- a/arch/arm/include/asm/elf.h
712 +++ b/arch/arm/include/asm/elf.h
713 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719 +
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
724 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
725 +#endif
726
727 /* When the program starts, a1 contains a pointer to a function to be
728 registered with atexit, as per the SVR4 ABI. A value of 0 means we
729 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
730 index c019949..388fdd1 100644
731 --- a/arch/arm/include/asm/kmap_types.h
732 +++ b/arch/arm/include/asm/kmap_types.h
733 @@ -19,6 +19,7 @@ enum km_type {
734 KM_SOFTIRQ0,
735 KM_SOFTIRQ1,
736 KM_L2_CACHE,
737 + KM_CLEARPAGE,
738 KM_TYPE_NR
739 };
740
741 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
742 index 1d6bd40..fba0cb9 100644
743 --- a/arch/arm/include/asm/uaccess.h
744 +++ b/arch/arm/include/asm/uaccess.h
745 @@ -22,6 +22,8 @@
746 #define VERIFY_READ 0
747 #define VERIFY_WRITE 1
748
749 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
750 +
751 /*
752 * The exception table consists of pairs of addresses: the first is the
753 * address of an instruction that is allowed to fault, and the second is
754 @@ -387,8 +389,23 @@ do { \
755
756
757 #ifdef CONFIG_MMU
758 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
759 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
760 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
761 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
762 +
763 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
764 +{
765 + if (!__builtin_constant_p(n))
766 + check_object_size(to, n, false);
767 + return ___copy_from_user(to, from, n);
768 +}
769 +
770 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
771 +{
772 + if (!__builtin_constant_p(n))
773 + check_object_size(from, n, true);
774 + return ___copy_to_user(to, from, n);
775 +}
776 +
777 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
778 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
779 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
780 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
781
782 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
783 {
784 + if ((long)n < 0)
785 + return n;
786 +
787 if (access_ok(VERIFY_READ, from, n))
788 n = __copy_from_user(to, from, n);
789 else /* security hole - plug it */
790 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
791
792 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
793 {
794 + if ((long)n < 0)
795 + return n;
796 +
797 if (access_ok(VERIFY_WRITE, to, n))
798 n = __copy_to_user(to, from, n);
799 return n;
800 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
801 index 0e62770..e2c2cd6 100644
802 --- a/arch/arm/kernel/armksyms.c
803 +++ b/arch/arm/kernel/armksyms.c
804 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
805 #ifdef CONFIG_MMU
806 EXPORT_SYMBOL(copy_page);
807
808 -EXPORT_SYMBOL(__copy_from_user);
809 -EXPORT_SYMBOL(__copy_to_user);
810 +EXPORT_SYMBOL(___copy_from_user);
811 +EXPORT_SYMBOL(___copy_to_user);
812 EXPORT_SYMBOL(__clear_user);
813
814 EXPORT_SYMBOL(__get_user_1);
815 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
816 index ba8ccfe..2dc34dc 100644
817 --- a/arch/arm/kernel/kgdb.c
818 +++ b/arch/arm/kernel/kgdb.c
819 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
820 * and we handle the normal undef case within the do_undefinstr
821 * handler.
822 */
823 -struct kgdb_arch arch_kgdb_ops = {
824 +const struct kgdb_arch arch_kgdb_ops = {
825 #ifndef __ARMEB__
826 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
827 #else /* ! __ARMEB__ */
828 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
829 index 3f361a7..6e806e1 100644
830 --- a/arch/arm/kernel/traps.c
831 +++ b/arch/arm/kernel/traps.c
832 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
833
834 DEFINE_SPINLOCK(die_lock);
835
836 +extern void gr_handle_kernel_exploit(void);
837 +
838 /*
839 * This function is protected against re-entrancy.
840 */
841 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
842 if (panic_on_oops)
843 panic("Fatal exception");
844
845 + gr_handle_kernel_exploit();
846 +
847 do_exit(SIGSEGV);
848 }
849
850 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851 index e4fe124..0fc246b 100644
852 --- a/arch/arm/lib/copy_from_user.S
853 +++ b/arch/arm/lib/copy_from_user.S
854 @@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858 - * size_t __copy_from_user(void *to, const void *from, size_t n)
859 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863 @@ -84,11 +84,11 @@
864
865 .text
866
867 -ENTRY(__copy_from_user)
868 +ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872 -ENDPROC(__copy_from_user)
873 +ENDPROC(___copy_from_user)
874
875 .section .fixup,"ax"
876 .align 0
877 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878 index 1a71e15..ac7b258 100644
879 --- a/arch/arm/lib/copy_to_user.S
880 +++ b/arch/arm/lib/copy_to_user.S
881 @@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885 - * size_t __copy_to_user(void *to, const void *from, size_t n)
886 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890 @@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894 -WEAK(__copy_to_user)
895 +WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899 -ENDPROC(__copy_to_user)
900 +ENDPROC(___copy_to_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905 index ffdd274..91017b6 100644
906 --- a/arch/arm/lib/uaccess.S
907 +++ b/arch/arm/lib/uaccess.S
908 @@ -19,7 +19,7 @@
909
910 #define PAGE_SHIFT 12
911
912 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921 -ENTRY(__copy_to_user)
922 +ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( strgtbt r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930 -ENDPROC(__copy_to_user)
931 +ENDPROC(___copy_to_user)
932
933 .section .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .previous
937
938 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947 -ENTRY(__copy_from_user)
948 +ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
953 USER( ldrgtbt r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956 -ENDPROC(__copy_from_user)
957 +ENDPROC(___copy_from_user)
958
959 .section .fixup,"ax"
960 .align 0
961 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962 index 6b967ff..67d5b2b 100644
963 --- a/arch/arm/lib/uaccess_with_memcpy.c
964 +++ b/arch/arm/lib/uaccess_with_memcpy.c
965 @@ -97,7 +97,7 @@ out:
966 }
967
968 unsigned long
969 -__copy_to_user(void __user *to, const void *from, unsigned long n)
970 +___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
975 index 4028724..beec230 100644
976 --- a/arch/arm/mach-at91/pm.c
977 +++ b/arch/arm/mach-at91/pm.c
978 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
979 }
980
981
982 -static struct platform_suspend_ops at91_pm_ops ={
983 +static const struct platform_suspend_ops at91_pm_ops ={
984 .valid = at91_pm_valid_state,
985 .begin = at91_pm_begin,
986 .enter = at91_pm_enter,
987 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
988 index 5218943..0a34552 100644
989 --- a/arch/arm/mach-omap1/pm.c
990 +++ b/arch/arm/mach-omap1/pm.c
991 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
992
993
994
995 -static struct platform_suspend_ops omap_pm_ops ={
996 +static const struct platform_suspend_ops omap_pm_ops ={
997 .prepare = omap_pm_prepare,
998 .enter = omap_pm_enter,
999 .finish = omap_pm_finish,
1000 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1001 index bff5c4e..d4c649b 100644
1002 --- a/arch/arm/mach-omap2/pm24xx.c
1003 +++ b/arch/arm/mach-omap2/pm24xx.c
1004 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1005 enable_hlt();
1006 }
1007
1008 -static struct platform_suspend_ops omap_pm_ops = {
1009 +static const struct platform_suspend_ops omap_pm_ops = {
1010 .prepare = omap2_pm_prepare,
1011 .enter = omap2_pm_enter,
1012 .finish = omap2_pm_finish,
1013 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1014 index 8946319..7d3e661 100644
1015 --- a/arch/arm/mach-omap2/pm34xx.c
1016 +++ b/arch/arm/mach-omap2/pm34xx.c
1017 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1018 return;
1019 }
1020
1021 -static struct platform_suspend_ops omap_pm_ops = {
1022 +static const struct platform_suspend_ops omap_pm_ops = {
1023 .begin = omap3_pm_begin,
1024 .end = omap3_pm_end,
1025 .prepare = omap3_pm_prepare,
1026 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1027 index b3d8d53..6e68ebc 100644
1028 --- a/arch/arm/mach-pnx4008/pm.c
1029 +++ b/arch/arm/mach-pnx4008/pm.c
1030 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1031 (state == PM_SUSPEND_MEM);
1032 }
1033
1034 -static struct platform_suspend_ops pnx4008_pm_ops = {
1035 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1036 .enter = pnx4008_pm_enter,
1037 .valid = pnx4008_pm_valid,
1038 };
1039 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1040 index 7693355..9beb00a 100644
1041 --- a/arch/arm/mach-pxa/pm.c
1042 +++ b/arch/arm/mach-pxa/pm.c
1043 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1044 pxa_cpu_pm_fns->finish();
1045 }
1046
1047 -static struct platform_suspend_ops pxa_pm_ops = {
1048 +static const struct platform_suspend_ops pxa_pm_ops = {
1049 .valid = pxa_pm_valid,
1050 .enter = pxa_pm_enter,
1051 .prepare = pxa_pm_prepare,
1052 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1053 index 629e05d..06be589 100644
1054 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1055 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1056 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1057 }
1058
1059 #ifdef CONFIG_PM
1060 -static struct platform_suspend_ops sharpsl_pm_ops = {
1061 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1062 .prepare = pxa_pm_prepare,
1063 .finish = pxa_pm_finish,
1064 .enter = corgi_pxa_pm_enter,
1065 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1066 index c83fdc8..ab9fc44 100644
1067 --- a/arch/arm/mach-sa1100/pm.c
1068 +++ b/arch/arm/mach-sa1100/pm.c
1069 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1070 return virt_to_phys(sp);
1071 }
1072
1073 -static struct platform_suspend_ops sa11x0_pm_ops = {
1074 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1075 .enter = sa11x0_pm_enter,
1076 .valid = suspend_valid_only_mem,
1077 };
1078 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1079 index 3191cd6..c0739db 100644
1080 --- a/arch/arm/mm/fault.c
1081 +++ b/arch/arm/mm/fault.c
1082 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1083 }
1084 #endif
1085
1086 +#ifdef CONFIG_PAX_PAGEEXEC
1087 + if (fsr & FSR_LNX_PF) {
1088 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1089 + do_group_exit(SIGKILL);
1090 + }
1091 +#endif
1092 +
1093 tsk->thread.address = addr;
1094 tsk->thread.error_code = fsr;
1095 tsk->thread.trap_no = 14;
1096 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1097 }
1098 #endif /* CONFIG_MMU */
1099
1100 +#ifdef CONFIG_PAX_PAGEEXEC
1101 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1102 +{
1103 + long i;
1104 +
1105 + printk(KERN_ERR "PAX: bytes at PC: ");
1106 + for (i = 0; i < 20; i++) {
1107 + unsigned char c;
1108 + if (get_user(c, (__force unsigned char __user *)pc+i))
1109 + printk(KERN_CONT "?? ");
1110 + else
1111 + printk(KERN_CONT "%02x ", c);
1112 + }
1113 + printk("\n");
1114 +
1115 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1116 + for (i = -1; i < 20; i++) {
1117 + unsigned long c;
1118 + if (get_user(c, (__force unsigned long __user *)sp+i))
1119 + printk(KERN_CONT "???????? ");
1120 + else
1121 + printk(KERN_CONT "%08lx ", c);
1122 + }
1123 + printk("\n");
1124 +}
1125 +#endif
1126 +
1127 /*
1128 * First Level Translation Fault Handler
1129 *
1130 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1131 index f5abc51..7ec524c 100644
1132 --- a/arch/arm/mm/mmap.c
1133 +++ b/arch/arm/mm/mmap.c
1134 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1135 if (len > TASK_SIZE)
1136 return -ENOMEM;
1137
1138 +#ifdef CONFIG_PAX_RANDMMAP
1139 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1140 +#endif
1141 +
1142 if (addr) {
1143 if (do_align)
1144 addr = COLOUR_ALIGN(addr, pgoff);
1145 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1146 addr = PAGE_ALIGN(addr);
1147
1148 vma = find_vma(mm, addr);
1149 - if (TASK_SIZE - len >= addr &&
1150 - (!vma || addr + len <= vma->vm_start))
1151 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1152 return addr;
1153 }
1154 if (len > mm->cached_hole_size) {
1155 - start_addr = addr = mm->free_area_cache;
1156 + start_addr = addr = mm->free_area_cache;
1157 } else {
1158 - start_addr = addr = TASK_UNMAPPED_BASE;
1159 - mm->cached_hole_size = 0;
1160 + start_addr = addr = mm->mmap_base;
1161 + mm->cached_hole_size = 0;
1162 }
1163
1164 full_search:
1165 @@ -94,14 +97,14 @@ full_search:
1166 * Start a new search - just in case we missed
1167 * some holes.
1168 */
1169 - if (start_addr != TASK_UNMAPPED_BASE) {
1170 - start_addr = addr = TASK_UNMAPPED_BASE;
1171 + if (start_addr != mm->mmap_base) {
1172 + start_addr = addr = mm->mmap_base;
1173 mm->cached_hole_size = 0;
1174 goto full_search;
1175 }
1176 return -ENOMEM;
1177 }
1178 - if (!vma || addr + len <= vma->vm_start) {
1179 + if (check_heap_stack_gap(vma, addr, len)) {
1180 /*
1181 * Remember the place where we stopped the search:
1182 */
1183 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1184 index 8d97db2..b66cfa5 100644
1185 --- a/arch/arm/plat-s3c/pm.c
1186 +++ b/arch/arm/plat-s3c/pm.c
1187 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1188 s3c_pm_check_cleanup();
1189 }
1190
1191 -static struct platform_suspend_ops s3c_pm_ops = {
1192 +static const struct platform_suspend_ops s3c_pm_ops = {
1193 .enter = s3c_pm_enter,
1194 .prepare = s3c_pm_prepare,
1195 .finish = s3c_pm_finish,
1196 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1197 index d5d1d41..856e2ed 100644
1198 --- a/arch/avr32/include/asm/elf.h
1199 +++ b/arch/avr32/include/asm/elf.h
1200 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1201 the loader. We need to make sure that it is out of the way of the program
1202 that it will "exec", and that there is sufficient room for the brk. */
1203
1204 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1205 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1206
1207 +#ifdef CONFIG_PAX_ASLR
1208 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1209 +
1210 +#define PAX_DELTA_MMAP_LEN 15
1211 +#define PAX_DELTA_STACK_LEN 15
1212 +#endif
1213
1214 /* This yields a mask that user programs can use to figure out what
1215 instruction set this CPU supports. This could be done in user space,
1216 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1217 index b7f5c68..556135c 100644
1218 --- a/arch/avr32/include/asm/kmap_types.h
1219 +++ b/arch/avr32/include/asm/kmap_types.h
1220 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1221 D(11) KM_IRQ1,
1222 D(12) KM_SOFTIRQ0,
1223 D(13) KM_SOFTIRQ1,
1224 -D(14) KM_TYPE_NR
1225 +D(14) KM_CLEARPAGE,
1226 +D(15) KM_TYPE_NR
1227 };
1228
1229 #undef D
1230 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1231 index f021edf..32d680e 100644
1232 --- a/arch/avr32/mach-at32ap/pm.c
1233 +++ b/arch/avr32/mach-at32ap/pm.c
1234 @@ -176,7 +176,7 @@ out:
1235 return 0;
1236 }
1237
1238 -static struct platform_suspend_ops avr32_pm_ops = {
1239 +static const struct platform_suspend_ops avr32_pm_ops = {
1240 .valid = avr32_pm_valid_state,
1241 .enter = avr32_pm_enter,
1242 };
1243 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1244 index b61d86d..e292c7f 100644
1245 --- a/arch/avr32/mm/fault.c
1246 +++ b/arch/avr32/mm/fault.c
1247 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1248
1249 int exception_trace = 1;
1250
1251 +#ifdef CONFIG_PAX_PAGEEXEC
1252 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1253 +{
1254 + unsigned long i;
1255 +
1256 + printk(KERN_ERR "PAX: bytes at PC: ");
1257 + for (i = 0; i < 20; i++) {
1258 + unsigned char c;
1259 + if (get_user(c, (unsigned char *)pc+i))
1260 + printk(KERN_CONT "???????? ");
1261 + else
1262 + printk(KERN_CONT "%02x ", c);
1263 + }
1264 + printk("\n");
1265 +}
1266 +#endif
1267 +
1268 /*
1269 * This routine handles page faults. It determines the address and the
1270 * problem, and then passes it off to one of the appropriate routines.
1271 @@ -157,6 +174,16 @@ bad_area:
1272 up_read(&mm->mmap_sem);
1273
1274 if (user_mode(regs)) {
1275 +
1276 +#ifdef CONFIG_PAX_PAGEEXEC
1277 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1278 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1279 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1280 + do_group_exit(SIGKILL);
1281 + }
1282 + }
1283 +#endif
1284 +
1285 if (exception_trace && printk_ratelimit())
1286 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1287 "sp %08lx ecr %lu\n",
1288 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1289 index cce79d0..c406c85 100644
1290 --- a/arch/blackfin/kernel/kgdb.c
1291 +++ b/arch/blackfin/kernel/kgdb.c
1292 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1293 return -1; /* this means that we do not want to exit from the handler */
1294 }
1295
1296 -struct kgdb_arch arch_kgdb_ops = {
1297 +const struct kgdb_arch arch_kgdb_ops = {
1298 .gdb_bpt_instr = {0xa1},
1299 #ifdef CONFIG_SMP
1300 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1301 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1302 index 8837be4..b2fb413 100644
1303 --- a/arch/blackfin/mach-common/pm.c
1304 +++ b/arch/blackfin/mach-common/pm.c
1305 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1306 return 0;
1307 }
1308
1309 -struct platform_suspend_ops bfin_pm_ops = {
1310 +const struct platform_suspend_ops bfin_pm_ops = {
1311 .enter = bfin_pm_enter,
1312 .valid = bfin_pm_valid,
1313 };
1314 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1315 index 00a57af..c3ef0cd 100644
1316 --- a/arch/frv/include/asm/atomic.h
1317 +++ b/arch/frv/include/asm/atomic.h
1318 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1319 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1320 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1321
1322 +#define atomic64_read_unchecked(v) atomic64_read(v)
1323 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1324 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1325 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1326 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1327 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1328 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1329 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1330 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1331 +
1332 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1333 {
1334 int c, old;
1335 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1336 index f8e16b2..c73ff79 100644
1337 --- a/arch/frv/include/asm/kmap_types.h
1338 +++ b/arch/frv/include/asm/kmap_types.h
1339 @@ -23,6 +23,7 @@ enum km_type {
1340 KM_IRQ1,
1341 KM_SOFTIRQ0,
1342 KM_SOFTIRQ1,
1343 + KM_CLEARPAGE,
1344 KM_TYPE_NR
1345 };
1346
1347 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1348 index 385fd30..6c3d97e 100644
1349 --- a/arch/frv/mm/elf-fdpic.c
1350 +++ b/arch/frv/mm/elf-fdpic.c
1351 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1352 if (addr) {
1353 addr = PAGE_ALIGN(addr);
1354 vma = find_vma(current->mm, addr);
1355 - if (TASK_SIZE - len >= addr &&
1356 - (!vma || addr + len <= vma->vm_start))
1357 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 }
1360
1361 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365 - if (addr + len <= vma->vm_start)
1366 + if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1371 for (; vma; vma = vma->vm_next) {
1372 if (addr > limit)
1373 break;
1374 - if (addr + len <= vma->vm_start)
1375 + if (check_heap_stack_gap(vma, addr, len))
1376 goto success;
1377 addr = vma->vm_end;
1378 }
1379 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1380 index e4a80d8..11a7ea1 100644
1381 --- a/arch/ia64/hp/common/hwsw_iommu.c
1382 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1383 @@ -17,7 +17,7 @@
1384 #include <linux/swiotlb.h>
1385 #include <asm/machvec.h>
1386
1387 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1388 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1389
1390 /* swiotlb declarations & definitions: */
1391 extern int swiotlb_late_init_with_default_size (size_t size);
1392 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1393 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1394 }
1395
1396 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1397 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1398 {
1399 if (use_swiotlb(dev))
1400 return &swiotlb_dma_ops;
1401 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1402 index 01ae69b..35752fd 100644
1403 --- a/arch/ia64/hp/common/sba_iommu.c
1404 +++ b/arch/ia64/hp/common/sba_iommu.c
1405 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1406 },
1407 };
1408
1409 -extern struct dma_map_ops swiotlb_dma_ops;
1410 +extern const struct dma_map_ops swiotlb_dma_ops;
1411
1412 static int __init
1413 sba_init(void)
1414 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1415
1416 __setup("sbapagesize=",sba_page_override);
1417
1418 -struct dma_map_ops sba_dma_ops = {
1419 +const struct dma_map_ops sba_dma_ops = {
1420 .alloc_coherent = sba_alloc_coherent,
1421 .free_coherent = sba_free_coherent,
1422 .map_page = sba_map_page,
1423 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1424 index c69552b..c7122f4 100644
1425 --- a/arch/ia64/ia32/binfmt_elf32.c
1426 +++ b/arch/ia64/ia32/binfmt_elf32.c
1427 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1428
1429 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1430
1431 +#ifdef CONFIG_PAX_ASLR
1432 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1433 +
1434 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1435 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1436 +#endif
1437 +
1438 /* Ugly but avoids duplication */
1439 #include "../../../fs/binfmt_elf.c"
1440
1441 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1442 index 0f15349..26b3429 100644
1443 --- a/arch/ia64/ia32/ia32priv.h
1444 +++ b/arch/ia64/ia32/ia32priv.h
1445 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1446 #define ELF_DATA ELFDATA2LSB
1447 #define ELF_ARCH EM_386
1448
1449 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1450 +#ifdef CONFIG_PAX_RANDUSTACK
1451 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1452 +#else
1453 +#define __IA32_DELTA_STACK 0UL
1454 +#endif
1455 +
1456 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1457 +
1458 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1459 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1460
1461 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1462 index 88405cb..de5ca5d 100644
1463 --- a/arch/ia64/include/asm/atomic.h
1464 +++ b/arch/ia64/include/asm/atomic.h
1465 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1466 #define atomic64_inc(v) atomic64_add(1, (v))
1467 #define atomic64_dec(v) atomic64_sub(1, (v))
1468
1469 +#define atomic64_read_unchecked(v) atomic64_read(v)
1470 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1471 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1472 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1473 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1474 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1475 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1476 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1477 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1478 +
1479 /* Atomic operations are already serializing */
1480 #define smp_mb__before_atomic_dec() barrier()
1481 #define smp_mb__after_atomic_dec() barrier()
1482 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1483 index 8d3c79c..71b3af6 100644
1484 --- a/arch/ia64/include/asm/dma-mapping.h
1485 +++ b/arch/ia64/include/asm/dma-mapping.h
1486 @@ -12,7 +12,7 @@
1487
1488 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1489
1490 -extern struct dma_map_ops *dma_ops;
1491 +extern const struct dma_map_ops *dma_ops;
1492 extern struct ia64_machine_vector ia64_mv;
1493 extern void set_iommu_machvec(void);
1494
1495 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1496 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1497 dma_addr_t *daddr, gfp_t gfp)
1498 {
1499 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 void *caddr;
1502
1503 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1504 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1505 static inline void dma_free_coherent(struct device *dev, size_t size,
1506 void *caddr, dma_addr_t daddr)
1507 {
1508 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 debug_dma_free_coherent(dev, size, caddr, daddr);
1511 ops->free_coherent(dev, size, caddr, daddr);
1512 }
1513 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1514
1515 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1516 {
1517 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1518 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1519 return ops->mapping_error(dev, daddr);
1520 }
1521
1522 static inline int dma_supported(struct device *dev, u64 mask)
1523 {
1524 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1525 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1526 return ops->dma_supported(dev, mask);
1527 }
1528
1529 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1530 index 86eddee..b116bb4 100644
1531 --- a/arch/ia64/include/asm/elf.h
1532 +++ b/arch/ia64/include/asm/elf.h
1533 @@ -43,6 +43,13 @@
1534 */
1535 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1536
1537 +#ifdef CONFIG_PAX_ASLR
1538 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1539 +
1540 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1541 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1542 +#endif
1543 +
1544 #define PT_IA_64_UNWIND 0x70000001
1545
1546 /* IA-64 relocations: */
1547 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1548 index 367d299..9ad4279 100644
1549 --- a/arch/ia64/include/asm/machvec.h
1550 +++ b/arch/ia64/include/asm/machvec.h
1551 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1552 /* DMA-mapping interface: */
1553 typedef void ia64_mv_dma_init (void);
1554 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1555 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1556 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1557
1558 /*
1559 * WARNING: The legacy I/O space is _architected_. Platforms are
1560 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1561 # endif /* CONFIG_IA64_GENERIC */
1562
1563 extern void swiotlb_dma_init(void);
1564 -extern struct dma_map_ops *dma_get_ops(struct device *);
1565 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1566
1567 /*
1568 * Define default versions so we can extend machvec for new platforms without having
1569 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1570 index 8840a69..cdb63d9 100644
1571 --- a/arch/ia64/include/asm/pgtable.h
1572 +++ b/arch/ia64/include/asm/pgtable.h
1573 @@ -12,7 +12,7 @@
1574 * David Mosberger-Tang <davidm@hpl.hp.com>
1575 */
1576
1577 -
1578 +#include <linux/const.h>
1579 #include <asm/mman.h>
1580 #include <asm/page.h>
1581 #include <asm/processor.h>
1582 @@ -143,6 +143,17 @@
1583 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1584 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1585 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1586 +
1587 +#ifdef CONFIG_PAX_PAGEEXEC
1588 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1589 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1590 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1591 +#else
1592 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1593 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1594 +# define PAGE_COPY_NOEXEC PAGE_COPY
1595 +#endif
1596 +
1597 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1598 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1599 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1600 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1601 index 239ecdc..f94170e 100644
1602 --- a/arch/ia64/include/asm/spinlock.h
1603 +++ b/arch/ia64/include/asm/spinlock.h
1604 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1605 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1606
1607 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1608 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1609 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1610 }
1611
1612 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1613 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1614 index 449c8c0..432a3d2 100644
1615 --- a/arch/ia64/include/asm/uaccess.h
1616 +++ b/arch/ia64/include/asm/uaccess.h
1617 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 const void *__cu_from = (from); \
1619 long __cu_len = (n); \
1620 \
1621 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1622 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1623 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1627 long __cu_len = (n); \
1628 \
1629 __chk_user_ptr(__cu_from); \
1630 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1631 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1632 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1633 __cu_len; \
1634 })
1635 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1636 index f2c1600..969398a 100644
1637 --- a/arch/ia64/kernel/dma-mapping.c
1638 +++ b/arch/ia64/kernel/dma-mapping.c
1639 @@ -3,7 +3,7 @@
1640 /* Set this to 1 if there is a HW IOMMU in the system */
1641 int iommu_detected __read_mostly;
1642
1643 -struct dma_map_ops *dma_ops;
1644 +const struct dma_map_ops *dma_ops;
1645 EXPORT_SYMBOL(dma_ops);
1646
1647 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1648 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1649 }
1650 fs_initcall(dma_init);
1651
1652 -struct dma_map_ops *dma_get_ops(struct device *dev)
1653 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1654 {
1655 return dma_ops;
1656 }
1657 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1658 index 1481b0a..e7d38ff 100644
1659 --- a/arch/ia64/kernel/module.c
1660 +++ b/arch/ia64/kernel/module.c
1661 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1662 void
1663 module_free (struct module *mod, void *module_region)
1664 {
1665 - if (mod && mod->arch.init_unw_table &&
1666 - module_region == mod->module_init) {
1667 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1668 unw_remove_unwind_table(mod->arch.init_unw_table);
1669 mod->arch.init_unw_table = NULL;
1670 }
1671 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1672 }
1673
1674 static inline int
1675 +in_init_rx (const struct module *mod, uint64_t addr)
1676 +{
1677 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1678 +}
1679 +
1680 +static inline int
1681 +in_init_rw (const struct module *mod, uint64_t addr)
1682 +{
1683 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1684 +}
1685 +
1686 +static inline int
1687 in_init (const struct module *mod, uint64_t addr)
1688 {
1689 - return addr - (uint64_t) mod->module_init < mod->init_size;
1690 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1691 +}
1692 +
1693 +static inline int
1694 +in_core_rx (const struct module *mod, uint64_t addr)
1695 +{
1696 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1697 +}
1698 +
1699 +static inline int
1700 +in_core_rw (const struct module *mod, uint64_t addr)
1701 +{
1702 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1703 }
1704
1705 static inline int
1706 in_core (const struct module *mod, uint64_t addr)
1707 {
1708 - return addr - (uint64_t) mod->module_core < mod->core_size;
1709 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1710 }
1711
1712 static inline int
1713 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1714 break;
1715
1716 case RV_BDREL:
1717 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1718 + if (in_init_rx(mod, val))
1719 + val -= (uint64_t) mod->module_init_rx;
1720 + else if (in_init_rw(mod, val))
1721 + val -= (uint64_t) mod->module_init_rw;
1722 + else if (in_core_rx(mod, val))
1723 + val -= (uint64_t) mod->module_core_rx;
1724 + else if (in_core_rw(mod, val))
1725 + val -= (uint64_t) mod->module_core_rw;
1726 break;
1727
1728 case RV_LTV:
1729 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1730 * addresses have been selected...
1731 */
1732 uint64_t gp;
1733 - if (mod->core_size > MAX_LTOFF)
1734 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1735 /*
1736 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1737 * at the end of the module.
1738 */
1739 - gp = mod->core_size - MAX_LTOFF / 2;
1740 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1741 else
1742 - gp = mod->core_size / 2;
1743 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1744 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1745 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1746 mod->arch.gp = gp;
1747 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1748 }
1749 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1750 index f6b1ff0..de773fb 100644
1751 --- a/arch/ia64/kernel/pci-dma.c
1752 +++ b/arch/ia64/kernel/pci-dma.c
1753 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1754 .dma_mask = &fallback_dev.coherent_dma_mask,
1755 };
1756
1757 -extern struct dma_map_ops intel_dma_ops;
1758 +extern const struct dma_map_ops intel_dma_ops;
1759
1760 static int __init pci_iommu_init(void)
1761 {
1762 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1763 }
1764 EXPORT_SYMBOL(iommu_dma_supported);
1765
1766 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1767 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1768 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1769 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1770 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1771 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1772 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1773 +
1774 +static const struct dma_map_ops intel_iommu_dma_ops = {
1775 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1776 + .alloc_coherent = intel_alloc_coherent,
1777 + .free_coherent = intel_free_coherent,
1778 + .map_sg = intel_map_sg,
1779 + .unmap_sg = intel_unmap_sg,
1780 + .map_page = intel_map_page,
1781 + .unmap_page = intel_unmap_page,
1782 + .mapping_error = intel_mapping_error,
1783 +
1784 + .sync_single_for_cpu = machvec_dma_sync_single,
1785 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1786 + .sync_single_for_device = machvec_dma_sync_single,
1787 + .sync_sg_for_device = machvec_dma_sync_sg,
1788 + .dma_supported = iommu_dma_supported,
1789 +};
1790 +
1791 void __init pci_iommu_alloc(void)
1792 {
1793 - dma_ops = &intel_dma_ops;
1794 -
1795 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1796 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1797 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1798 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1799 - dma_ops->dma_supported = iommu_dma_supported;
1800 + dma_ops = &intel_iommu_dma_ops;
1801
1802 /*
1803 * The order of these functions is important for
1804 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1805 index 285aae8..61dbab6 100644
1806 --- a/arch/ia64/kernel/pci-swiotlb.c
1807 +++ b/arch/ia64/kernel/pci-swiotlb.c
1808 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1809 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1810 }
1811
1812 -struct dma_map_ops swiotlb_dma_ops = {
1813 +const struct dma_map_ops swiotlb_dma_ops = {
1814 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1815 .free_coherent = swiotlb_free_coherent,
1816 .map_page = swiotlb_map_page,
1817 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1818 index 609d500..7dde2a8 100644
1819 --- a/arch/ia64/kernel/sys_ia64.c
1820 +++ b/arch/ia64/kernel/sys_ia64.c
1821 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1822 if (REGION_NUMBER(addr) == RGN_HPAGE)
1823 addr = 0;
1824 #endif
1825 +
1826 +#ifdef CONFIG_PAX_RANDMMAP
1827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1828 + addr = mm->free_area_cache;
1829 + else
1830 +#endif
1831 +
1832 if (!addr)
1833 addr = mm->free_area_cache;
1834
1835 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1836 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1837 /* At this point: (!vma || addr < vma->vm_end). */
1838 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 + if (start_addr != mm->mmap_base) {
1841 /* Start a new search --- just in case we missed some holes. */
1842 - addr = TASK_UNMAPPED_BASE;
1843 + addr = mm->mmap_base;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /* Remember the address where we stopped this search: */
1851 mm->free_area_cache = addr + len;
1852 return addr;
1853 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1854 index 8f06035..b3a5818 100644
1855 --- a/arch/ia64/kernel/topology.c
1856 +++ b/arch/ia64/kernel/topology.c
1857 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1858 return ret;
1859 }
1860
1861 -static struct sysfs_ops cache_sysfs_ops = {
1862 +static const struct sysfs_ops cache_sysfs_ops = {
1863 .show = cache_show
1864 };
1865
1866 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1867 index 0a0c77b..8e55a81 100644
1868 --- a/arch/ia64/kernel/vmlinux.lds.S
1869 +++ b/arch/ia64/kernel/vmlinux.lds.S
1870 @@ -190,7 +190,7 @@ SECTIONS
1871 /* Per-cpu data: */
1872 . = ALIGN(PERCPU_PAGE_SIZE);
1873 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1874 - __phys_per_cpu_start = __per_cpu_load;
1875 + __phys_per_cpu_start = per_cpu_load;
1876 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1877 * into percpu page size
1878 */
1879 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1880 index 19261a9..1611b7a 100644
1881 --- a/arch/ia64/mm/fault.c
1882 +++ b/arch/ia64/mm/fault.c
1883 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1884 return pte_present(pte);
1885 }
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1889 +{
1890 + unsigned long i;
1891 +
1892 + printk(KERN_ERR "PAX: bytes at PC: ");
1893 + for (i = 0; i < 8; i++) {
1894 + unsigned int c;
1895 + if (get_user(c, (unsigned int *)pc+i))
1896 + printk(KERN_CONT "???????? ");
1897 + else
1898 + printk(KERN_CONT "%08x ", c);
1899 + }
1900 + printk("\n");
1901 +}
1902 +#endif
1903 +
1904 void __kprobes
1905 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1906 {
1907 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1908 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1909 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1910
1911 - if ((vma->vm_flags & mask) != mask)
1912 + if ((vma->vm_flags & mask) != mask) {
1913 +
1914 +#ifdef CONFIG_PAX_PAGEEXEC
1915 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1916 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1917 + goto bad_area;
1918 +
1919 + up_read(&mm->mmap_sem);
1920 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1921 + do_group_exit(SIGKILL);
1922 + }
1923 +#endif
1924 +
1925 goto bad_area;
1926
1927 + }
1928 +
1929 survive:
1930 /*
1931 * If for any reason at all we couldn't handle the fault, make
1932 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1933 index b0f6157..a082bbc 100644
1934 --- a/arch/ia64/mm/hugetlbpage.c
1935 +++ b/arch/ia64/mm/hugetlbpage.c
1936 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1937 /* At this point: (!vmm || addr < vmm->vm_end). */
1938 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1939 return -ENOMEM;
1940 - if (!vmm || (addr + len) <= vmm->vm_start)
1941 + if (check_heap_stack_gap(vmm, addr, len))
1942 return addr;
1943 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1944 }
1945 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1946 index 1857766..05cc6a3 100644
1947 --- a/arch/ia64/mm/init.c
1948 +++ b/arch/ia64/mm/init.c
1949 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1950 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1951 vma->vm_end = vma->vm_start + PAGE_SIZE;
1952 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1953 +
1954 +#ifdef CONFIG_PAX_PAGEEXEC
1955 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1956 + vma->vm_flags &= ~VM_EXEC;
1957 +
1958 +#ifdef CONFIG_PAX_MPROTECT
1959 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1960 + vma->vm_flags &= ~VM_MAYEXEC;
1961 +#endif
1962 +
1963 + }
1964 +#endif
1965 +
1966 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1967 down_write(&current->mm->mmap_sem);
1968 if (insert_vm_struct(current->mm, vma)) {
1969 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1970 index 98b6849..8046766 100644
1971 --- a/arch/ia64/sn/pci/pci_dma.c
1972 +++ b/arch/ia64/sn/pci/pci_dma.c
1973 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1974 return ret;
1975 }
1976
1977 -static struct dma_map_ops sn_dma_ops = {
1978 +static const struct dma_map_ops sn_dma_ops = {
1979 .alloc_coherent = sn_dma_alloc_coherent,
1980 .free_coherent = sn_dma_free_coherent,
1981 .map_page = sn_dma_map_page,
1982 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1983 index 82abd15..d95ae5d 100644
1984 --- a/arch/m32r/lib/usercopy.c
1985 +++ b/arch/m32r/lib/usercopy.c
1986 @@ -14,6 +14,9 @@
1987 unsigned long
1988 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1989 {
1990 + if ((long)n < 0)
1991 + return n;
1992 +
1993 prefetch(from);
1994 if (access_ok(VERIFY_WRITE, to, n))
1995 __copy_user(to,from,n);
1996 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1997 unsigned long
1998 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1999 {
2000 + if ((long)n < 0)
2001 + return n;
2002 +
2003 prefetchw(to);
2004 if (access_ok(VERIFY_READ, from, n))
2005 __copy_user_zeroing(to,from,n);
2006 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2007 index fd7620f..63d73a6 100644
2008 --- a/arch/mips/Kconfig
2009 +++ b/arch/mips/Kconfig
2010 @@ -5,6 +5,7 @@ config MIPS
2011 select HAVE_IDE
2012 select HAVE_OPROFILE
2013 select HAVE_ARCH_KGDB
2014 + select GENERIC_ATOMIC64 if !64BIT
2015 # Horrible source of confusion. Die, die, die ...
2016 select EMBEDDED
2017 select RTC_LIB if !LEMOTE_FULOONG2E
2018 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2019 index 77f5021..2b1db8a 100644
2020 --- a/arch/mips/Makefile
2021 +++ b/arch/mips/Makefile
2022 @@ -51,6 +51,8 @@ endif
2023 cflags-y := -ffunction-sections
2024 cflags-y += $(call cc-option, -mno-check-zero-division)
2025
2026 +cflags-y += -Wno-sign-compare -Wno-extra
2027 +
2028 ifdef CONFIG_32BIT
2029 ld-emul = $(32bit-emul)
2030 vmlinux-32 = vmlinux
2031 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2032 index 632f986..fd0378d 100644
2033 --- a/arch/mips/alchemy/devboards/pm.c
2034 +++ b/arch/mips/alchemy/devboards/pm.c
2035 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2036
2037 }
2038
2039 -static struct platform_suspend_ops db1x_pm_ops = {
2040 +static const struct platform_suspend_ops db1x_pm_ops = {
2041 .valid = suspend_valid_only_mem,
2042 .begin = db1x_pm_begin,
2043 .enter = db1x_pm_enter,
2044 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2045 index 09e7128..111035b 100644
2046 --- a/arch/mips/include/asm/atomic.h
2047 +++ b/arch/mips/include/asm/atomic.h
2048 @@ -21,6 +21,10 @@
2049 #include <asm/war.h>
2050 #include <asm/system.h>
2051
2052 +#ifdef CONFIG_GENERIC_ATOMIC64
2053 +#include <asm-generic/atomic64.h>
2054 +#endif
2055 +
2056 #define ATOMIC_INIT(i) { (i) }
2057
2058 /*
2059 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2060 */
2061 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2062
2063 +#define atomic64_read_unchecked(v) atomic64_read(v)
2064 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2065 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2066 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2067 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2068 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2069 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2070 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2071 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2072 +
2073 #endif /* CONFIG_64BIT */
2074
2075 /*
2076 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2077 index 7990694..4e93acf 100644
2078 --- a/arch/mips/include/asm/elf.h
2079 +++ b/arch/mips/include/asm/elf.h
2080 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2081 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2082 #endif
2083
2084 +#ifdef CONFIG_PAX_ASLR
2085 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2086 +
2087 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2088 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2089 +#endif
2090 +
2091 #endif /* _ASM_ELF_H */
2092 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2093 index f266295..627cfff 100644
2094 --- a/arch/mips/include/asm/page.h
2095 +++ b/arch/mips/include/asm/page.h
2096 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2097 #ifdef CONFIG_CPU_MIPS32
2098 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2099 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2100 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2101 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2102 #else
2103 typedef struct { unsigned long long pte; } pte_t;
2104 #define pte_val(x) ((x).pte)
2105 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2106 index e48c0bf..f3acf65 100644
2107 --- a/arch/mips/include/asm/reboot.h
2108 +++ b/arch/mips/include/asm/reboot.h
2109 @@ -9,7 +9,7 @@
2110 #ifndef _ASM_REBOOT_H
2111 #define _ASM_REBOOT_H
2112
2113 -extern void (*_machine_restart)(char *command);
2114 -extern void (*_machine_halt)(void);
2115 +extern void (*__noreturn _machine_restart)(char *command);
2116 +extern void (*__noreturn _machine_halt)(void);
2117
2118 #endif /* _ASM_REBOOT_H */
2119 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2120 index 83b5509..9fa24a23 100644
2121 --- a/arch/mips/include/asm/system.h
2122 +++ b/arch/mips/include/asm/system.h
2123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2124 */
2125 #define __ARCH_WANT_UNLOCKED_CTXSW
2126
2127 -extern unsigned long arch_align_stack(unsigned long sp);
2128 +#define arch_align_stack(x) ((x) & ~0xfUL)
2129
2130 #endif /* _ASM_SYSTEM_H */
2131 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2132 index 9fdd8bc..fcf9d68 100644
2133 --- a/arch/mips/kernel/binfmt_elfn32.c
2134 +++ b/arch/mips/kernel/binfmt_elfn32.c
2135 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2136 #undef ELF_ET_DYN_BASE
2137 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2138
2139 +#ifdef CONFIG_PAX_ASLR
2140 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2141 +
2142 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2143 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2144 +#endif
2145 +
2146 #include <asm/processor.h>
2147 #include <linux/module.h>
2148 #include <linux/elfcore.h>
2149 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2150 index ff44823..cf0b48a 100644
2151 --- a/arch/mips/kernel/binfmt_elfo32.c
2152 +++ b/arch/mips/kernel/binfmt_elfo32.c
2153 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2154 #undef ELF_ET_DYN_BASE
2155 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2156
2157 +#ifdef CONFIG_PAX_ASLR
2158 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2159 +
2160 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2161 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2162 +#endif
2163 +
2164 #include <asm/processor.h>
2165
2166 /*
2167 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2168 index 50c9bb8..efdd5f8 100644
2169 --- a/arch/mips/kernel/kgdb.c
2170 +++ b/arch/mips/kernel/kgdb.c
2171 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2172 return -1;
2173 }
2174
2175 +/* cannot be const */
2176 struct kgdb_arch arch_kgdb_ops;
2177
2178 /*
2179 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2180 index f3d73e1..bb3f57a 100644
2181 --- a/arch/mips/kernel/process.c
2182 +++ b/arch/mips/kernel/process.c
2183 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2184 out:
2185 return pc;
2186 }
2187 -
2188 -/*
2189 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2190 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2191 - */
2192 -unsigned long arch_align_stack(unsigned long sp)
2193 -{
2194 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2195 - sp -= get_random_int() & ~PAGE_MASK;
2196 -
2197 - return sp & ALMASK;
2198 -}
2199 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2200 index 060563a..7fbf310 100644
2201 --- a/arch/mips/kernel/reset.c
2202 +++ b/arch/mips/kernel/reset.c
2203 @@ -19,8 +19,8 @@
2204 * So handle all using function pointers to machine specific
2205 * functions.
2206 */
2207 -void (*_machine_restart)(char *command);
2208 -void (*_machine_halt)(void);
2209 +void (*__noreturn _machine_restart)(char *command);
2210 +void (*__noreturn _machine_halt)(void);
2211 void (*pm_power_off)(void);
2212
2213 EXPORT_SYMBOL(pm_power_off);
2214 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2215 {
2216 if (_machine_restart)
2217 _machine_restart(command);
2218 + BUG();
2219 }
2220
2221 void machine_halt(void)
2222 {
2223 if (_machine_halt)
2224 _machine_halt();
2225 + BUG();
2226 }
2227
2228 void machine_power_off(void)
2229 {
2230 if (pm_power_off)
2231 pm_power_off();
2232 + BUG();
2233 }
2234 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2235 index 3f7f466..3abe0b5 100644
2236 --- a/arch/mips/kernel/syscall.c
2237 +++ b/arch/mips/kernel/syscall.c
2238 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2239 do_color_align = 0;
2240 if (filp || (flags & MAP_SHARED))
2241 do_color_align = 1;
2242 +
2243 +#ifdef CONFIG_PAX_RANDMMAP
2244 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2245 +#endif
2246 +
2247 if (addr) {
2248 if (do_color_align)
2249 addr = COLOUR_ALIGN(addr, pgoff);
2250 else
2251 addr = PAGE_ALIGN(addr);
2252 vmm = find_vma(current->mm, addr);
2253 - if (task_size - len >= addr &&
2254 - (!vmm || addr + len <= vmm->vm_start))
2255 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2256 return addr;
2257 }
2258 - addr = TASK_UNMAPPED_BASE;
2259 + addr = current->mm->mmap_base;
2260 if (do_color_align)
2261 addr = COLOUR_ALIGN(addr, pgoff);
2262 else
2263 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2264 /* At this point: (!vmm || addr < vmm->vm_end). */
2265 if (task_size - len < addr)
2266 return -ENOMEM;
2267 - if (!vmm || addr + len <= vmm->vm_start)
2268 + if (check_heap_stack_gap(vmm, addr, len))
2269 return addr;
2270 addr = vmm->vm_end;
2271 if (do_color_align)
2272 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2273 index e97a7a2..f18f5b0 100644
2274 --- a/arch/mips/mm/fault.c
2275 +++ b/arch/mips/mm/fault.c
2276 @@ -26,6 +26,23 @@
2277 #include <asm/ptrace.h>
2278 #include <asm/highmem.h> /* For VMALLOC_END */
2279
2280 +#ifdef CONFIG_PAX_PAGEEXEC
2281 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2282 +{
2283 + unsigned long i;
2284 +
2285 + printk(KERN_ERR "PAX: bytes at PC: ");
2286 + for (i = 0; i < 5; i++) {
2287 + unsigned int c;
2288 + if (get_user(c, (unsigned int *)pc+i))
2289 + printk(KERN_CONT "???????? ");
2290 + else
2291 + printk(KERN_CONT "%08x ", c);
2292 + }
2293 + printk("\n");
2294 +}
2295 +#endif
2296 +
2297 /*
2298 * This routine handles page faults. It determines the address,
2299 * and the problem, and then passes it off to one of the appropriate
2300 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2301 index 8bc9e96..26554f8 100644
2302 --- a/arch/parisc/include/asm/atomic.h
2303 +++ b/arch/parisc/include/asm/atomic.h
2304 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2305
2306 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2307
2308 +#define atomic64_read_unchecked(v) atomic64_read(v)
2309 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2310 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2311 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2312 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2313 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2314 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2315 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2316 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2317 +
2318 #else /* CONFIG_64BIT */
2319
2320 #include <asm-generic/atomic64.h>
2321 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2322 index 9c802eb..0592e41 100644
2323 --- a/arch/parisc/include/asm/elf.h
2324 +++ b/arch/parisc/include/asm/elf.h
2325 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2326
2327 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2328
2329 +#ifdef CONFIG_PAX_ASLR
2330 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2331 +
2332 +#define PAX_DELTA_MMAP_LEN 16
2333 +#define PAX_DELTA_STACK_LEN 16
2334 +#endif
2335 +
2336 /* This yields a mask that user programs can use to figure out what
2337 instruction set this CPU supports. This could be done in user space,
2338 but it's not easy, and we've already done it here. */
2339 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2340 index a27d2e2..18fd845 100644
2341 --- a/arch/parisc/include/asm/pgtable.h
2342 +++ b/arch/parisc/include/asm/pgtable.h
2343 @@ -207,6 +207,17 @@
2344 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2345 #define PAGE_COPY PAGE_EXECREAD
2346 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2347 +
2348 +#ifdef CONFIG_PAX_PAGEEXEC
2349 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2350 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2351 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2352 +#else
2353 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2354 +# define PAGE_COPY_NOEXEC PAGE_COPY
2355 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2356 +#endif
2357 +
2358 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2359 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2360 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2361 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2362 index 2120746..8d70a5e 100644
2363 --- a/arch/parisc/kernel/module.c
2364 +++ b/arch/parisc/kernel/module.c
2365 @@ -95,16 +95,38 @@
2366
2367 /* three functions to determine where in the module core
2368 * or init pieces the location is */
2369 +static inline int in_init_rx(struct module *me, void *loc)
2370 +{
2371 + return (loc >= me->module_init_rx &&
2372 + loc < (me->module_init_rx + me->init_size_rx));
2373 +}
2374 +
2375 +static inline int in_init_rw(struct module *me, void *loc)
2376 +{
2377 + return (loc >= me->module_init_rw &&
2378 + loc < (me->module_init_rw + me->init_size_rw));
2379 +}
2380 +
2381 static inline int in_init(struct module *me, void *loc)
2382 {
2383 - return (loc >= me->module_init &&
2384 - loc <= (me->module_init + me->init_size));
2385 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2386 +}
2387 +
2388 +static inline int in_core_rx(struct module *me, void *loc)
2389 +{
2390 + return (loc >= me->module_core_rx &&
2391 + loc < (me->module_core_rx + me->core_size_rx));
2392 +}
2393 +
2394 +static inline int in_core_rw(struct module *me, void *loc)
2395 +{
2396 + return (loc >= me->module_core_rw &&
2397 + loc < (me->module_core_rw + me->core_size_rw));
2398 }
2399
2400 static inline int in_core(struct module *me, void *loc)
2401 {
2402 - return (loc >= me->module_core &&
2403 - loc <= (me->module_core + me->core_size));
2404 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2405 }
2406
2407 static inline int in_local(struct module *me, void *loc)
2408 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2409 }
2410
2411 /* align things a bit */
2412 - me->core_size = ALIGN(me->core_size, 16);
2413 - me->arch.got_offset = me->core_size;
2414 - me->core_size += gots * sizeof(struct got_entry);
2415 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2416 + me->arch.got_offset = me->core_size_rw;
2417 + me->core_size_rw += gots * sizeof(struct got_entry);
2418
2419 - me->core_size = ALIGN(me->core_size, 16);
2420 - me->arch.fdesc_offset = me->core_size;
2421 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2422 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2423 + me->arch.fdesc_offset = me->core_size_rw;
2424 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2425
2426 me->arch.got_max = gots;
2427 me->arch.fdesc_max = fdescs;
2428 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429
2430 BUG_ON(value == 0);
2431
2432 - got = me->module_core + me->arch.got_offset;
2433 + got = me->module_core_rw + me->arch.got_offset;
2434 for (i = 0; got[i].addr; i++)
2435 if (got[i].addr == value)
2436 goto out;
2437 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2438 #ifdef CONFIG_64BIT
2439 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2440 {
2441 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2442 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2443
2444 if (!value) {
2445 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2446 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2447
2448 /* Create new one */
2449 fdesc->addr = value;
2450 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452 return (Elf_Addr)fdesc;
2453 }
2454 #endif /* CONFIG_64BIT */
2455 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2456
2457 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2458 end = table + sechdrs[me->arch.unwind_section].sh_size;
2459 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2460 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2461
2462 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2463 me->arch.unwind_section, table, end, gp);
2464 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2465 index 9147391..f3d949a 100644
2466 --- a/arch/parisc/kernel/sys_parisc.c
2467 +++ b/arch/parisc/kernel/sys_parisc.c
2468 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472 - if (!vma || addr + len <= vma->vm_start)
2473 + if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = vma->vm_end;
2476 }
2477 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2478 /* At this point: (!vma || addr < vma->vm_end). */
2479 if (TASK_SIZE - len < addr)
2480 return -ENOMEM;
2481 - if (!vma || addr + len <= vma->vm_start)
2482 + if (check_heap_stack_gap(vma, addr, len))
2483 return addr;
2484 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2485 if (addr < vma->vm_end) /* handle wraparound */
2486 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2487 if (flags & MAP_FIXED)
2488 return addr;
2489 if (!addr)
2490 - addr = TASK_UNMAPPED_BASE;
2491 + addr = current->mm->mmap_base;
2492
2493 if (filp) {
2494 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2495 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2496 index 8b58bf0..7afff03 100644
2497 --- a/arch/parisc/kernel/traps.c
2498 +++ b/arch/parisc/kernel/traps.c
2499 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2500
2501 down_read(&current->mm->mmap_sem);
2502 vma = find_vma(current->mm,regs->iaoq[0]);
2503 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2504 - && (vma->vm_flags & VM_EXEC)) {
2505 -
2506 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2507 fault_address = regs->iaoq[0];
2508 fault_space = regs->iasq[0];
2509
2510 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2511 index c6afbfc..c5839f6 100644
2512 --- a/arch/parisc/mm/fault.c
2513 +++ b/arch/parisc/mm/fault.c
2514 @@ -15,6 +15,7 @@
2515 #include <linux/sched.h>
2516 #include <linux/interrupt.h>
2517 #include <linux/module.h>
2518 +#include <linux/unistd.h>
2519
2520 #include <asm/uaccess.h>
2521 #include <asm/traps.h>
2522 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2523 static unsigned long
2524 parisc_acctyp(unsigned long code, unsigned int inst)
2525 {
2526 - if (code == 6 || code == 16)
2527 + if (code == 6 || code == 7 || code == 16)
2528 return VM_EXEC;
2529
2530 switch (inst & 0xf0000000) {
2531 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2532 }
2533 #endif
2534
2535 +#ifdef CONFIG_PAX_PAGEEXEC
2536 +/*
2537 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2538 + *
2539 + * returns 1 when task should be killed
2540 + * 2 when rt_sigreturn trampoline was detected
2541 + * 3 when unpatched PLT trampoline was detected
2542 + */
2543 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2544 +{
2545 +
2546 +#ifdef CONFIG_PAX_EMUPLT
2547 + int err;
2548 +
2549 + do { /* PaX: unpatched PLT emulation */
2550 + unsigned int bl, depwi;
2551 +
2552 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2553 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2554 +
2555 + if (err)
2556 + break;
2557 +
2558 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2559 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2560 +
2561 + err = get_user(ldw, (unsigned int *)addr);
2562 + err |= get_user(bv, (unsigned int *)(addr+4));
2563 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2564 +
2565 + if (err)
2566 + break;
2567 +
2568 + if (ldw == 0x0E801096U &&
2569 + bv == 0xEAC0C000U &&
2570 + ldw2 == 0x0E881095U)
2571 + {
2572 + unsigned int resolver, map;
2573 +
2574 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2575 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2576 + if (err)
2577 + break;
2578 +
2579 + regs->gr[20] = instruction_pointer(regs)+8;
2580 + regs->gr[21] = map;
2581 + regs->gr[22] = resolver;
2582 + regs->iaoq[0] = resolver | 3UL;
2583 + regs->iaoq[1] = regs->iaoq[0] + 4;
2584 + return 3;
2585 + }
2586 + }
2587 + } while (0);
2588 +#endif
2589 +
2590 +#ifdef CONFIG_PAX_EMUTRAMP
2591 +
2592 +#ifndef CONFIG_PAX_EMUSIGRT
2593 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2594 + return 1;
2595 +#endif
2596 +
2597 + do { /* PaX: rt_sigreturn emulation */
2598 + unsigned int ldi1, ldi2, bel, nop;
2599 +
2600 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2601 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2602 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2603 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2604 +
2605 + if (err)
2606 + break;
2607 +
2608 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2609 + ldi2 == 0x3414015AU &&
2610 + bel == 0xE4008200U &&
2611 + nop == 0x08000240U)
2612 + {
2613 + regs->gr[25] = (ldi1 & 2) >> 1;
2614 + regs->gr[20] = __NR_rt_sigreturn;
2615 + regs->gr[31] = regs->iaoq[1] + 16;
2616 + regs->sr[0] = regs->iasq[1];
2617 + regs->iaoq[0] = 0x100UL;
2618 + regs->iaoq[1] = regs->iaoq[0] + 4;
2619 + regs->iasq[0] = regs->sr[2];
2620 + regs->iasq[1] = regs->sr[2];
2621 + return 2;
2622 + }
2623 + } while (0);
2624 +#endif
2625 +
2626 + return 1;
2627 +}
2628 +
2629 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2630 +{
2631 + unsigned long i;
2632 +
2633 + printk(KERN_ERR "PAX: bytes at PC: ");
2634 + for (i = 0; i < 5; i++) {
2635 + unsigned int c;
2636 + if (get_user(c, (unsigned int *)pc+i))
2637 + printk(KERN_CONT "???????? ");
2638 + else
2639 + printk(KERN_CONT "%08x ", c);
2640 + }
2641 + printk("\n");
2642 +}
2643 +#endif
2644 +
2645 int fixup_exception(struct pt_regs *regs)
2646 {
2647 const struct exception_table_entry *fix;
2648 @@ -192,8 +303,33 @@ good_area:
2649
2650 acc_type = parisc_acctyp(code,regs->iir);
2651
2652 - if ((vma->vm_flags & acc_type) != acc_type)
2653 + if ((vma->vm_flags & acc_type) != acc_type) {
2654 +
2655 +#ifdef CONFIG_PAX_PAGEEXEC
2656 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2657 + (address & ~3UL) == instruction_pointer(regs))
2658 + {
2659 + up_read(&mm->mmap_sem);
2660 + switch (pax_handle_fetch_fault(regs)) {
2661 +
2662 +#ifdef CONFIG_PAX_EMUPLT
2663 + case 3:
2664 + return;
2665 +#endif
2666 +
2667 +#ifdef CONFIG_PAX_EMUTRAMP
2668 + case 2:
2669 + return;
2670 +#endif
2671 +
2672 + }
2673 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2674 + do_group_exit(SIGKILL);
2675 + }
2676 +#endif
2677 +
2678 goto bad_area;
2679 + }
2680
2681 /*
2682 * If for any reason at all we couldn't handle the fault, make
2683 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2684 index c107b74..409dc0f 100644
2685 --- a/arch/powerpc/Makefile
2686 +++ b/arch/powerpc/Makefile
2687 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2688 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2689 CPP = $(CC) -E $(KBUILD_CFLAGS)
2690
2691 +cflags-y += -Wno-sign-compare -Wno-extra
2692 +
2693 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2694
2695 ifeq ($(CONFIG_PPC64),y)
2696 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2697 index 6d94d27..50d4cad 100644
2698 --- a/arch/powerpc/include/asm/device.h
2699 +++ b/arch/powerpc/include/asm/device.h
2700 @@ -14,7 +14,7 @@ struct dev_archdata {
2701 struct device_node *of_node;
2702
2703 /* DMA operations on that device */
2704 - struct dma_map_ops *dma_ops;
2705 + const struct dma_map_ops *dma_ops;
2706
2707 /*
2708 * When an iommu is in use, dma_data is used as a ptr to the base of the
2709 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2710 index e281dae..2b8a784 100644
2711 --- a/arch/powerpc/include/asm/dma-mapping.h
2712 +++ b/arch/powerpc/include/asm/dma-mapping.h
2713 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2714 #ifdef CONFIG_PPC64
2715 extern struct dma_map_ops dma_iommu_ops;
2716 #endif
2717 -extern struct dma_map_ops dma_direct_ops;
2718 +extern const struct dma_map_ops dma_direct_ops;
2719
2720 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2721 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2722 {
2723 /* We don't handle the NULL dev case for ISA for now. We could
2724 * do it via an out of line call but it is not needed for now. The
2725 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2726 return dev->archdata.dma_ops;
2727 }
2728
2729 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2730 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2731 {
2732 dev->archdata.dma_ops = ops;
2733 }
2734 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2735
2736 static inline int dma_supported(struct device *dev, u64 mask)
2737 {
2738 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return 0;
2743 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2744
2745 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2746 {
2747 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749
2750 if (unlikely(dma_ops == NULL))
2751 return -EIO;
2752 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2753 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2754 dma_addr_t *dma_handle, gfp_t flag)
2755 {
2756 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758 void *cpu_addr;
2759
2760 BUG_ON(!dma_ops);
2761 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2762 static inline void dma_free_coherent(struct device *dev, size_t size,
2763 void *cpu_addr, dma_addr_t dma_handle)
2764 {
2765 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 BUG_ON(!dma_ops);
2769
2770 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2771
2772 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2773 {
2774 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2775 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2776
2777 if (dma_ops->mapping_error)
2778 return dma_ops->mapping_error(dev, dma_addr);
2779 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2780 index 5698502..5db093c 100644
2781 --- a/arch/powerpc/include/asm/elf.h
2782 +++ b/arch/powerpc/include/asm/elf.h
2783 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2784 the loader. We need to make sure that it is out of the way of the program
2785 that it will "exec", and that there is sufficient room for the brk. */
2786
2787 -extern unsigned long randomize_et_dyn(unsigned long base);
2788 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2789 +#define ELF_ET_DYN_BASE (0x20000000)
2790 +
2791 +#ifdef CONFIG_PAX_ASLR
2792 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2793 +
2794 +#ifdef __powerpc64__
2795 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2796 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2797 +#else
2798 +#define PAX_DELTA_MMAP_LEN 15
2799 +#define PAX_DELTA_STACK_LEN 15
2800 +#endif
2801 +#endif
2802
2803 /*
2804 * Our registers are always unsigned longs, whether we're a 32 bit
2805 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2806 (0x7ff >> (PAGE_SHIFT - 12)) : \
2807 (0x3ffff >> (PAGE_SHIFT - 12)))
2808
2809 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2810 -#define arch_randomize_brk arch_randomize_brk
2811 -
2812 #endif /* __KERNEL__ */
2813
2814 /*
2815 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2816 index edfc980..1766f59 100644
2817 --- a/arch/powerpc/include/asm/iommu.h
2818 +++ b/arch/powerpc/include/asm/iommu.h
2819 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2820 extern void iommu_init_early_dart(void);
2821 extern void iommu_init_early_pasemi(void);
2822
2823 +/* dma-iommu.c */
2824 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2825 +
2826 #ifdef CONFIG_PCI
2827 extern void pci_iommu_init(void);
2828 extern void pci_direct_iommu_init(void);
2829 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2830 index 9163695..5a00112 100644
2831 --- a/arch/powerpc/include/asm/kmap_types.h
2832 +++ b/arch/powerpc/include/asm/kmap_types.h
2833 @@ -26,6 +26,7 @@ enum km_type {
2834 KM_SOFTIRQ1,
2835 KM_PPC_SYNC_PAGE,
2836 KM_PPC_SYNC_ICACHE,
2837 + KM_CLEARPAGE,
2838 KM_TYPE_NR
2839 };
2840
2841 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2842 index ff24254..fe45b21 100644
2843 --- a/arch/powerpc/include/asm/page.h
2844 +++ b/arch/powerpc/include/asm/page.h
2845 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2846 * and needs to be executable. This means the whole heap ends
2847 * up being executable.
2848 */
2849 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2850 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2851 +#define VM_DATA_DEFAULT_FLAGS32 \
2852 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2853 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2854
2855 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2856 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2857 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2858 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2859 #endif
2860
2861 +#define ktla_ktva(addr) (addr)
2862 +#define ktva_ktla(addr) (addr)
2863 +
2864 #ifndef __ASSEMBLY__
2865
2866 #undef STRICT_MM_TYPECHECKS
2867 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2868 index 3f17b83..1f9e766 100644
2869 --- a/arch/powerpc/include/asm/page_64.h
2870 +++ b/arch/powerpc/include/asm/page_64.h
2871 @@ -180,15 +180,18 @@ do { \
2872 * stack by default, so in the absense of a PT_GNU_STACK program header
2873 * we turn execute permission off.
2874 */
2875 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2876 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2877 +#define VM_STACK_DEFAULT_FLAGS32 \
2878 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2879 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2880
2881 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2882 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2883
2884 +#ifndef CONFIG_PAX_PAGEEXEC
2885 #define VM_STACK_DEFAULT_FLAGS \
2886 (test_thread_flag(TIF_32BIT) ? \
2887 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2888 +#endif
2889
2890 #include <asm-generic/getorder.h>
2891
2892 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2893 index b5ea626..40308222 100644
2894 --- a/arch/powerpc/include/asm/pci.h
2895 +++ b/arch/powerpc/include/asm/pci.h
2896 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2897 }
2898
2899 #ifdef CONFIG_PCI
2900 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2901 -extern struct dma_map_ops *get_pci_dma_ops(void);
2902 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2903 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2904 #else /* CONFIG_PCI */
2905 #define set_pci_dma_ops(d)
2906 #define get_pci_dma_ops() NULL
2907 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2908 index 2a5da06..d65bea2 100644
2909 --- a/arch/powerpc/include/asm/pgtable.h
2910 +++ b/arch/powerpc/include/asm/pgtable.h
2911 @@ -2,6 +2,7 @@
2912 #define _ASM_POWERPC_PGTABLE_H
2913 #ifdef __KERNEL__
2914
2915 +#include <linux/const.h>
2916 #ifndef __ASSEMBLY__
2917 #include <asm/processor.h> /* For TASK_SIZE */
2918 #include <asm/mmu.h>
2919 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2920 index 4aad413..85d86bf 100644
2921 --- a/arch/powerpc/include/asm/pte-hash32.h
2922 +++ b/arch/powerpc/include/asm/pte-hash32.h
2923 @@ -21,6 +21,7 @@
2924 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2925 #define _PAGE_USER 0x004 /* usermode access allowed */
2926 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2927 +#define _PAGE_EXEC _PAGE_GUARDED
2928 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2929 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2930 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2931 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2932 index 8c34149..78f425a 100644
2933 --- a/arch/powerpc/include/asm/ptrace.h
2934 +++ b/arch/powerpc/include/asm/ptrace.h
2935 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2936 } while(0)
2937
2938 struct task_struct;
2939 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2940 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2941 extern int ptrace_put_reg(struct task_struct *task, int regno,
2942 unsigned long data);
2943
2944 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2945 index 32a7c30..be3a8bb 100644
2946 --- a/arch/powerpc/include/asm/reg.h
2947 +++ b/arch/powerpc/include/asm/reg.h
2948 @@ -191,6 +191,7 @@
2949 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2950 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2951 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2952 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2953 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2954 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2955 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2956 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2957 index 8979d4c..d2fd0d3 100644
2958 --- a/arch/powerpc/include/asm/swiotlb.h
2959 +++ b/arch/powerpc/include/asm/swiotlb.h
2960 @@ -13,7 +13,7 @@
2961
2962 #include <linux/swiotlb.h>
2963
2964 -extern struct dma_map_ops swiotlb_dma_ops;
2965 +extern const struct dma_map_ops swiotlb_dma_ops;
2966
2967 static inline void dma_mark_clean(void *addr, size_t size) {}
2968
2969 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2970 index 094a12a..877a60a 100644
2971 --- a/arch/powerpc/include/asm/system.h
2972 +++ b/arch/powerpc/include/asm/system.h
2973 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2974 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2975 #endif
2976
2977 -extern unsigned long arch_align_stack(unsigned long sp);
2978 +#define arch_align_stack(x) ((x) & ~0xfUL)
2979
2980 /* Used in very early kernel initialization. */
2981 extern unsigned long reloc_offset(void);
2982 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2983 index bd0fb84..a42a14b 100644
2984 --- a/arch/powerpc/include/asm/uaccess.h
2985 +++ b/arch/powerpc/include/asm/uaccess.h
2986 @@ -13,6 +13,8 @@
2987 #define VERIFY_READ 0
2988 #define VERIFY_WRITE 1
2989
2990 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2991 +
2992 /*
2993 * The fs value determines whether argument validity checking should be
2994 * performed or not. If get_fs() == USER_DS, checking is performed, with
2995 @@ -327,52 +329,6 @@ do { \
2996 extern unsigned long __copy_tofrom_user(void __user *to,
2997 const void __user *from, unsigned long size);
2998
2999 -#ifndef __powerpc64__
3000 -
3001 -static inline unsigned long copy_from_user(void *to,
3002 - const void __user *from, unsigned long n)
3003 -{
3004 - unsigned long over;
3005 -
3006 - if (access_ok(VERIFY_READ, from, n))
3007 - return __copy_tofrom_user((__force void __user *)to, from, n);
3008 - if ((unsigned long)from < TASK_SIZE) {
3009 - over = (unsigned long)from + n - TASK_SIZE;
3010 - return __copy_tofrom_user((__force void __user *)to, from,
3011 - n - over) + over;
3012 - }
3013 - return n;
3014 -}
3015 -
3016 -static inline unsigned long copy_to_user(void __user *to,
3017 - const void *from, unsigned long n)
3018 -{
3019 - unsigned long over;
3020 -
3021 - if (access_ok(VERIFY_WRITE, to, n))
3022 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3023 - if ((unsigned long)to < TASK_SIZE) {
3024 - over = (unsigned long)to + n - TASK_SIZE;
3025 - return __copy_tofrom_user(to, (__force void __user *)from,
3026 - n - over) + over;
3027 - }
3028 - return n;
3029 -}
3030 -
3031 -#else /* __powerpc64__ */
3032 -
3033 -#define __copy_in_user(to, from, size) \
3034 - __copy_tofrom_user((to), (from), (size))
3035 -
3036 -extern unsigned long copy_from_user(void *to, const void __user *from,
3037 - unsigned long n);
3038 -extern unsigned long copy_to_user(void __user *to, const void *from,
3039 - unsigned long n);
3040 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3041 - unsigned long n);
3042 -
3043 -#endif /* __powerpc64__ */
3044 -
3045 static inline unsigned long __copy_from_user_inatomic(void *to,
3046 const void __user *from, unsigned long n)
3047 {
3048 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3049 if (ret == 0)
3050 return 0;
3051 }
3052 +
3053 + if (!__builtin_constant_p(n))
3054 + check_object_size(to, n, false);
3055 +
3056 return __copy_tofrom_user((__force void __user *)to, from, n);
3057 }
3058
3059 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3060 if (ret == 0)
3061 return 0;
3062 }
3063 +
3064 + if (!__builtin_constant_p(n))
3065 + check_object_size(from, n, true);
3066 +
3067 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3068 }
3069
3070 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3071 return __copy_to_user_inatomic(to, from, size);
3072 }
3073
3074 +#ifndef __powerpc64__
3075 +
3076 +static inline unsigned long __must_check copy_from_user(void *to,
3077 + const void __user *from, unsigned long n)
3078 +{
3079 + unsigned long over;
3080 +
3081 + if ((long)n < 0)
3082 + return n;
3083 +
3084 + if (access_ok(VERIFY_READ, from, n)) {
3085 + if (!__builtin_constant_p(n))
3086 + check_object_size(to, n, false);
3087 + return __copy_tofrom_user((__force void __user *)to, from, n);
3088 + }
3089 + if ((unsigned long)from < TASK_SIZE) {
3090 + over = (unsigned long)from + n - TASK_SIZE;
3091 + if (!__builtin_constant_p(n - over))
3092 + check_object_size(to, n - over, false);
3093 + return __copy_tofrom_user((__force void __user *)to, from,
3094 + n - over) + over;
3095 + }
3096 + return n;
3097 +}
3098 +
3099 +static inline unsigned long __must_check copy_to_user(void __user *to,
3100 + const void *from, unsigned long n)
3101 +{
3102 + unsigned long over;
3103 +
3104 + if ((long)n < 0)
3105 + return n;
3106 +
3107 + if (access_ok(VERIFY_WRITE, to, n)) {
3108 + if (!__builtin_constant_p(n))
3109 + check_object_size(from, n, true);
3110 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3111 + }
3112 + if ((unsigned long)to < TASK_SIZE) {
3113 + over = (unsigned long)to + n - TASK_SIZE;
3114 + if (!__builtin_constant_p(n))
3115 + check_object_size(from, n - over, true);
3116 + return __copy_tofrom_user(to, (__force void __user *)from,
3117 + n - over) + over;
3118 + }
3119 + return n;
3120 +}
3121 +
3122 +#else /* __powerpc64__ */
3123 +
3124 +#define __copy_in_user(to, from, size) \
3125 + __copy_tofrom_user((to), (from), (size))
3126 +
3127 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3128 +{
3129 + if ((long)n < 0 || n > INT_MAX)
3130 + return n;
3131 +
3132 + if (!__builtin_constant_p(n))
3133 + check_object_size(to, n, false);
3134 +
3135 + if (likely(access_ok(VERIFY_READ, from, n)))
3136 + n = __copy_from_user(to, from, n);
3137 + else
3138 + memset(to, 0, n);
3139 + return n;
3140 +}
3141 +
3142 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3143 +{
3144 + if ((long)n < 0 || n > INT_MAX)
3145 + return n;
3146 +
3147 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3148 + if (!__builtin_constant_p(n))
3149 + check_object_size(from, n, true);
3150 + n = __copy_to_user(to, from, n);
3151 + }
3152 + return n;
3153 +}
3154 +
3155 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3156 + unsigned long n);
3157 +
3158 +#endif /* __powerpc64__ */
3159 +
3160 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3161
3162 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3163 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3164 index bb37b1d..01fe9ce 100644
3165 --- a/arch/powerpc/kernel/cacheinfo.c
3166 +++ b/arch/powerpc/kernel/cacheinfo.c
3167 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3168 &cache_assoc_attr,
3169 };
3170
3171 -static struct sysfs_ops cache_index_ops = {
3172 +static const struct sysfs_ops cache_index_ops = {
3173 .show = cache_index_show,
3174 };
3175
3176 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3177 index 37771a5..648530c 100644
3178 --- a/arch/powerpc/kernel/dma-iommu.c
3179 +++ b/arch/powerpc/kernel/dma-iommu.c
3180 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3181 }
3182
3183 /* We support DMA to/from any memory page via the iommu */
3184 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3185 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3186 {
3187 struct iommu_table *tbl = get_iommu_table_base(dev);
3188
3189 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3190 index e96cbbd..bdd6d41 100644
3191 --- a/arch/powerpc/kernel/dma-swiotlb.c
3192 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3193 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3194 * map_page, and unmap_page on highmem, use normal dma_ops
3195 * for everything else.
3196 */
3197 -struct dma_map_ops swiotlb_dma_ops = {
3198 +const struct dma_map_ops swiotlb_dma_ops = {
3199 .alloc_coherent = dma_direct_alloc_coherent,
3200 .free_coherent = dma_direct_free_coherent,
3201 .map_sg = swiotlb_map_sg_attrs,
3202 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3203 index 6215062..ebea59c 100644
3204 --- a/arch/powerpc/kernel/dma.c
3205 +++ b/arch/powerpc/kernel/dma.c
3206 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3207 }
3208 #endif
3209
3210 -struct dma_map_ops dma_direct_ops = {
3211 +const struct dma_map_ops dma_direct_ops = {
3212 .alloc_coherent = dma_direct_alloc_coherent,
3213 .free_coherent = dma_direct_free_coherent,
3214 .map_sg = dma_direct_map_sg,
3215 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3216 index 24dcc0e..a300455 100644
3217 --- a/arch/powerpc/kernel/exceptions-64e.S
3218 +++ b/arch/powerpc/kernel/exceptions-64e.S
3219 @@ -455,6 +455,7 @@ storage_fault_common:
3220 std r14,_DAR(r1)
3221 std r15,_DSISR(r1)
3222 addi r3,r1,STACK_FRAME_OVERHEAD
3223 + bl .save_nvgprs
3224 mr r4,r14
3225 mr r5,r15
3226 ld r14,PACA_EXGEN+EX_R14(r13)
3227 @@ -464,8 +465,7 @@ storage_fault_common:
3228 cmpdi r3,0
3229 bne- 1f
3230 b .ret_from_except_lite
3231 -1: bl .save_nvgprs
3232 - mr r5,r3
3233 +1: mr r5,r3
3234 addi r3,r1,STACK_FRAME_OVERHEAD
3235 ld r4,_DAR(r1)
3236 bl .bad_page_fault
3237 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3238 index 1808876..9fd206a 100644
3239 --- a/arch/powerpc/kernel/exceptions-64s.S
3240 +++ b/arch/powerpc/kernel/exceptions-64s.S
3241 @@ -818,10 +818,10 @@ handle_page_fault:
3242 11: ld r4,_DAR(r1)
3243 ld r5,_DSISR(r1)
3244 addi r3,r1,STACK_FRAME_OVERHEAD
3245 + bl .save_nvgprs
3246 bl .do_page_fault
3247 cmpdi r3,0
3248 beq+ 13f
3249 - bl .save_nvgprs
3250 mr r5,r3
3251 addi r3,r1,STACK_FRAME_OVERHEAD
3252 lwz r4,_DAR(r1)
3253 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3254 index a4c8b38..1b09ad9 100644
3255 --- a/arch/powerpc/kernel/ibmebus.c
3256 +++ b/arch/powerpc/kernel/ibmebus.c
3257 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3258 return 1;
3259 }
3260
3261 -static struct dma_map_ops ibmebus_dma_ops = {
3262 +static const struct dma_map_ops ibmebus_dma_ops = {
3263 .alloc_coherent = ibmebus_alloc_coherent,
3264 .free_coherent = ibmebus_free_coherent,
3265 .map_sg = ibmebus_map_sg,
3266 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3267 index 641c74b..8339ad7 100644
3268 --- a/arch/powerpc/kernel/kgdb.c
3269 +++ b/arch/powerpc/kernel/kgdb.c
3270 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3271 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3272 return 0;
3273
3274 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3275 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3276 regs->nip += 4;
3277
3278 return 1;
3279 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3280 /*
3281 * Global data
3282 */
3283 -struct kgdb_arch arch_kgdb_ops = {
3284 +const struct kgdb_arch arch_kgdb_ops = {
3285 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3286 };
3287
3288 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3289 index 477c663..4f50234 100644
3290 --- a/arch/powerpc/kernel/module.c
3291 +++ b/arch/powerpc/kernel/module.c
3292 @@ -31,11 +31,24 @@
3293
3294 LIST_HEAD(module_bug_list);
3295
3296 +#ifdef CONFIG_PAX_KERNEXEC
3297 void *module_alloc(unsigned long size)
3298 {
3299 if (size == 0)
3300 return NULL;
3301
3302 + return vmalloc(size);
3303 +}
3304 +
3305 +void *module_alloc_exec(unsigned long size)
3306 +#else
3307 +void *module_alloc(unsigned long size)
3308 +#endif
3309 +
3310 +{
3311 + if (size == 0)
3312 + return NULL;
3313 +
3314 return vmalloc_exec(size);
3315 }
3316
3317 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3318 vfree(module_region);
3319 }
3320
3321 +#ifdef CONFIG_PAX_KERNEXEC
3322 +void module_free_exec(struct module *mod, void *module_region)
3323 +{
3324 + module_free(mod, module_region);
3325 +}
3326 +#endif
3327 +
3328 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3329 const Elf_Shdr *sechdrs,
3330 const char *name)
3331 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3332 index f832773..0507238 100644
3333 --- a/arch/powerpc/kernel/module_32.c
3334 +++ b/arch/powerpc/kernel/module_32.c
3335 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3336 me->arch.core_plt_section = i;
3337 }
3338 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3339 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3340 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3341 return -ENOEXEC;
3342 }
3343
3344 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3345
3346 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3347 /* Init, or core PLT? */
3348 - if (location >= mod->module_core
3349 - && location < mod->module_core + mod->core_size)
3350 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3351 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3352 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3353 - else
3354 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3355 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3356 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3357 + else {
3358 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3359 + return ~0UL;
3360 + }
3361
3362 /* Find this entry, or if that fails, the next avail. entry */
3363 while (entry->jump[0]) {
3364 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3365 index cadbed6..b9bbb00 100644
3366 --- a/arch/powerpc/kernel/pci-common.c
3367 +++ b/arch/powerpc/kernel/pci-common.c
3368 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3369 unsigned int ppc_pci_flags = 0;
3370
3371
3372 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3373 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3374
3375 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3376 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3377 {
3378 pci_dma_ops = dma_ops;
3379 }
3380
3381 -struct dma_map_ops *get_pci_dma_ops(void)
3382 +const struct dma_map_ops *get_pci_dma_ops(void)
3383 {
3384 return pci_dma_ops;
3385 }
3386 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3387 index 7b816da..8d5c277 100644
3388 --- a/arch/powerpc/kernel/process.c
3389 +++ b/arch/powerpc/kernel/process.c
3390 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3391 * Lookup NIP late so we have the best change of getting the
3392 * above info out without failing
3393 */
3394 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3395 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3396 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3397 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3398 #endif
3399 show_stack(current, (unsigned long *) regs->gpr[1]);
3400 if (!user_mode(regs))
3401 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3402 newsp = stack[0];
3403 ip = stack[STACK_FRAME_LR_SAVE];
3404 if (!firstframe || ip != lr) {
3405 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3406 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3408 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3409 - printk(" (%pS)",
3410 + printk(" (%pA)",
3411 (void *)current->ret_stack[curr_frame].ret);
3412 curr_frame--;
3413 }
3414 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3415 struct pt_regs *regs = (struct pt_regs *)
3416 (sp + STACK_FRAME_OVERHEAD);
3417 lr = regs->link;
3418 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3419 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3420 regs->trap, (void *)regs->nip, (void *)lr);
3421 firstframe = 1;
3422 }
3423 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3424 }
3425
3426 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3427 -
3428 -unsigned long arch_align_stack(unsigned long sp)
3429 -{
3430 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3431 - sp -= get_random_int() & ~PAGE_MASK;
3432 - return sp & ~0xf;
3433 -}
3434 -
3435 -static inline unsigned long brk_rnd(void)
3436 -{
3437 - unsigned long rnd = 0;
3438 -
3439 - /* 8MB for 32bit, 1GB for 64bit */
3440 - if (is_32bit_task())
3441 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3442 - else
3443 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3444 -
3445 - return rnd << PAGE_SHIFT;
3446 -}
3447 -
3448 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3449 -{
3450 - unsigned long base = mm->brk;
3451 - unsigned long ret;
3452 -
3453 -#ifdef CONFIG_PPC_STD_MMU_64
3454 - /*
3455 - * If we are using 1TB segments and we are allowed to randomise
3456 - * the heap, we can put it above 1TB so it is backed by a 1TB
3457 - * segment. Otherwise the heap will be in the bottom 1TB
3458 - * which always uses 256MB segments and this may result in a
3459 - * performance penalty.
3460 - */
3461 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3462 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3463 -#endif
3464 -
3465 - ret = PAGE_ALIGN(base + brk_rnd());
3466 -
3467 - if (ret < mm->brk)
3468 - return mm->brk;
3469 -
3470 - return ret;
3471 -}
3472 -
3473 -unsigned long randomize_et_dyn(unsigned long base)
3474 -{
3475 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3476 -
3477 - if (ret < base)
3478 - return base;
3479 -
3480 - return ret;
3481 -}
3482 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3483 index ef14988..856c4bc 100644
3484 --- a/arch/powerpc/kernel/ptrace.c
3485 +++ b/arch/powerpc/kernel/ptrace.c
3486 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3487 /*
3488 * Get contents of register REGNO in task TASK.
3489 */
3490 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3491 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3492 {
3493 if (task->thread.regs == NULL)
3494 return -EIO;
3495 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3496
3497 CHECK_FULL_REGS(child->thread.regs);
3498 if (index < PT_FPR0) {
3499 - tmp = ptrace_get_reg(child, (int) index);
3500 + tmp = ptrace_get_reg(child, index);
3501 } else {
3502 flush_fp_to_thread(child);
3503 tmp = ((unsigned long *)child->thread.fpr)
3504 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3505 index d670429..2bc59b2 100644
3506 --- a/arch/powerpc/kernel/signal_32.c
3507 +++ b/arch/powerpc/kernel/signal_32.c
3508 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3509 /* Save user registers on the stack */
3510 frame = &rt_sf->uc.uc_mcontext;
3511 addr = frame;
3512 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3513 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3514 if (save_user_regs(regs, frame, 0, 1))
3515 goto badframe;
3516 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3517 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3518 index 2fe6fc6..ada0d96 100644
3519 --- a/arch/powerpc/kernel/signal_64.c
3520 +++ b/arch/powerpc/kernel/signal_64.c
3521 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3522 current->thread.fpscr.val = 0;
3523
3524 /* Set up to return from userspace. */
3525 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3526 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3527 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3528 } else {
3529 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3530 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3531 index b97c2d6..dd01a6a 100644
3532 --- a/arch/powerpc/kernel/sys_ppc32.c
3533 +++ b/arch/powerpc/kernel/sys_ppc32.c
3534 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3535 if (oldlenp) {
3536 if (!error) {
3537 if (get_user(oldlen, oldlenp) ||
3538 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3539 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3540 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3541 error = -EFAULT;
3542 }
3543 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3544 }
3545 return error;
3546 }
3547 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3548 index 6f0ae1a..e4b6a56 100644
3549 --- a/arch/powerpc/kernel/traps.c
3550 +++ b/arch/powerpc/kernel/traps.c
3551 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3552 static inline void pmac_backlight_unblank(void) { }
3553 #endif
3554
3555 +extern void gr_handle_kernel_exploit(void);
3556 +
3557 int die(const char *str, struct pt_regs *regs, long err)
3558 {
3559 static struct {
3560 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3561 if (panic_on_oops)
3562 panic("Fatal exception");
3563
3564 + gr_handle_kernel_exploit();
3565 +
3566 oops_exit();
3567 do_exit(err);
3568
3569 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3570 index 137dc22..fe57a79 100644
3571 --- a/arch/powerpc/kernel/vdso.c
3572 +++ b/arch/powerpc/kernel/vdso.c
3573 @@ -36,6 +36,7 @@
3574 #include <asm/firmware.h>
3575 #include <asm/vdso.h>
3576 #include <asm/vdso_datapage.h>
3577 +#include <asm/mman.h>
3578
3579 #include "setup.h"
3580
3581 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = VDSO32_MBASE;
3583 #endif
3584
3585 - current->mm->context.vdso_base = 0;
3586 + current->mm->context.vdso_base = ~0UL;
3587
3588 /* vDSO has a problem and was disabled, just don't "enable" it for the
3589 * process
3590 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3591 vdso_base = get_unmapped_area(NULL, vdso_base,
3592 (vdso_pages << PAGE_SHIFT) +
3593 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3594 - 0, 0);
3595 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3596 if (IS_ERR_VALUE(vdso_base)) {
3597 rc = vdso_base;
3598 goto fail_mmapsem;
3599 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3600 index 77f6421..829564a 100644
3601 --- a/arch/powerpc/kernel/vio.c
3602 +++ b/arch/powerpc/kernel/vio.c
3603 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3604 vio_cmo_dealloc(viodev, alloc_size);
3605 }
3606
3607 -struct dma_map_ops vio_dma_mapping_ops = {
3608 +static const struct dma_map_ops vio_dma_mapping_ops = {
3609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3610 .free_coherent = vio_dma_iommu_free_coherent,
3611 .map_sg = vio_dma_iommu_map_sg,
3612 .unmap_sg = vio_dma_iommu_unmap_sg,
3613 + .dma_supported = dma_iommu_dma_supported,
3614 .map_page = vio_dma_iommu_map_page,
3615 .unmap_page = vio_dma_iommu_unmap_page,
3616
3617 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3618
3619 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3620 {
3621 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3622 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3623 }
3624
3625 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3626 index 5eea6f3..5d10396 100644
3627 --- a/arch/powerpc/lib/usercopy_64.c
3628 +++ b/arch/powerpc/lib/usercopy_64.c
3629 @@ -9,22 +9,6 @@
3630 #include <linux/module.h>
3631 #include <asm/uaccess.h>
3632
3633 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3634 -{
3635 - if (likely(access_ok(VERIFY_READ, from, n)))
3636 - n = __copy_from_user(to, from, n);
3637 - else
3638 - memset(to, 0, n);
3639 - return n;
3640 -}
3641 -
3642 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3643 -{
3644 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3645 - n = __copy_to_user(to, from, n);
3646 - return n;
3647 -}
3648 -
3649 unsigned long copy_in_user(void __user *to, const void __user *from,
3650 unsigned long n)
3651 {
3652 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3653 return n;
3654 }
3655
3656 -EXPORT_SYMBOL(copy_from_user);
3657 -EXPORT_SYMBOL(copy_to_user);
3658 EXPORT_SYMBOL(copy_in_user);
3659
3660 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3661 index e7dae82..877ce0d 100644
3662 --- a/arch/powerpc/mm/fault.c
3663 +++ b/arch/powerpc/mm/fault.c
3664 @@ -30,6 +30,10 @@
3665 #include <linux/kprobes.h>
3666 #include <linux/kdebug.h>
3667 #include <linux/perf_event.h>
3668 +#include <linux/slab.h>
3669 +#include <linux/pagemap.h>
3670 +#include <linux/compiler.h>
3671 +#include <linux/unistd.h>
3672
3673 #include <asm/firmware.h>
3674 #include <asm/page.h>
3675 @@ -40,6 +44,7 @@
3676 #include <asm/uaccess.h>
3677 #include <asm/tlbflush.h>
3678 #include <asm/siginfo.h>
3679 +#include <asm/ptrace.h>
3680
3681
3682 #ifdef CONFIG_KPROBES
3683 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3684 }
3685 #endif
3686
3687 +#ifdef CONFIG_PAX_PAGEEXEC
3688 +/*
3689 + * PaX: decide what to do with offenders (regs->nip = fault address)
3690 + *
3691 + * returns 1 when task should be killed
3692 + */
3693 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3694 +{
3695 + return 1;
3696 +}
3697 +
3698 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3699 +{
3700 + unsigned long i;
3701 +
3702 + printk(KERN_ERR "PAX: bytes at PC: ");
3703 + for (i = 0; i < 5; i++) {
3704 + unsigned int c;
3705 + if (get_user(c, (unsigned int __user *)pc+i))
3706 + printk(KERN_CONT "???????? ");
3707 + else
3708 + printk(KERN_CONT "%08x ", c);
3709 + }
3710 + printk("\n");
3711 +}
3712 +#endif
3713 +
3714 /*
3715 * Check whether the instruction at regs->nip is a store using
3716 * an update addressing form which will update r1.
3717 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3718 * indicate errors in DSISR but can validly be set in SRR1.
3719 */
3720 if (trap == 0x400)
3721 - error_code &= 0x48200000;
3722 + error_code &= 0x58200000;
3723 else
3724 is_write = error_code & DSISR_ISSTORE;
3725 #else
3726 @@ -250,7 +282,7 @@ good_area:
3727 * "undefined". Of those that can be set, this is the only
3728 * one which seems bad.
3729 */
3730 - if (error_code & 0x10000000)
3731 + if (error_code & DSISR_GUARDED)
3732 /* Guarded storage error. */
3733 goto bad_area;
3734 #endif /* CONFIG_8xx */
3735 @@ -265,7 +297,7 @@ good_area:
3736 * processors use the same I/D cache coherency mechanism
3737 * as embedded.
3738 */
3739 - if (error_code & DSISR_PROTFAULT)
3740 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3741 goto bad_area;
3742 #endif /* CONFIG_PPC_STD_MMU */
3743
3744 @@ -335,6 +367,23 @@ bad_area:
3745 bad_area_nosemaphore:
3746 /* User mode accesses cause a SIGSEGV */
3747 if (user_mode(regs)) {
3748 +
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3751 +#ifdef CONFIG_PPC_STD_MMU
3752 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3753 +#else
3754 + if (is_exec && regs->nip == address) {
3755 +#endif
3756 + switch (pax_handle_fetch_fault(regs)) {
3757 + }
3758 +
3759 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3760 + do_group_exit(SIGKILL);
3761 + }
3762 + }
3763 +#endif
3764 +
3765 _exception(SIGSEGV, regs, code, address);
3766 return 0;
3767 }
3768 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3769 index 5973631..ad617af 100644
3770 --- a/arch/powerpc/mm/mem.c
3771 +++ b/arch/powerpc/mm/mem.c
3772 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3773 {
3774 unsigned long lmb_next_region_start_pfn,
3775 lmb_region_max_pfn;
3776 - int i;
3777 + unsigned int i;
3778
3779 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3780 lmb_region_max_pfn =
3781 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3782 index 0d957a4..26d968f 100644
3783 --- a/arch/powerpc/mm/mmap_64.c
3784 +++ b/arch/powerpc/mm/mmap_64.c
3785 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3786 */
3787 if (mmap_is_legacy()) {
3788 mm->mmap_base = TASK_UNMAPPED_BASE;
3789 +
3790 +#ifdef CONFIG_PAX_RANDMMAP
3791 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3792 + mm->mmap_base += mm->delta_mmap;
3793 +#endif
3794 +
3795 mm->get_unmapped_area = arch_get_unmapped_area;
3796 mm->unmap_area = arch_unmap_area;
3797 } else {
3798 mm->mmap_base = mmap_base();
3799 +
3800 +#ifdef CONFIG_PAX_RANDMMAP
3801 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3802 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3803 +#endif
3804 +
3805 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3806 mm->unmap_area = arch_unmap_area_topdown;
3807 }
3808 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3809 index ba51948..23009d9 100644
3810 --- a/arch/powerpc/mm/slice.c
3811 +++ b/arch/powerpc/mm/slice.c
3812 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3813 if ((mm->task_size - len) < addr)
3814 return 0;
3815 vma = find_vma(mm, addr);
3816 - return (!vma || (addr + len) <= vma->vm_start);
3817 + return check_heap_stack_gap(vma, addr, len);
3818 }
3819
3820 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3821 @@ -256,7 +256,7 @@ full_search:
3822 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3823 continue;
3824 }
3825 - if (!vma || addr + len <= vma->vm_start) {
3826 + if (check_heap_stack_gap(vma, addr, len)) {
3827 /*
3828 * Remember the place where we stopped the search:
3829 */
3830 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3831 }
3832 }
3833
3834 - addr = mm->mmap_base;
3835 - while (addr > len) {
3836 + if (mm->mmap_base < len)
3837 + addr = -ENOMEM;
3838 + else
3839 + addr = mm->mmap_base - len;
3840 +
3841 + while (!IS_ERR_VALUE(addr)) {
3842 /* Go down by chunk size */
3843 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3844 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3845
3846 /* Check for hit with different page size */
3847 mask = slice_range_to_mask(addr, len);
3848 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 * return with success:
3850 */
3851 vma = find_vma(mm, addr);
3852 - if (!vma || (addr + len) <= vma->vm_start) {
3853 + if (check_heap_stack_gap(vma, addr, len)) {
3854 /* remember the address as a hint for next time */
3855 if (use_cache)
3856 mm->free_area_cache = addr;
3857 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3858 mm->cached_hole_size = vma->vm_start - addr;
3859
3860 /* try just below the current vma->vm_start */
3861 - addr = vma->vm_start;
3862 + addr = skip_heap_stack_gap(vma, len);
3863 }
3864
3865 /*
3866 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3867 if (fixed && addr > (mm->task_size - len))
3868 return -EINVAL;
3869
3870 +#ifdef CONFIG_PAX_RANDMMAP
3871 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3872 + addr = 0;
3873 +#endif
3874 +
3875 /* If hint, make sure it matches our alignment restrictions */
3876 if (!fixed && addr) {
3877 addr = _ALIGN_UP(addr, 1ul << pshift);
3878 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3879 index b5c753d..8f01abe 100644
3880 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3881 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3882 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3883 lite5200_pm_target_state = PM_SUSPEND_ON;
3884 }
3885
3886 -static struct platform_suspend_ops lite5200_pm_ops = {
3887 +static const struct platform_suspend_ops lite5200_pm_ops = {
3888 .valid = lite5200_pm_valid,
3889 .begin = lite5200_pm_begin,
3890 .prepare = lite5200_pm_prepare,
3891 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3892 index a55b0b6..478c18e 100644
3893 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3894 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3895 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3896 iounmap(mbar);
3897 }
3898
3899 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3900 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3901 .valid = mpc52xx_pm_valid,
3902 .prepare = mpc52xx_pm_prepare,
3903 .enter = mpc52xx_pm_enter,
3904 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3905 index 08e65fc..643d3ac 100644
3906 --- a/arch/powerpc/platforms/83xx/suspend.c
3907 +++ b/arch/powerpc/platforms/83xx/suspend.c
3908 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3909 return ret;
3910 }
3911
3912 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3913 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3914 .valid = mpc83xx_suspend_valid,
3915 .begin = mpc83xx_suspend_begin,
3916 .enter = mpc83xx_suspend_enter,
3917 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3918 index ca5bfdf..1602e09 100644
3919 --- a/arch/powerpc/platforms/cell/iommu.c
3920 +++ b/arch/powerpc/platforms/cell/iommu.c
3921 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3922
3923 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3924
3925 -struct dma_map_ops dma_iommu_fixed_ops = {
3926 +const struct dma_map_ops dma_iommu_fixed_ops = {
3927 .alloc_coherent = dma_fixed_alloc_coherent,
3928 .free_coherent = dma_fixed_free_coherent,
3929 .map_sg = dma_fixed_map_sg,
3930 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3931 index e34b305..20e48ec 100644
3932 --- a/arch/powerpc/platforms/ps3/system-bus.c
3933 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3934 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3935 return mask >= DMA_BIT_MASK(32);
3936 }
3937
3938 -static struct dma_map_ops ps3_sb_dma_ops = {
3939 +static const struct dma_map_ops ps3_sb_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_sb_map_sg,
3943 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3944 .unmap_page = ps3_unmap_page,
3945 };
3946
3947 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3948 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3949 .alloc_coherent = ps3_alloc_coherent,
3950 .free_coherent = ps3_free_coherent,
3951 .map_sg = ps3_ioc0_map_sg,
3952 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3953 index f0e6f28..60d53ed 100644
3954 --- a/arch/powerpc/platforms/pseries/Kconfig
3955 +++ b/arch/powerpc/platforms/pseries/Kconfig
3956 @@ -2,6 +2,8 @@ config PPC_PSERIES
3957 depends on PPC64 && PPC_BOOK3S
3958 bool "IBM pSeries & new (POWER5-based) iSeries"
3959 select MPIC
3960 + select PCI_MSI
3961 + select XICS
3962 select PPC_I8259
3963 select PPC_RTAS
3964 select RTAS_ERROR_LOGGING
3965 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3966 index 43c0aca..42c045b 100644
3967 --- a/arch/s390/Kconfig
3968 +++ b/arch/s390/Kconfig
3969 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3970
3971 config S390_SWITCH_AMODE
3972 bool "Switch kernel/user addressing modes"
3973 + default y
3974 help
3975 This option allows to switch the addressing modes of kernel and user
3976 - space. The kernel parameter switch_amode=on will enable this feature,
3977 - default is disabled. Enabling this (via kernel parameter) on machines
3978 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3979 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3980 + will reduce system performance.
3981
3982 Note that this option will also be selected by selecting the execute
3983 - protection option below. Enabling the execute protection via the
3984 - noexec kernel parameter will also switch the addressing modes,
3985 - independent of the switch_amode kernel parameter.
3986 + protection option below. Enabling the execute protection will also
3987 + switch the addressing modes, independent of this option.
3988
3989
3990 config S390_EXEC_PROTECT
3991 bool "Data execute protection"
3992 + default y
3993 select S390_SWITCH_AMODE
3994 help
3995 This option allows to enable a buffer overflow protection for user
3996 space programs and it also selects the addressing mode option above.
3997 - The kernel parameter noexec=on will enable this feature and also
3998 - switch the addressing modes, default is disabled. Enabling this (via
3999 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4000 - will reduce system performance.
4001 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4002 + reduce system performance.
4003
4004 comment "Code generation options"
4005
4006 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4007 index ae7c8f9..3f01a0c 100644
4008 --- a/arch/s390/include/asm/atomic.h
4009 +++ b/arch/s390/include/asm/atomic.h
4010 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4011 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4013
4014 +#define atomic64_read_unchecked(v) atomic64_read(v)
4015 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4016 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4017 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4018 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4019 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4020 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4021 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4022 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4023 +
4024 #define smp_mb__before_atomic_dec() smp_mb()
4025 #define smp_mb__after_atomic_dec() smp_mb()
4026 #define smp_mb__before_atomic_inc() smp_mb()
4027 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4028 index e885442..e3a2817 100644
4029 --- a/arch/s390/include/asm/elf.h
4030 +++ b/arch/s390/include/asm/elf.h
4031 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4032 that it will "exec", and that there is sufficient room for the brk. */
4033 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4034
4035 +#ifdef CONFIG_PAX_ASLR
4036 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4037 +
4038 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4039 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4040 +#endif
4041 +
4042 /* This yields a mask that user programs can use to figure out what
4043 instruction set this CPU supports. */
4044
4045 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4046 index e37478e..9ce0e9f 100644
4047 --- a/arch/s390/include/asm/setup.h
4048 +++ b/arch/s390/include/asm/setup.h
4049 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4050 void detect_memory_layout(struct mem_chunk chunk[]);
4051
4052 #ifdef CONFIG_S390_SWITCH_AMODE
4053 -extern unsigned int switch_amode;
4054 +#define switch_amode (1)
4055 #else
4056 #define switch_amode (0)
4057 #endif
4058
4059 #ifdef CONFIG_S390_EXEC_PROTECT
4060 -extern unsigned int s390_noexec;
4061 +#define s390_noexec (1)
4062 #else
4063 #define s390_noexec (0)
4064 #endif
4065 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4066 index 8377e91..e28e6f1 100644
4067 --- a/arch/s390/include/asm/uaccess.h
4068 +++ b/arch/s390/include/asm/uaccess.h
4069 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4070 copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 might_fault();
4073 +
4074 + if ((long)n < 0)
4075 + return n;
4076 +
4077 if (access_ok(VERIFY_WRITE, to, n))
4078 n = __copy_to_user(to, from, n);
4079 return n;
4080 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4081 static inline unsigned long __must_check
4082 __copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 if (__builtin_constant_p(n) && (n <= 256))
4088 return uaccess.copy_from_user_small(n, from, to);
4089 else
4090 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4091 copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 might_fault();
4094 +
4095 + if ((long)n < 0)
4096 + return n;
4097 +
4098 if (access_ok(VERIFY_READ, from, n))
4099 n = __copy_from_user(to, from, n);
4100 else
4101 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4102 index 639380a..72e3c02 100644
4103 --- a/arch/s390/kernel/module.c
4104 +++ b/arch/s390/kernel/module.c
4105 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4106
4107 /* Increase core size by size of got & plt and set start
4108 offsets for got and plt. */
4109 - me->core_size = ALIGN(me->core_size, 4);
4110 - me->arch.got_offset = me->core_size;
4111 - me->core_size += me->arch.got_size;
4112 - me->arch.plt_offset = me->core_size;
4113 - me->core_size += me->arch.plt_size;
4114 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4115 + me->arch.got_offset = me->core_size_rw;
4116 + me->core_size_rw += me->arch.got_size;
4117 + me->arch.plt_offset = me->core_size_rx;
4118 + me->core_size_rx += me->arch.plt_size;
4119 return 0;
4120 }
4121
4122 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 if (info->got_initialized == 0) {
4124 Elf_Addr *gotent;
4125
4126 - gotent = me->module_core + me->arch.got_offset +
4127 + gotent = me->module_core_rw + me->arch.got_offset +
4128 info->got_offset;
4129 *gotent = val;
4130 info->got_initialized = 1;
4131 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 else if (r_type == R_390_GOTENT ||
4133 r_type == R_390_GOTPLTENT)
4134 *(unsigned int *) loc =
4135 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4136 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4137 else if (r_type == R_390_GOT64 ||
4138 r_type == R_390_GOTPLT64)
4139 *(unsigned long *) loc = val;
4140 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4142 if (info->plt_initialized == 0) {
4143 unsigned int *ip;
4144 - ip = me->module_core + me->arch.plt_offset +
4145 + ip = me->module_core_rx + me->arch.plt_offset +
4146 info->plt_offset;
4147 #ifndef CONFIG_64BIT
4148 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4149 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 val - loc + 0xffffUL < 0x1ffffeUL) ||
4151 (r_type == R_390_PLT32DBL &&
4152 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4153 - val = (Elf_Addr) me->module_core +
4154 + val = (Elf_Addr) me->module_core_rx +
4155 me->arch.plt_offset +
4156 info->plt_offset;
4157 val += rela->r_addend - loc;
4158 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4160 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4161 val = val + rela->r_addend -
4162 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4163 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4164 if (r_type == R_390_GOTOFF16)
4165 *(unsigned short *) loc = val;
4166 else if (r_type == R_390_GOTOFF32)
4167 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4168 break;
4169 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4170 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4171 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4172 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4173 rela->r_addend - loc;
4174 if (r_type == R_390_GOTPC)
4175 *(unsigned int *) loc = val;
4176 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4177 index 0b2573a..71a22ec 100644
4178 --- a/arch/s390/kernel/setup.c
4179 +++ b/arch/s390/kernel/setup.c
4180 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4181 early_param("mem", early_parse_mem);
4182
4183 #ifdef CONFIG_S390_SWITCH_AMODE
4184 -unsigned int switch_amode = 0;
4185 -EXPORT_SYMBOL_GPL(switch_amode);
4186 -
4187 static int set_amode_and_uaccess(unsigned long user_amode,
4188 unsigned long user32_amode)
4189 {
4190 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4191 return 0;
4192 }
4193 }
4194 -
4195 -/*
4196 - * Switch kernel/user addressing modes?
4197 - */
4198 -static int __init early_parse_switch_amode(char *p)
4199 -{
4200 - switch_amode = 1;
4201 - return 0;
4202 -}
4203 -early_param("switch_amode", early_parse_switch_amode);
4204 -
4205 #else /* CONFIG_S390_SWITCH_AMODE */
4206 static inline int set_amode_and_uaccess(unsigned long user_amode,
4207 unsigned long user32_amode)
4208 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4209 }
4210 #endif /* CONFIG_S390_SWITCH_AMODE */
4211
4212 -#ifdef CONFIG_S390_EXEC_PROTECT
4213 -unsigned int s390_noexec = 0;
4214 -EXPORT_SYMBOL_GPL(s390_noexec);
4215 -
4216 -/*
4217 - * Enable execute protection?
4218 - */
4219 -static int __init early_parse_noexec(char *p)
4220 -{
4221 - if (!strncmp(p, "off", 3))
4222 - return 0;
4223 - switch_amode = 1;
4224 - s390_noexec = 1;
4225 - return 0;
4226 -}
4227 -early_param("noexec", early_parse_noexec);
4228 -#endif /* CONFIG_S390_EXEC_PROTECT */
4229 -
4230 static void setup_addressing_mode(void)
4231 {
4232 if (s390_noexec) {
4233 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4234 index 0ab74ae..c8b68f9 100644
4235 --- a/arch/s390/mm/mmap.c
4236 +++ b/arch/s390/mm/mmap.c
4237 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4238 */
4239 if (mmap_is_legacy()) {
4240 mm->mmap_base = TASK_UNMAPPED_BASE;
4241 +
4242 +#ifdef CONFIG_PAX_RANDMMAP
4243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4244 + mm->mmap_base += mm->delta_mmap;
4245 +#endif
4246 +
4247 mm->get_unmapped_area = arch_get_unmapped_area;
4248 mm->unmap_area = arch_unmap_area;
4249 } else {
4250 mm->mmap_base = mmap_base();
4251 +
4252 +#ifdef CONFIG_PAX_RANDMMAP
4253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4254 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4255 +#endif
4256 +
4257 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4258 mm->unmap_area = arch_unmap_area_topdown;
4259 }
4260 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 */
4262 if (mmap_is_legacy()) {
4263 mm->mmap_base = TASK_UNMAPPED_BASE;
4264 +
4265 +#ifdef CONFIG_PAX_RANDMMAP
4266 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4267 + mm->mmap_base += mm->delta_mmap;
4268 +#endif
4269 +
4270 mm->get_unmapped_area = s390_get_unmapped_area;
4271 mm->unmap_area = arch_unmap_area;
4272 } else {
4273 mm->mmap_base = mmap_base();
4274 +
4275 +#ifdef CONFIG_PAX_RANDMMAP
4276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4277 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4278 +#endif
4279 +
4280 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4281 mm->unmap_area = arch_unmap_area_topdown;
4282 }
4283 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4284 index 589d5c7..669e274 100644
4285 --- a/arch/score/include/asm/system.h
4286 +++ b/arch/score/include/asm/system.h
4287 @@ -17,7 +17,7 @@ do { \
4288 #define finish_arch_switch(prev) do {} while (0)
4289
4290 typedef void (*vi_handler_t)(void);
4291 -extern unsigned long arch_align_stack(unsigned long sp);
4292 +#define arch_align_stack(x) (x)
4293
4294 #define mb() barrier()
4295 #define rmb() barrier()
4296 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4297 index 25d0803..d6c8e36 100644
4298 --- a/arch/score/kernel/process.c
4299 +++ b/arch/score/kernel/process.c
4300 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4301
4302 return task_pt_regs(task)->cp0_epc;
4303 }
4304 -
4305 -unsigned long arch_align_stack(unsigned long sp)
4306 -{
4307 - return sp;
4308 -}
4309 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4310 index d936c1a..304a252 100644
4311 --- a/arch/sh/boards/mach-hp6xx/pm.c
4312 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4313 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4314 return 0;
4315 }
4316
4317 -static struct platform_suspend_ops hp6x0_pm_ops = {
4318 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4319 .enter = hp6x0_pm_enter,
4320 .valid = suspend_valid_only_mem,
4321 };
4322 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4323 index 8a8a993..7b3079b 100644
4324 --- a/arch/sh/kernel/cpu/sh4/sq.c
4325 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4326 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4327 NULL,
4328 };
4329
4330 -static struct sysfs_ops sq_sysfs_ops = {
4331 +static const struct sysfs_ops sq_sysfs_ops = {
4332 .show = sq_sysfs_show,
4333 .store = sq_sysfs_store,
4334 };
4335 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4336 index ee3c2aa..c49cee6 100644
4337 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4338 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4339 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4340 return 0;
4341 }
4342
4343 -static struct platform_suspend_ops sh_pm_ops = {
4344 +static const struct platform_suspend_ops sh_pm_ops = {
4345 .enter = sh_pm_enter,
4346 .valid = suspend_valid_only_mem,
4347 };
4348 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4349 index 3e532d0..9faa306 100644
4350 --- a/arch/sh/kernel/kgdb.c
4351 +++ b/arch/sh/kernel/kgdb.c
4352 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4353 {
4354 }
4355
4356 -struct kgdb_arch arch_kgdb_ops = {
4357 +const struct kgdb_arch arch_kgdb_ops = {
4358 /* Breakpoint instruction: trapa #0x3c */
4359 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4360 .gdb_bpt_instr = { 0x3c, 0xc3 },
4361 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4362 index afeb710..d1d1289 100644
4363 --- a/arch/sh/mm/mmap.c
4364 +++ b/arch/sh/mm/mmap.c
4365 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4366 addr = PAGE_ALIGN(addr);
4367
4368 vma = find_vma(mm, addr);
4369 - if (TASK_SIZE - len >= addr &&
4370 - (!vma || addr + len <= vma->vm_start))
4371 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4372 return addr;
4373 }
4374
4375 @@ -106,7 +105,7 @@ full_search:
4376 }
4377 return -ENOMEM;
4378 }
4379 - if (likely(!vma || addr + len <= vma->vm_start)) {
4380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4381 /*
4382 * Remember the place where we stopped the search:
4383 */
4384 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4385 addr = PAGE_ALIGN(addr);
4386
4387 vma = find_vma(mm, addr);
4388 - if (TASK_SIZE - len >= addr &&
4389 - (!vma || addr + len <= vma->vm_start))
4390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4391 return addr;
4392 }
4393
4394 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 /* make sure it can fit in the remaining address space */
4396 if (likely(addr > len)) {
4397 vma = find_vma(mm, addr-len);
4398 - if (!vma || addr <= vma->vm_start) {
4399 + if (check_heap_stack_gap(vma, addr - len, len)) {
4400 /* remember the address as a hint for next time */
4401 return (mm->free_area_cache = addr-len);
4402 }
4403 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4404 if (unlikely(mm->mmap_base < len))
4405 goto bottomup;
4406
4407 - addr = mm->mmap_base-len;
4408 - if (do_colour_align)
4409 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4410 + addr = mm->mmap_base - len;
4411
4412 do {
4413 + if (do_colour_align)
4414 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4415 /*
4416 * Lookup failure means no vma is above this address,
4417 * else if new region fits below vma->vm_start,
4418 * return with success:
4419 */
4420 vma = find_vma(mm, addr);
4421 - if (likely(!vma || addr+len <= vma->vm_start)) {
4422 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4423 /* remember the address as a hint for next time */
4424 return (mm->free_area_cache = addr);
4425 }
4426 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4427 mm->cached_hole_size = vma->vm_start - addr;
4428
4429 /* try just below the current vma->vm_start */
4430 - addr = vma->vm_start-len;
4431 - if (do_colour_align)
4432 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 - } while (likely(len < vma->vm_start));
4434 + addr = skip_heap_stack_gap(vma, len);
4435 + } while (!IS_ERR_VALUE(addr));
4436
4437 bottomup:
4438 /*
4439 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4440 index 05ef538..dc9c857 100644
4441 --- a/arch/sparc/Kconfig
4442 +++ b/arch/sparc/Kconfig
4443 @@ -32,6 +32,7 @@ config SPARC
4444
4445 config SPARC32
4446 def_bool !64BIT
4447 + select GENERIC_ATOMIC64
4448
4449 config SPARC64
4450 def_bool 64BIT
4451 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4452 index 113225b..7fd04e7 100644
4453 --- a/arch/sparc/Makefile
4454 +++ b/arch/sparc/Makefile
4455 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4456 # Export what is needed by arch/sparc/boot/Makefile
4457 export VMLINUX_INIT VMLINUX_MAIN
4458 VMLINUX_INIT := $(head-y) $(init-y)
4459 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4460 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4461 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4462 VMLINUX_MAIN += $(drivers-y) $(net-y)
4463
4464 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4465 index 8ff83d8..4a459c2 100644
4466 --- a/arch/sparc/include/asm/atomic.h
4467 +++ b/arch/sparc/include/asm/atomic.h
4468 @@ -4,5 +4,6 @@
4469 #include <asm/atomic_64.h>
4470 #else
4471 #include <asm/atomic_32.h>
4472 +#include <asm-generic/atomic64.h>
4473 #endif
4474 #endif
4475 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4476 index f5cc06f..f858d47 100644
4477 --- a/arch/sparc/include/asm/atomic_64.h
4478 +++ b/arch/sparc/include/asm/atomic_64.h
4479 @@ -14,18 +14,40 @@
4480 #define ATOMIC64_INIT(i) { (i) }
4481
4482 #define atomic_read(v) ((v)->counter)
4483 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4484 +{
4485 + return v->counter;
4486 +}
4487 #define atomic64_read(v) ((v)->counter)
4488 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4489 +{
4490 + return v->counter;
4491 +}
4492
4493 #define atomic_set(v, i) (((v)->counter) = i)
4494 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4495 +{
4496 + v->counter = i;
4497 +}
4498 #define atomic64_set(v, i) (((v)->counter) = i)
4499 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4500 +{
4501 + v->counter = i;
4502 +}
4503
4504 extern void atomic_add(int, atomic_t *);
4505 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4506 extern void atomic64_add(long, atomic64_t *);
4507 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4508 extern void atomic_sub(int, atomic_t *);
4509 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4510 extern void atomic64_sub(long, atomic64_t *);
4511 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4512
4513 extern int atomic_add_ret(int, atomic_t *);
4514 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4515 extern long atomic64_add_ret(long, atomic64_t *);
4516 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4517 extern int atomic_sub_ret(int, atomic_t *);
4518 extern long atomic64_sub_ret(long, atomic64_t *);
4519
4520 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4521 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4522
4523 #define atomic_inc_return(v) atomic_add_ret(1, v)
4524 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4525 +{
4526 + return atomic_add_ret_unchecked(1, v);
4527 +}
4528 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4529 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4530 +{
4531 + return atomic64_add_ret_unchecked(1, v);
4532 +}
4533
4534 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4535 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4536
4537 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4538 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4539 +{
4540 + return atomic_add_ret_unchecked(i, v);
4541 +}
4542 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4543 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4544 +{
4545 + return atomic64_add_ret_unchecked(i, v);
4546 +}
4547
4548 /*
4549 * atomic_inc_and_test - increment and test
4550 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4551 * other cases.
4552 */
4553 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4554 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4555 +{
4556 + return atomic_inc_return_unchecked(v) == 0;
4557 +}
4558 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4559
4560 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4561 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4562 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4563
4564 #define atomic_inc(v) atomic_add(1, v)
4565 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4566 +{
4567 + atomic_add_unchecked(1, v);
4568 +}
4569 #define atomic64_inc(v) atomic64_add(1, v)
4570 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4571 +{
4572 + atomic64_add_unchecked(1, v);
4573 +}
4574
4575 #define atomic_dec(v) atomic_sub(1, v)
4576 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4577 +{
4578 + atomic_sub_unchecked(1, v);
4579 +}
4580 #define atomic64_dec(v) atomic64_sub(1, v)
4581 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4582 +{
4583 + atomic64_sub_unchecked(1, v);
4584 +}
4585
4586 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4587 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4588
4589 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4590 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4591 +{
4592 + return cmpxchg(&v->counter, old, new);
4593 +}
4594 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4595 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4596 +{
4597 + return xchg(&v->counter, new);
4598 +}
4599
4600 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4601 {
4602 - int c, old;
4603 + int c, old, new;
4604 c = atomic_read(v);
4605 for (;;) {
4606 - if (unlikely(c == (u)))
4607 + if (unlikely(c == u))
4608 break;
4609 - old = atomic_cmpxchg((v), c, c + (a));
4610 +
4611 + asm volatile("addcc %2, %0, %0\n"
4612 +
4613 +#ifdef CONFIG_PAX_REFCOUNT
4614 + "tvs %%icc, 6\n"
4615 +#endif
4616 +
4617 + : "=r" (new)
4618 + : "0" (c), "ir" (a)
4619 + : "cc");
4620 +
4621 + old = atomic_cmpxchg(v, c, new);
4622 if (likely(old == c))
4623 break;
4624 c = old;
4625 }
4626 - return c != (u);
4627 + return c != u;
4628 }
4629
4630 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4631 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4632 #define atomic64_cmpxchg(v, o, n) \
4633 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4634 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4635 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4636 +{
4637 + return xchg(&v->counter, new);
4638 +}
4639
4640 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4641 {
4642 - long c, old;
4643 + long c, old, new;
4644 c = atomic64_read(v);
4645 for (;;) {
4646 - if (unlikely(c == (u)))
4647 + if (unlikely(c == u))
4648 break;
4649 - old = atomic64_cmpxchg((v), c, c + (a));
4650 +
4651 + asm volatile("addcc %2, %0, %0\n"
4652 +
4653 +#ifdef CONFIG_PAX_REFCOUNT
4654 + "tvs %%xcc, 6\n"
4655 +#endif
4656 +
4657 + : "=r" (new)
4658 + : "0" (c), "ir" (a)
4659 + : "cc");
4660 +
4661 + old = atomic64_cmpxchg(v, c, new);
4662 if (likely(old == c))
4663 break;
4664 c = old;
4665 }
4666 - return c != (u);
4667 + return c != u;
4668 }
4669
4670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4671 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4672 index 41f85ae..fb54d5e 100644
4673 --- a/arch/sparc/include/asm/cache.h
4674 +++ b/arch/sparc/include/asm/cache.h
4675 @@ -8,7 +8,7 @@
4676 #define _SPARC_CACHE_H
4677
4678 #define L1_CACHE_SHIFT 5
4679 -#define L1_CACHE_BYTES 32
4680 +#define L1_CACHE_BYTES 32UL
4681 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4682
4683 #ifdef CONFIG_SPARC32
4684 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4685 index 5a8c308..38def92 100644
4686 --- a/arch/sparc/include/asm/dma-mapping.h
4687 +++ b/arch/sparc/include/asm/dma-mapping.h
4688 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4689 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4690 #define dma_is_consistent(d, h) (1)
4691
4692 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4693 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4694 extern struct bus_type pci_bus_type;
4695
4696 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4697 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4698 {
4699 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4700 if (dev->bus == &pci_bus_type)
4701 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4702 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4703 dma_addr_t *dma_handle, gfp_t flag)
4704 {
4705 - struct dma_map_ops *ops = get_dma_ops(dev);
4706 + const struct dma_map_ops *ops = get_dma_ops(dev);
4707 void *cpu_addr;
4708
4709 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4710 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4711 static inline void dma_free_coherent(struct device *dev, size_t size,
4712 void *cpu_addr, dma_addr_t dma_handle)
4713 {
4714 - struct dma_map_ops *ops = get_dma_ops(dev);
4715 + const struct dma_map_ops *ops = get_dma_ops(dev);
4716
4717 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4718 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4719 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4720 index 381a1b5..b97e3ff 100644
4721 --- a/arch/sparc/include/asm/elf_32.h
4722 +++ b/arch/sparc/include/asm/elf_32.h
4723 @@ -116,6 +116,13 @@ typedef struct {
4724
4725 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4726
4727 +#ifdef CONFIG_PAX_ASLR
4728 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4729 +
4730 +#define PAX_DELTA_MMAP_LEN 16
4731 +#define PAX_DELTA_STACK_LEN 16
4732 +#endif
4733 +
4734 /* This yields a mask that user programs can use to figure out what
4735 instruction set this cpu supports. This can NOT be done in userspace
4736 on Sparc. */
4737 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4738 index 9968085..c2106ef 100644
4739 --- a/arch/sparc/include/asm/elf_64.h
4740 +++ b/arch/sparc/include/asm/elf_64.h
4741 @@ -163,6 +163,12 @@ typedef struct {
4742 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4743 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4744
4745 +#ifdef CONFIG_PAX_ASLR
4746 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4747 +
4748 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4749 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4750 +#endif
4751
4752 /* This yields a mask that user programs can use to figure out what
4753 instruction set this cpu supports. */
4754 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4755 index 156707b..aefa786 100644
4756 --- a/arch/sparc/include/asm/page_32.h
4757 +++ b/arch/sparc/include/asm/page_32.h
4758 @@ -8,6 +8,8 @@
4759 #ifndef _SPARC_PAGE_H
4760 #define _SPARC_PAGE_H
4761
4762 +#include <linux/const.h>
4763 +
4764 #define PAGE_SHIFT 12
4765
4766 #ifndef __ASSEMBLY__
4767 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4768 index e0cabe7..efd60f1 100644
4769 --- a/arch/sparc/include/asm/pgtable_32.h
4770 +++ b/arch/sparc/include/asm/pgtable_32.h
4771 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4772 BTFIXUPDEF_INT(page_none)
4773 BTFIXUPDEF_INT(page_copy)
4774 BTFIXUPDEF_INT(page_readonly)
4775 +
4776 +#ifdef CONFIG_PAX_PAGEEXEC
4777 +BTFIXUPDEF_INT(page_shared_noexec)
4778 +BTFIXUPDEF_INT(page_copy_noexec)
4779 +BTFIXUPDEF_INT(page_readonly_noexec)
4780 +#endif
4781 +
4782 BTFIXUPDEF_INT(page_kernel)
4783
4784 #define PMD_SHIFT SUN4C_PMD_SHIFT
4785 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4786 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4787 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4788
4789 +#ifdef CONFIG_PAX_PAGEEXEC
4790 +extern pgprot_t PAGE_SHARED_NOEXEC;
4791 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4792 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4793 +#else
4794 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4795 +# define PAGE_COPY_NOEXEC PAGE_COPY
4796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4797 +#endif
4798 +
4799 extern unsigned long page_kernel;
4800
4801 #ifdef MODULE
4802 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4803 index 1407c07..7e10231 100644
4804 --- a/arch/sparc/include/asm/pgtsrmmu.h
4805 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4806 @@ -115,6 +115,13 @@
4807 SRMMU_EXEC | SRMMU_REF)
4808 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4809 SRMMU_EXEC | SRMMU_REF)
4810 +
4811 +#ifdef CONFIG_PAX_PAGEEXEC
4812 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4813 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4814 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4815 +#endif
4816 +
4817 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4818 SRMMU_DIRTY | SRMMU_REF)
4819
4820 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4821 index 43e5147..47622a1 100644
4822 --- a/arch/sparc/include/asm/spinlock_64.h
4823 +++ b/arch/sparc/include/asm/spinlock_64.h
4824 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4825
4826 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4827
4828 -static void inline arch_read_lock(raw_rwlock_t *lock)
4829 +static inline void arch_read_lock(raw_rwlock_t *lock)
4830 {
4831 unsigned long tmp1, tmp2;
4832
4833 __asm__ __volatile__ (
4834 "1: ldsw [%2], %0\n"
4835 " brlz,pn %0, 2f\n"
4836 -"4: add %0, 1, %1\n"
4837 +"4: addcc %0, 1, %1\n"
4838 +
4839 +#ifdef CONFIG_PAX_REFCOUNT
4840 +" tvs %%icc, 6\n"
4841 +#endif
4842 +
4843 " cas [%2], %0, %1\n"
4844 " cmp %0, %1\n"
4845 " bne,pn %%icc, 1b\n"
4846 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4847 " .previous"
4848 : "=&r" (tmp1), "=&r" (tmp2)
4849 : "r" (lock)
4850 - : "memory");
4851 + : "memory", "cc");
4852 }
4853
4854 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4855 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4856 {
4857 int tmp1, tmp2;
4858
4859 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4860 "1: ldsw [%2], %0\n"
4861 " brlz,a,pn %0, 2f\n"
4862 " mov 0, %0\n"
4863 -" add %0, 1, %1\n"
4864 +" addcc %0, 1, %1\n"
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 +" tvs %%icc, 6\n"
4868 +#endif
4869 +
4870 " cas [%2], %0, %1\n"
4871 " cmp %0, %1\n"
4872 " bne,pn %%icc, 1b\n"
4873 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4874 return tmp1;
4875 }
4876
4877 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4878 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4879 {
4880 unsigned long tmp1, tmp2;
4881
4882 __asm__ __volatile__(
4883 "1: lduw [%2], %0\n"
4884 -" sub %0, 1, %1\n"
4885 +" subcc %0, 1, %1\n"
4886 +
4887 +#ifdef CONFIG_PAX_REFCOUNT
4888 +" tvs %%icc, 6\n"
4889 +#endif
4890 +
4891 " cas [%2], %0, %1\n"
4892 " cmp %0, %1\n"
4893 " bne,pn %%xcc, 1b\n"
4894 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898 -static void inline arch_write_lock(raw_rwlock_t *lock)
4899 +static inline void arch_write_lock(raw_rwlock_t *lock)
4900 {
4901 unsigned long mask, tmp1, tmp2;
4902
4903 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4908 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4909 {
4910 __asm__ __volatile__(
4911 " stw %%g0, [%0]"
4912 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4913 : "memory");
4914 }
4915
4916 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4917 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4918 {
4919 unsigned long mask, tmp1, tmp2, result;
4920
4921 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4922 index 844d73a..f787fb9 100644
4923 --- a/arch/sparc/include/asm/thread_info_32.h
4924 +++ b/arch/sparc/include/asm/thread_info_32.h
4925 @@ -50,6 +50,8 @@ struct thread_info {
4926 unsigned long w_saved;
4927
4928 struct restart_block restart_block;
4929 +
4930 + unsigned long lowest_stack;
4931 };
4932
4933 /*
4934 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4935 index f78ad9a..9f55fc7 100644
4936 --- a/arch/sparc/include/asm/thread_info_64.h
4937 +++ b/arch/sparc/include/asm/thread_info_64.h
4938 @@ -68,6 +68,8 @@ struct thread_info {
4939 struct pt_regs *kern_una_regs;
4940 unsigned int kern_una_insn;
4941
4942 + unsigned long lowest_stack;
4943 +
4944 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4945 };
4946
4947 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4948 index e88fbe5..96b0ce5 100644
4949 --- a/arch/sparc/include/asm/uaccess.h
4950 +++ b/arch/sparc/include/asm/uaccess.h
4951 @@ -1,5 +1,13 @@
4952 #ifndef ___ASM_SPARC_UACCESS_H
4953 #define ___ASM_SPARC_UACCESS_H
4954 +
4955 +#ifdef __KERNEL__
4956 +#ifndef __ASSEMBLY__
4957 +#include <linux/types.h>
4958 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4959 +#endif
4960 +#endif
4961 +
4962 #if defined(__sparc__) && defined(__arch64__)
4963 #include <asm/uaccess_64.h>
4964 #else
4965 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4966 index 8303ac4..07f333d 100644
4967 --- a/arch/sparc/include/asm/uaccess_32.h
4968 +++ b/arch/sparc/include/asm/uaccess_32.h
4969 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4970
4971 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4972 {
4973 - if (n && __access_ok((unsigned long) to, n))
4974 + if ((long)n < 0)
4975 + return n;
4976 +
4977 + if (n && __access_ok((unsigned long) to, n)) {
4978 + if (!__builtin_constant_p(n))
4979 + check_object_size(from, n, true);
4980 return __copy_user(to, (__force void __user *) from, n);
4981 - else
4982 + } else
4983 return n;
4984 }
4985
4986 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4987 {
4988 + if ((long)n < 0)
4989 + return n;
4990 +
4991 + if (!__builtin_constant_p(n))
4992 + check_object_size(from, n, true);
4993 +
4994 return __copy_user(to, (__force void __user *) from, n);
4995 }
4996
4997 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4998 {
4999 - if (n && __access_ok((unsigned long) from, n))
5000 + if ((long)n < 0)
5001 + return n;
5002 +
5003 + if (n && __access_ok((unsigned long) from, n)) {
5004 + if (!__builtin_constant_p(n))
5005 + check_object_size(to, n, false);
5006 return __copy_user((__force void __user *) to, from, n);
5007 - else
5008 + } else
5009 return n;
5010 }
5011
5012 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5013 {
5014 + if ((long)n < 0)
5015 + return n;
5016 +
5017 return __copy_user((__force void __user *) to, from, n);
5018 }
5019
5020 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5021 index 9ea271e..7b8a271 100644
5022 --- a/arch/sparc/include/asm/uaccess_64.h
5023 +++ b/arch/sparc/include/asm/uaccess_64.h
5024 @@ -9,6 +9,7 @@
5025 #include <linux/compiler.h>
5026 #include <linux/string.h>
5027 #include <linux/thread_info.h>
5028 +#include <linux/kernel.h>
5029 #include <asm/asi.h>
5030 #include <asm/system.h>
5031 #include <asm/spitfire.h>
5032 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5033 static inline unsigned long __must_check
5034 copy_from_user(void *to, const void __user *from, unsigned long size)
5035 {
5036 - unsigned long ret = ___copy_from_user(to, from, size);
5037 + unsigned long ret;
5038
5039 + if ((long)size < 0 || size > INT_MAX)
5040 + return size;
5041 +
5042 + if (!__builtin_constant_p(size))
5043 + check_object_size(to, size, false);
5044 +
5045 + ret = ___copy_from_user(to, from, size);
5046 if (unlikely(ret))
5047 ret = copy_from_user_fixup(to, from, size);
5048 return ret;
5049 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5050 static inline unsigned long __must_check
5051 copy_to_user(void __user *to, const void *from, unsigned long size)
5052 {
5053 - unsigned long ret = ___copy_to_user(to, from, size);
5054 + unsigned long ret;
5055
5056 + if ((long)size < 0 || size > INT_MAX)
5057 + return size;
5058 +
5059 + if (!__builtin_constant_p(size))
5060 + check_object_size(from, size, true);
5061 +
5062 + ret = ___copy_to_user(to, from, size);
5063 if (unlikely(ret))
5064 ret = copy_to_user_fixup(to, from, size);
5065 return ret;
5066 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5067 index 2782681..77ded84 100644
5068 --- a/arch/sparc/kernel/Makefile
5069 +++ b/arch/sparc/kernel/Makefile
5070 @@ -3,7 +3,7 @@
5071 #
5072
5073 asflags-y := -ansi
5074 -ccflags-y := -Werror
5075 +#ccflags-y := -Werror
5076
5077 extra-y := head_$(BITS).o
5078 extra-y += init_task.o
5079 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5080 index 7690cc2..ece64c9 100644
5081 --- a/arch/sparc/kernel/iommu.c
5082 +++ b/arch/sparc/kernel/iommu.c
5083 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5084 spin_unlock_irqrestore(&iommu->lock, flags);
5085 }
5086
5087 -static struct dma_map_ops sun4u_dma_ops = {
5088 +static const struct dma_map_ops sun4u_dma_ops = {
5089 .alloc_coherent = dma_4u_alloc_coherent,
5090 .free_coherent = dma_4u_free_coherent,
5091 .map_page = dma_4u_map_page,
5092 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5093 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5094 };
5095
5096 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5097 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5098 EXPORT_SYMBOL(dma_ops);
5099
5100 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5101 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5102 index 9f61fd8..bd048db 100644
5103 --- a/arch/sparc/kernel/ioport.c
5104 +++ b/arch/sparc/kernel/ioport.c
5105 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5106 BUG();
5107 }
5108
5109 -struct dma_map_ops sbus_dma_ops = {
5110 +const struct dma_map_ops sbus_dma_ops = {
5111 .alloc_coherent = sbus_alloc_coherent,
5112 .free_coherent = sbus_free_coherent,
5113 .map_page = sbus_map_page,
5114 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5115 .sync_sg_for_device = sbus_sync_sg_for_device,
5116 };
5117
5118 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5119 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5120 EXPORT_SYMBOL(dma_ops);
5121
5122 static int __init sparc_register_ioport(void)
5123 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5124 }
5125 }
5126
5127 -struct dma_map_ops pci32_dma_ops = {
5128 +const struct dma_map_ops pci32_dma_ops = {
5129 .alloc_coherent = pci32_alloc_coherent,
5130 .free_coherent = pci32_free_coherent,
5131 .map_page = pci32_map_page,
5132 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5133 index 04df4ed..55c4b6e 100644
5134 --- a/arch/sparc/kernel/kgdb_32.c
5135 +++ b/arch/sparc/kernel/kgdb_32.c
5136 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5137 {
5138 }
5139
5140 -struct kgdb_arch arch_kgdb_ops = {
5141 +const struct kgdb_arch arch_kgdb_ops = {
5142 /* Breakpoint instruction: ta 0x7d */
5143 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5144 };
5145 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5146 index f5a0fd4..d886f71 100644
5147 --- a/arch/sparc/kernel/kgdb_64.c
5148 +++ b/arch/sparc/kernel/kgdb_64.c
5149 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5150 {
5151 }
5152
5153 -struct kgdb_arch arch_kgdb_ops = {
5154 +const struct kgdb_arch arch_kgdb_ops = {
5155 /* Breakpoint instruction: ta 0x72 */
5156 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5157 };
5158 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5159 index 23c33ff..d137fbd 100644
5160 --- a/arch/sparc/kernel/pci_sun4v.c
5161 +++ b/arch/sparc/kernel/pci_sun4v.c
5162 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5163 spin_unlock_irqrestore(&iommu->lock, flags);
5164 }
5165
5166 -static struct dma_map_ops sun4v_dma_ops = {
5167 +static const struct dma_map_ops sun4v_dma_ops = {
5168 .alloc_coherent = dma_4v_alloc_coherent,
5169 .free_coherent = dma_4v_free_coherent,
5170 .map_page = dma_4v_map_page,
5171 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5172 index c49865b..b41a81b 100644
5173 --- a/arch/sparc/kernel/process_32.c
5174 +++ b/arch/sparc/kernel/process_32.c
5175 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5176 rw->ins[4], rw->ins[5],
5177 rw->ins[6],
5178 rw->ins[7]);
5179 - printk("%pS\n", (void *) rw->ins[7]);
5180 + printk("%pA\n", (void *) rw->ins[7]);
5181 rw = (struct reg_window32 *) rw->ins[6];
5182 }
5183 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5184 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5185
5186 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5187 r->psr, r->pc, r->npc, r->y, print_tainted());
5188 - printk("PC: <%pS>\n", (void *) r->pc);
5189 + printk("PC: <%pA>\n", (void *) r->pc);
5190 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5192 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5193 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5194 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5195 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5196 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5197 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5198
5199 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5200 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5201 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5202 rw = (struct reg_window32 *) fp;
5203 pc = rw->ins[7];
5204 printk("[%08lx : ", pc);
5205 - printk("%pS ] ", (void *) pc);
5206 + printk("%pA ] ", (void *) pc);
5207 fp = rw->ins[6];
5208 } while (++count < 16);
5209 printk("\n");
5210 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5211 index cb70476..3d0c191 100644
5212 --- a/arch/sparc/kernel/process_64.c
5213 +++ b/arch/sparc/kernel/process_64.c
5214 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5215 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5216 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5217 if (regs->tstate & TSTATE_PRIV)
5218 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5219 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5220 }
5221
5222 void show_regs(struct pt_regs *regs)
5223 {
5224 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5225 regs->tpc, regs->tnpc, regs->y, print_tainted());
5226 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5227 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5228 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5229 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5230 regs->u_regs[3]);
5231 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5232 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5233 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5234 regs->u_regs[15]);
5235 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5236 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5237 show_regwindow(regs);
5238 }
5239
5240 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5241 ((tp && tp->task) ? tp->task->pid : -1));
5242
5243 if (gp->tstate & TSTATE_PRIV) {
5244 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5245 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5246 (void *) gp->tpc,
5247 (void *) gp->o7,
5248 (void *) gp->i7,
5249 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5250 index 6edc4e5..06a69b4 100644
5251 --- a/arch/sparc/kernel/sigutil_64.c
5252 +++ b/arch/sparc/kernel/sigutil_64.c
5253 @@ -2,6 +2,7 @@
5254 #include <linux/types.h>
5255 #include <linux/thread_info.h>
5256 #include <linux/uaccess.h>
5257 +#include <linux/errno.h>
5258
5259 #include <asm/sigcontext.h>
5260 #include <asm/fpumacro.h>
5261 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5262 index 3a82e65..ce0a53a 100644
5263 --- a/arch/sparc/kernel/sys_sparc_32.c
5264 +++ b/arch/sparc/kernel/sys_sparc_32.c
5265 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 if (ARCH_SUN4C && len > 0x20000000)
5267 return -ENOMEM;
5268 if (!addr)
5269 - addr = TASK_UNMAPPED_BASE;
5270 + addr = current->mm->mmap_base;
5271
5272 if (flags & MAP_SHARED)
5273 addr = COLOUR_ALIGN(addr);
5274 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5275 }
5276 if (TASK_SIZE - PAGE_SIZE - len < addr)
5277 return -ENOMEM;
5278 - if (!vmm || addr + len <= vmm->vm_start)
5279 + if (check_heap_stack_gap(vmm, addr, len))
5280 return addr;
5281 addr = vmm->vm_end;
5282 if (flags & MAP_SHARED)
5283 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5284 index cfa0e19..98972ac 100644
5285 --- a/arch/sparc/kernel/sys_sparc_64.c
5286 +++ b/arch/sparc/kernel/sys_sparc_64.c
5287 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 /* We do not accept a shared mapping if it would violate
5289 * cache aliasing constraints.
5290 */
5291 - if ((flags & MAP_SHARED) &&
5292 + if ((filp || (flags & MAP_SHARED)) &&
5293 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5294 return -EINVAL;
5295 return addr;
5296 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5297 if (filp || (flags & MAP_SHARED))
5298 do_color_align = 1;
5299
5300 +#ifdef CONFIG_PAX_RANDMMAP
5301 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5302 +#endif
5303 +
5304 if (addr) {
5305 if (do_color_align)
5306 addr = COLOUR_ALIGN(addr, pgoff);
5307 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 addr = PAGE_ALIGN(addr);
5309
5310 vma = find_vma(mm, addr);
5311 - if (task_size - len >= addr &&
5312 - (!vma || addr + len <= vma->vm_start))
5313 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5314 return addr;
5315 }
5316
5317 if (len > mm->cached_hole_size) {
5318 - start_addr = addr = mm->free_area_cache;
5319 + start_addr = addr = mm->free_area_cache;
5320 } else {
5321 - start_addr = addr = TASK_UNMAPPED_BASE;
5322 + start_addr = addr = mm->mmap_base;
5323 mm->cached_hole_size = 0;
5324 }
5325
5326 @@ -175,14 +178,14 @@ full_search:
5327 vma = find_vma(mm, VA_EXCLUDE_END);
5328 }
5329 if (unlikely(task_size < addr)) {
5330 - if (start_addr != TASK_UNMAPPED_BASE) {
5331 - start_addr = addr = TASK_UNMAPPED_BASE;
5332 + if (start_addr != mm->mmap_base) {
5333 + start_addr = addr = mm->mmap_base;
5334 mm->cached_hole_size = 0;
5335 goto full_search;
5336 }
5337 return -ENOMEM;
5338 }
5339 - if (likely(!vma || addr + len <= vma->vm_start)) {
5340 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5341 /*
5342 * Remember the place where we stopped the search:
5343 */
5344 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 /* We do not accept a shared mapping if it would violate
5346 * cache aliasing constraints.
5347 */
5348 - if ((flags & MAP_SHARED) &&
5349 + if ((filp || (flags & MAP_SHARED)) &&
5350 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5351 return -EINVAL;
5352 return addr;
5353 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357 - if (task_size - len >= addr &&
5358 - (!vma || addr + len <= vma->vm_start))
5359 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5360 return addr;
5361 }
5362
5363 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 /* make sure it can fit in the remaining address space */
5365 if (likely(addr > len)) {
5366 vma = find_vma(mm, addr-len);
5367 - if (!vma || addr <= vma->vm_start) {
5368 + if (check_heap_stack_gap(vma, addr - len, len)) {
5369 /* remember the address as a hint for next time */
5370 return (mm->free_area_cache = addr-len);
5371 }
5372 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5373 if (unlikely(mm->mmap_base < len))
5374 goto bottomup;
5375
5376 - addr = mm->mmap_base-len;
5377 - if (do_color_align)
5378 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5379 + addr = mm->mmap_base - len;
5380
5381 do {
5382 + if (do_color_align)
5383 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5384 /*
5385 * Lookup failure means no vma is above this address,
5386 * else if new region fits below vma->vm_start,
5387 * return with success:
5388 */
5389 vma = find_vma(mm, addr);
5390 - if (likely(!vma || addr+len <= vma->vm_start)) {
5391 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5392 /* remember the address as a hint for next time */
5393 return (mm->free_area_cache = addr);
5394 }
5395 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5396 mm->cached_hole_size = vma->vm_start - addr;
5397
5398 /* try just below the current vma->vm_start */
5399 - addr = vma->vm_start-len;
5400 - if (do_color_align)
5401 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5402 - } while (likely(len < vma->vm_start));
5403 + addr = skip_heap_stack_gap(vma, len);
5404 + } while (!IS_ERR_VALUE(addr));
5405
5406 bottomup:
5407 /*
5408 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5409 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5410 sysctl_legacy_va_layout) {
5411 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5412 +
5413 +#ifdef CONFIG_PAX_RANDMMAP
5414 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5415 + mm->mmap_base += mm->delta_mmap;
5416 +#endif
5417 +
5418 mm->get_unmapped_area = arch_get_unmapped_area;
5419 mm->unmap_area = arch_unmap_area;
5420 } else {
5421 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5422 gap = (task_size / 6 * 5);
5423
5424 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5425 +
5426 +#ifdef CONFIG_PAX_RANDMMAP
5427 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5428 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5429 +#endif
5430 +
5431 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5432 mm->unmap_area = arch_unmap_area_topdown;
5433 }
5434 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5435 index c0490c7..84959d1 100644
5436 --- a/arch/sparc/kernel/traps_32.c
5437 +++ b/arch/sparc/kernel/traps_32.c
5438 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5439 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5440 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5441
5442 +extern void gr_handle_kernel_exploit(void);
5443 +
5444 void die_if_kernel(char *str, struct pt_regs *regs)
5445 {
5446 static int die_counter;
5447 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5448 count++ < 30 &&
5449 (((unsigned long) rw) >= PAGE_OFFSET) &&
5450 !(((unsigned long) rw) & 0x7)) {
5451 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5452 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5453 (void *) rw->ins[7]);
5454 rw = (struct reg_window32 *)rw->ins[6];
5455 }
5456 }
5457 printk("Instruction DUMP:");
5458 instruction_dump ((unsigned long *) regs->pc);
5459 - if(regs->psr & PSR_PS)
5460 + if(regs->psr & PSR_PS) {
5461 + gr_handle_kernel_exploit();
5462 do_exit(SIGKILL);
5463 + }
5464 do_exit(SIGSEGV);
5465 }
5466
5467 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5468 index 10f7bb9..cdb6793 100644
5469 --- a/arch/sparc/kernel/traps_64.c
5470 +++ b/arch/sparc/kernel/traps_64.c
5471 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5472 i + 1,
5473 p->trapstack[i].tstate, p->trapstack[i].tpc,
5474 p->trapstack[i].tnpc, p->trapstack[i].tt);
5475 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5476 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5477 }
5478 }
5479
5480 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5481
5482 lvl -= 0x100;
5483 if (regs->tstate & TSTATE_PRIV) {
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 + if (lvl == 6)
5487 + pax_report_refcount_overflow(regs);
5488 +#endif
5489 +
5490 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5491 die_if_kernel(buffer, regs);
5492 }
5493 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5494 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5495 {
5496 char buffer[32];
5497 -
5498 +
5499 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5500 0, lvl, SIGTRAP) == NOTIFY_STOP)
5501 return;
5502
5503 +#ifdef CONFIG_PAX_REFCOUNT
5504 + if (lvl == 6)
5505 + pax_report_refcount_overflow(regs);
5506 +#endif
5507 +
5508 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5509
5510 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5511 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5512 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5513 printk("%s" "ERROR(%d): ",
5514 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5515 - printk("TPC<%pS>\n", (void *) regs->tpc);
5516 + printk("TPC<%pA>\n", (void *) regs->tpc);
5517 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5518 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5519 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5520 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5525 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5526 panic("Irrecoverable Cheetah+ parity error.");
5527 }
5528
5529 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5530 smp_processor_id(),
5531 (type & 0x1) ? 'I' : 'D',
5532 regs->tpc);
5533 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5534 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5535 }
5536
5537 struct sun4v_error_entry {
5538 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5539
5540 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5541 regs->tpc, tl);
5542 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5543 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5544 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5545 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5546 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5547 (void *) regs->u_regs[UREG_I7]);
5548 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5549 "pte[%lx] error[%lx]\n",
5550 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5551
5552 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5553 regs->tpc, tl);
5554 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5555 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5556 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5557 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5558 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5559 (void *) regs->u_regs[UREG_I7]);
5560 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5561 "pte[%lx] error[%lx]\n",
5562 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5563 fp = (unsigned long)sf->fp + STACK_BIAS;
5564 }
5565
5566 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5567 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5568 } while (++count < 16);
5569 }
5570
5571 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5572 return (struct reg_window *) (fp + STACK_BIAS);
5573 }
5574
5575 +extern void gr_handle_kernel_exploit(void);
5576 +
5577 void die_if_kernel(char *str, struct pt_regs *regs)
5578 {
5579 static int die_counter;
5580 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 while (rw &&
5582 count++ < 30&&
5583 is_kernel_stack(current, rw)) {
5584 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5585 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5586 (void *) rw->ins[7]);
5587
5588 rw = kernel_stack_up(rw);
5589 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5590 }
5591 user_instruction_dump ((unsigned int __user *) regs->tpc);
5592 }
5593 - if (regs->tstate & TSTATE_PRIV)
5594 + if (regs->tstate & TSTATE_PRIV) {
5595 + gr_handle_kernel_exploit();
5596 do_exit(SIGKILL);
5597 + }
5598 +
5599 do_exit(SIGSEGV);
5600 }
5601 EXPORT_SYMBOL(die_if_kernel);
5602 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5603 index be183fe..1c8d332 100644
5604 --- a/arch/sparc/kernel/una_asm_64.S
5605 +++ b/arch/sparc/kernel/una_asm_64.S
5606 @@ -127,7 +127,7 @@ do_int_load:
5607 wr %o5, 0x0, %asi
5608 retl
5609 mov 0, %o0
5610 - .size __do_int_load, .-__do_int_load
5611 + .size do_int_load, .-do_int_load
5612
5613 .section __ex_table,"a"
5614 .word 4b, __retl_efault
5615 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5616 index 3792099..2af17d8 100644
5617 --- a/arch/sparc/kernel/unaligned_64.c
5618 +++ b/arch/sparc/kernel/unaligned_64.c
5619 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5620 if (count < 5) {
5621 last_time = jiffies;
5622 count++;
5623 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5624 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5625 regs->tpc, (void *) regs->tpc);
5626 }
5627 }
5628 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5629 index e75faf0..24f12f9 100644
5630 --- a/arch/sparc/lib/Makefile
5631 +++ b/arch/sparc/lib/Makefile
5632 @@ -2,7 +2,7 @@
5633 #
5634
5635 asflags-y := -ansi -DST_DIV0=0x02
5636 -ccflags-y := -Werror
5637 +#ccflags-y := -Werror
5638
5639 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5640 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5641 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5642 index 0268210..f0291ca 100644
5643 --- a/arch/sparc/lib/atomic_64.S
5644 +++ b/arch/sparc/lib/atomic_64.S
5645 @@ -18,7 +18,12 @@
5646 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5647 BACKOFF_SETUP(%o2)
5648 1: lduw [%o1], %g1
5649 - add %g1, %o0, %g7
5650 + addcc %g1, %o0, %g7
5651 +
5652 +#ifdef CONFIG_PAX_REFCOUNT
5653 + tvs %icc, 6
5654 +#endif
5655 +
5656 cas [%o1], %g1, %g7
5657 cmp %g1, %g7
5658 bne,pn %icc, 2f
5659 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5660 2: BACKOFF_SPIN(%o2, %o3, 1b)
5661 .size atomic_add, .-atomic_add
5662
5663 + .globl atomic_add_unchecked
5664 + .type atomic_add_unchecked,#function
5665 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5666 + BACKOFF_SETUP(%o2)
5667 +1: lduw [%o1], %g1
5668 + add %g1, %o0, %g7
5669 + cas [%o1], %g1, %g7
5670 + cmp %g1, %g7
5671 + bne,pn %icc, 2f
5672 + nop
5673 + retl
5674 + nop
5675 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5676 + .size atomic_add_unchecked, .-atomic_add_unchecked
5677 +
5678 .globl atomic_sub
5679 .type atomic_sub,#function
5680 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - sub %g1, %o0, %g7
5684 + subcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, 2f
5693 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_sub, .-atomic_sub
5696
5697 + .globl atomic_sub_unchecked
5698 + .type atomic_sub_unchecked,#function
5699 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + sub %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5711 +
5712 .globl atomic_add_ret
5713 .type atomic_add_ret,#function
5714 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - add %g1, %o0, %g7
5718 + addcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, 2f
5727 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_add_ret, .-atomic_add_ret
5730
5731 + .globl atomic_add_ret_unchecked
5732 + .type atomic_add_ret_unchecked,#function
5733 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + addcc %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + add %g7, %o0, %g7
5741 + sra %g7, 0, %o0
5742 + retl
5743 + nop
5744 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5745 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5746 +
5747 .globl atomic_sub_ret
5748 .type atomic_sub_ret,#function
5749 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5750 BACKOFF_SETUP(%o2)
5751 1: lduw [%o1], %g1
5752 - sub %g1, %o0, %g7
5753 + subcc %g1, %o0, %g7
5754 +
5755 +#ifdef CONFIG_PAX_REFCOUNT
5756 + tvs %icc, 6
5757 +#endif
5758 +
5759 cas [%o1], %g1, %g7
5760 cmp %g1, %g7
5761 bne,pn %icc, 2f
5762 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5763 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5764 BACKOFF_SETUP(%o2)
5765 1: ldx [%o1], %g1
5766 - add %g1, %o0, %g7
5767 + addcc %g1, %o0, %g7
5768 +
5769 +#ifdef CONFIG_PAX_REFCOUNT
5770 + tvs %xcc, 6
5771 +#endif
5772 +
5773 casx [%o1], %g1, %g7
5774 cmp %g1, %g7
5775 bne,pn %xcc, 2f
5776 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5777 2: BACKOFF_SPIN(%o2, %o3, 1b)
5778 .size atomic64_add, .-atomic64_add
5779
5780 + .globl atomic64_add_unchecked
5781 + .type atomic64_add_unchecked,#function
5782 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5783 + BACKOFF_SETUP(%o2)
5784 +1: ldx [%o1], %g1
5785 + addcc %g1, %o0, %g7
5786 + casx [%o1], %g1, %g7
5787 + cmp %g1, %g7
5788 + bne,pn %xcc, 2f
5789 + nop
5790 + retl
5791 + nop
5792 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5793 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5794 +
5795 .globl atomic64_sub
5796 .type atomic64_sub,#function
5797 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - sub %g1, %o0, %g7
5801 + subcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, 2f
5810 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_sub, .-atomic64_sub
5813
5814 + .globl atomic64_sub_unchecked
5815 + .type atomic64_sub_unchecked,#function
5816 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + subcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5828 +
5829 .globl atomic64_add_ret
5830 .type atomic64_add_ret,#function
5831 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - add %g1, %o0, %g7
5835 + addcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, 2f
5844 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_add_ret, .-atomic64_add_ret
5847
5848 + .globl atomic64_add_ret_unchecked
5849 + .type atomic64_add_ret_unchecked,#function
5850 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + addcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + add %g7, %o0, %g7
5858 + mov %g7, %o0
5859 + retl
5860 + nop
5861 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5862 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5863 +
5864 .globl atomic64_sub_ret
5865 .type atomic64_sub_ret,#function
5866 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5867 BACKOFF_SETUP(%o2)
5868 1: ldx [%o1], %g1
5869 - sub %g1, %o0, %g7
5870 + subcc %g1, %o0, %g7
5871 +
5872 +#ifdef CONFIG_PAX_REFCOUNT
5873 + tvs %xcc, 6
5874 +#endif
5875 +
5876 casx [%o1], %g1, %g7
5877 cmp %g1, %g7
5878 bne,pn %xcc, 2f
5879 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5880 index 704b126..2e79d76 100644
5881 --- a/arch/sparc/lib/ksyms.c
5882 +++ b/arch/sparc/lib/ksyms.c
5883 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5884
5885 /* Atomic counter implementation. */
5886 EXPORT_SYMBOL(atomic_add);
5887 +EXPORT_SYMBOL(atomic_add_unchecked);
5888 EXPORT_SYMBOL(atomic_add_ret);
5889 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5890 EXPORT_SYMBOL(atomic_sub);
5891 +EXPORT_SYMBOL(atomic_sub_unchecked);
5892 EXPORT_SYMBOL(atomic_sub_ret);
5893 EXPORT_SYMBOL(atomic64_add);
5894 +EXPORT_SYMBOL(atomic64_add_unchecked);
5895 EXPORT_SYMBOL(atomic64_add_ret);
5896 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5897 EXPORT_SYMBOL(atomic64_sub);
5898 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5899 EXPORT_SYMBOL(atomic64_sub_ret);
5900
5901 /* Atomic bit operations. */
5902 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5903 index 91a7d29..ce75c29 100644
5904 --- a/arch/sparc/lib/rwsem_64.S
5905 +++ b/arch/sparc/lib/rwsem_64.S
5906 @@ -11,7 +11,12 @@
5907 .globl __down_read
5908 __down_read:
5909 1: lduw [%o0], %g1
5910 - add %g1, 1, %g7
5911 + addcc %g1, 1, %g7
5912 +
5913 +#ifdef CONFIG_PAX_REFCOUNT
5914 + tvs %icc, 6
5915 +#endif
5916 +
5917 cas [%o0], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %icc, 1b
5920 @@ -33,7 +38,12 @@ __down_read:
5921 .globl __down_read_trylock
5922 __down_read_trylock:
5923 1: lduw [%o0], %g1
5924 - add %g1, 1, %g7
5925 + addcc %g1, 1, %g7
5926 +
5927 +#ifdef CONFIG_PAX_REFCOUNT
5928 + tvs %icc, 6
5929 +#endif
5930 +
5931 cmp %g7, 0
5932 bl,pn %icc, 2f
5933 mov 0, %o1
5934 @@ -51,7 +61,12 @@ __down_write:
5935 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5936 1:
5937 lduw [%o0], %g3
5938 - add %g3, %g1, %g7
5939 + addcc %g3, %g1, %g7
5940 +
5941 +#ifdef CONFIG_PAX_REFCOUNT
5942 + tvs %icc, 6
5943 +#endif
5944 +
5945 cas [%o0], %g3, %g7
5946 cmp %g3, %g7
5947 bne,pn %icc, 1b
5948 @@ -77,7 +92,12 @@ __down_write_trylock:
5949 cmp %g3, 0
5950 bne,pn %icc, 2f
5951 mov 0, %o1
5952 - add %g3, %g1, %g7
5953 + addcc %g3, %g1, %g7
5954 +
5955 +#ifdef CONFIG_PAX_REFCOUNT
5956 + tvs %icc, 6
5957 +#endif
5958 +
5959 cas [%o0], %g3, %g7
5960 cmp %g3, %g7
5961 bne,pn %icc, 1b
5962 @@ -90,7 +110,12 @@ __down_write_trylock:
5963 __up_read:
5964 1:
5965 lduw [%o0], %g1
5966 - sub %g1, 1, %g7
5967 + subcc %g1, 1, %g7
5968 +
5969 +#ifdef CONFIG_PAX_REFCOUNT
5970 + tvs %icc, 6
5971 +#endif
5972 +
5973 cas [%o0], %g1, %g7
5974 cmp %g1, %g7
5975 bne,pn %icc, 1b
5976 @@ -118,7 +143,12 @@ __up_write:
5977 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5978 1:
5979 lduw [%o0], %g3
5980 - sub %g3, %g1, %g7
5981 + subcc %g3, %g1, %g7
5982 +
5983 +#ifdef CONFIG_PAX_REFCOUNT
5984 + tvs %icc, 6
5985 +#endif
5986 +
5987 cas [%o0], %g3, %g7
5988 cmp %g3, %g7
5989 bne,pn %icc, 1b
5990 @@ -143,7 +173,12 @@ __downgrade_write:
5991 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5992 1:
5993 lduw [%o0], %g3
5994 - sub %g3, %g1, %g7
5995 + subcc %g3, %g1, %g7
5996 +
5997 +#ifdef CONFIG_PAX_REFCOUNT
5998 + tvs %icc, 6
5999 +#endif
6000 +
6001 cas [%o0], %g3, %g7
6002 cmp %g3, %g7
6003 bne,pn %icc, 1b
6004 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6005 index 79836a7..62f47a2 100644
6006 --- a/arch/sparc/mm/Makefile
6007 +++ b/arch/sparc/mm/Makefile
6008 @@ -2,7 +2,7 @@
6009 #
6010
6011 asflags-y := -ansi
6012 -ccflags-y := -Werror
6013 +#ccflags-y := -Werror
6014
6015 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6016 obj-y += fault_$(BITS).o
6017 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6018 index b99f81c..3453e93 100644
6019 --- a/arch/sparc/mm/fault_32.c
6020 +++ b/arch/sparc/mm/fault_32.c
6021 @@ -21,6 +21,9 @@
6022 #include <linux/interrupt.h>
6023 #include <linux/module.h>
6024 #include <linux/kdebug.h>
6025 +#include <linux/slab.h>
6026 +#include <linux/pagemap.h>
6027 +#include <linux/compiler.h>
6028
6029 #include <asm/system.h>
6030 #include <asm/page.h>
6031 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6032 return safe_compute_effective_address(regs, insn);
6033 }
6034
6035 +#ifdef CONFIG_PAX_PAGEEXEC
6036 +#ifdef CONFIG_PAX_DLRESOLVE
6037 +static void pax_emuplt_close(struct vm_area_struct *vma)
6038 +{
6039 + vma->vm_mm->call_dl_resolve = 0UL;
6040 +}
6041 +
6042 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6043 +{
6044 + unsigned int *kaddr;
6045 +
6046 + vmf->page = alloc_page(GFP_HIGHUSER);
6047 + if (!vmf->page)
6048 + return VM_FAULT_OOM;
6049 +
6050 + kaddr = kmap(vmf->page);
6051 + memset(kaddr, 0, PAGE_SIZE);
6052 + kaddr[0] = 0x9DE3BFA8U; /* save */
6053 + flush_dcache_page(vmf->page);
6054 + kunmap(vmf->page);
6055 + return VM_FAULT_MAJOR;
6056 +}
6057 +
6058 +static const struct vm_operations_struct pax_vm_ops = {
6059 + .close = pax_emuplt_close,
6060 + .fault = pax_emuplt_fault
6061 +};
6062 +
6063 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6064 +{
6065 + int ret;
6066 +
6067 + vma->vm_mm = current->mm;
6068 + vma->vm_start = addr;
6069 + vma->vm_end = addr + PAGE_SIZE;
6070 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6071 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6072 + vma->vm_ops = &pax_vm_ops;
6073 +
6074 + ret = insert_vm_struct(current->mm, vma);
6075 + if (ret)
6076 + return ret;
6077 +
6078 + ++current->mm->total_vm;
6079 + return 0;
6080 +}
6081 +#endif
6082 +
6083 +/*
6084 + * PaX: decide what to do with offenders (regs->pc = fault address)
6085 + *
6086 + * returns 1 when task should be killed
6087 + * 2 when patched PLT trampoline was detected
6088 + * 3 when unpatched PLT trampoline was detected
6089 + */
6090 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6091 +{
6092 +
6093 +#ifdef CONFIG_PAX_EMUPLT
6094 + int err;
6095 +
6096 + do { /* PaX: patched PLT emulation #1 */
6097 + unsigned int sethi1, sethi2, jmpl;
6098 +
6099 + err = get_user(sethi1, (unsigned int *)regs->pc);
6100 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6101 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6102 +
6103 + if (err)
6104 + break;
6105 +
6106 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6107 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6108 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6109 + {
6110 + unsigned int addr;
6111 +
6112 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6113 + addr = regs->u_regs[UREG_G1];
6114 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6115 + regs->pc = addr;
6116 + regs->npc = addr+4;
6117 + return 2;
6118 + }
6119 + } while (0);
6120 +
6121 + { /* PaX: patched PLT emulation #2 */
6122 + unsigned int ba;
6123 +
6124 + err = get_user(ba, (unsigned int *)regs->pc);
6125 +
6126 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6127 + unsigned int addr;
6128 +
6129 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6130 + regs->pc = addr;
6131 + regs->npc = addr+4;
6132 + return 2;
6133 + }
6134 + }
6135 +
6136 + do { /* PaX: patched PLT emulation #3 */
6137 + unsigned int sethi, jmpl, nop;
6138 +
6139 + err = get_user(sethi, (unsigned int *)regs->pc);
6140 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6141 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6142 +
6143 + if (err)
6144 + break;
6145 +
6146 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6147 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6148 + nop == 0x01000000U)
6149 + {
6150 + unsigned int addr;
6151 +
6152 + addr = (sethi & 0x003FFFFFU) << 10;
6153 + regs->u_regs[UREG_G1] = addr;
6154 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6155 + regs->pc = addr;
6156 + regs->npc = addr+4;
6157 + return 2;
6158 + }
6159 + } while (0);
6160 +
6161 + do { /* PaX: unpatched PLT emulation step 1 */
6162 + unsigned int sethi, ba, nop;
6163 +
6164 + err = get_user(sethi, (unsigned int *)regs->pc);
6165 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6166 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6167 +
6168 + if (err)
6169 + break;
6170 +
6171 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6172 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6173 + nop == 0x01000000U)
6174 + {
6175 + unsigned int addr, save, call;
6176 +
6177 + if ((ba & 0xFFC00000U) == 0x30800000U)
6178 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6179 + else
6180 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6181 +
6182 + err = get_user(save, (unsigned int *)addr);
6183 + err |= get_user(call, (unsigned int *)(addr+4));
6184 + err |= get_user(nop, (unsigned int *)(addr+8));
6185 + if (err)
6186 + break;
6187 +
6188 +#ifdef CONFIG_PAX_DLRESOLVE
6189 + if (save == 0x9DE3BFA8U &&
6190 + (call & 0xC0000000U) == 0x40000000U &&
6191 + nop == 0x01000000U)
6192 + {
6193 + struct vm_area_struct *vma;
6194 + unsigned long call_dl_resolve;
6195 +
6196 + down_read(&current->mm->mmap_sem);
6197 + call_dl_resolve = current->mm->call_dl_resolve;
6198 + up_read(&current->mm->mmap_sem);
6199 + if (likely(call_dl_resolve))
6200 + goto emulate;
6201 +
6202 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6203 +
6204 + down_write(&current->mm->mmap_sem);
6205 + if (current->mm->call_dl_resolve) {
6206 + call_dl_resolve = current->mm->call_dl_resolve;
6207 + up_write(&current->mm->mmap_sem);
6208 + if (vma)
6209 + kmem_cache_free(vm_area_cachep, vma);
6210 + goto emulate;
6211 + }
6212 +
6213 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6214 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6215 + up_write(&current->mm->mmap_sem);
6216 + if (vma)
6217 + kmem_cache_free(vm_area_cachep, vma);
6218 + return 1;
6219 + }
6220 +
6221 + if (pax_insert_vma(vma, call_dl_resolve)) {
6222 + up_write(&current->mm->mmap_sem);
6223 + kmem_cache_free(vm_area_cachep, vma);
6224 + return 1;
6225 + }
6226 +
6227 + current->mm->call_dl_resolve = call_dl_resolve;
6228 + up_write(&current->mm->mmap_sem);
6229 +
6230 +emulate:
6231 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6232 + regs->pc = call_dl_resolve;
6233 + regs->npc = addr+4;
6234 + return 3;
6235 + }
6236 +#endif
6237 +
6238 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6239 + if ((save & 0xFFC00000U) == 0x05000000U &&
6240 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6241 + nop == 0x01000000U)
6242 + {
6243 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6244 + regs->u_regs[UREG_G2] = addr + 4;
6245 + addr = (save & 0x003FFFFFU) << 10;
6246 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6247 + regs->pc = addr;
6248 + regs->npc = addr+4;
6249 + return 3;
6250 + }
6251 + }
6252 + } while (0);
6253 +
6254 + do { /* PaX: unpatched PLT emulation step 2 */
6255 + unsigned int save, call, nop;
6256 +
6257 + err = get_user(save, (unsigned int *)(regs->pc-4));
6258 + err |= get_user(call, (unsigned int *)regs->pc);
6259 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6260 + if (err)
6261 + break;
6262 +
6263 + if (save == 0x9DE3BFA8U &&
6264 + (call & 0xC0000000U) == 0x40000000U &&
6265 + nop == 0x01000000U)
6266 + {
6267 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6268 +
6269 + regs->u_regs[UREG_RETPC] = regs->pc;
6270 + regs->pc = dl_resolve;
6271 + regs->npc = dl_resolve+4;
6272 + return 3;
6273 + }
6274 + } while (0);
6275 +#endif
6276 +
6277 + return 1;
6278 +}
6279 +
6280 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6281 +{
6282 + unsigned long i;
6283 +
6284 + printk(KERN_ERR "PAX: bytes at PC: ");
6285 + for (i = 0; i < 8; i++) {
6286 + unsigned int c;
6287 + if (get_user(c, (unsigned int *)pc+i))
6288 + printk(KERN_CONT "???????? ");
6289 + else
6290 + printk(KERN_CONT "%08x ", c);
6291 + }
6292 + printk("\n");
6293 +}
6294 +#endif
6295 +
6296 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6297 unsigned long address)
6298 {
6299 @@ -231,6 +495,24 @@ good_area:
6300 if(!(vma->vm_flags & VM_WRITE))
6301 goto bad_area;
6302 } else {
6303 +
6304 +#ifdef CONFIG_PAX_PAGEEXEC
6305 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6306 + up_read(&mm->mmap_sem);
6307 + switch (pax_handle_fetch_fault(regs)) {
6308 +
6309 +#ifdef CONFIG_PAX_EMUPLT
6310 + case 2:
6311 + case 3:
6312 + return;
6313 +#endif
6314 +
6315 + }
6316 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6317 + do_group_exit(SIGKILL);
6318 + }
6319 +#endif
6320 +
6321 /* Allow reads even for write-only mappings */
6322 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6323 goto bad_area;
6324 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6325 index 43b0da9..a0b78f9 100644
6326 --- a/arch/sparc/mm/fault_64.c
6327 +++ b/arch/sparc/mm/fault_64.c
6328 @@ -20,6 +20,9 @@
6329 #include <linux/kprobes.h>
6330 #include <linux/kdebug.h>
6331 #include <linux/percpu.h>
6332 +#include <linux/slab.h>
6333 +#include <linux/pagemap.h>
6334 +#include <linux/compiler.h>
6335
6336 #include <asm/page.h>
6337 #include <asm/pgtable.h>
6338 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6339 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6340 regs->tpc);
6341 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6342 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6343 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6344 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6345 dump_stack();
6346 unhandled_fault(regs->tpc, current, regs);
6347 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6348 show_regs(regs);
6349 }
6350
6351 +#ifdef CONFIG_PAX_PAGEEXEC
6352 +#ifdef CONFIG_PAX_DLRESOLVE
6353 +static void pax_emuplt_close(struct vm_area_struct *vma)
6354 +{
6355 + vma->vm_mm->call_dl_resolve = 0UL;
6356 +}
6357 +
6358 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6359 +{
6360 + unsigned int *kaddr;
6361 +
6362 + vmf->page = alloc_page(GFP_HIGHUSER);
6363 + if (!vmf->page)
6364 + return VM_FAULT_OOM;
6365 +
6366 + kaddr = kmap(vmf->page);
6367 + memset(kaddr, 0, PAGE_SIZE);
6368 + kaddr[0] = 0x9DE3BFA8U; /* save */
6369 + flush_dcache_page(vmf->page);
6370 + kunmap(vmf->page);
6371 + return VM_FAULT_MAJOR;
6372 +}
6373 +
6374 +static const struct vm_operations_struct pax_vm_ops = {
6375 + .close = pax_emuplt_close,
6376 + .fault = pax_emuplt_fault
6377 +};
6378 +
6379 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6380 +{
6381 + int ret;
6382 +
6383 + vma->vm_mm = current->mm;
6384 + vma->vm_start = addr;
6385 + vma->vm_end = addr + PAGE_SIZE;
6386 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6387 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6388 + vma->vm_ops = &pax_vm_ops;
6389 +
6390 + ret = insert_vm_struct(current->mm, vma);
6391 + if (ret)
6392 + return ret;
6393 +
6394 + ++current->mm->total_vm;
6395 + return 0;
6396 +}
6397 +#endif
6398 +
6399 +/*
6400 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6401 + *
6402 + * returns 1 when task should be killed
6403 + * 2 when patched PLT trampoline was detected
6404 + * 3 when unpatched PLT trampoline was detected
6405 + */
6406 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6407 +{
6408 +
6409 +#ifdef CONFIG_PAX_EMUPLT
6410 + int err;
6411 +
6412 + do { /* PaX: patched PLT emulation #1 */
6413 + unsigned int sethi1, sethi2, jmpl;
6414 +
6415 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6416 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6417 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6418 +
6419 + if (err)
6420 + break;
6421 +
6422 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6423 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6424 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6425 + {
6426 + unsigned long addr;
6427 +
6428 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6429 + addr = regs->u_regs[UREG_G1];
6430 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6431 +
6432 + if (test_thread_flag(TIF_32BIT))
6433 + addr &= 0xFFFFFFFFUL;
6434 +
6435 + regs->tpc = addr;
6436 + regs->tnpc = addr+4;
6437 + return 2;
6438 + }
6439 + } while (0);
6440 +
6441 + { /* PaX: patched PLT emulation #2 */
6442 + unsigned int ba;
6443 +
6444 + err = get_user(ba, (unsigned int *)regs->tpc);
6445 +
6446 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6447 + unsigned long addr;
6448 +
6449 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6450 +
6451 + if (test_thread_flag(TIF_32BIT))
6452 + addr &= 0xFFFFFFFFUL;
6453 +
6454 + regs->tpc = addr;
6455 + regs->tnpc = addr+4;
6456 + return 2;
6457 + }
6458 + }
6459 +
6460 + do { /* PaX: patched PLT emulation #3 */
6461 + unsigned int sethi, jmpl, nop;
6462 +
6463 + err = get_user(sethi, (unsigned int *)regs->tpc);
6464 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6465 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6466 +
6467 + if (err)
6468 + break;
6469 +
6470 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6471 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6472 + nop == 0x01000000U)
6473 + {
6474 + unsigned long addr;
6475 +
6476 + addr = (sethi & 0x003FFFFFU) << 10;
6477 + regs->u_regs[UREG_G1] = addr;
6478 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6479 +
6480 + if (test_thread_flag(TIF_32BIT))
6481 + addr &= 0xFFFFFFFFUL;
6482 +
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #4 */
6490 + unsigned int sethi, mov1, call, mov2;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6496 +
6497 + if (err)
6498 + break;
6499 +
6500 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6501 + mov1 == 0x8210000FU &&
6502 + (call & 0xC0000000U) == 0x40000000U &&
6503 + mov2 == 0x9E100001U)
6504 + {
6505 + unsigned long addr;
6506 +
6507 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6508 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6509 +
6510 + if (test_thread_flag(TIF_32BIT))
6511 + addr &= 0xFFFFFFFFUL;
6512 +
6513 + regs->tpc = addr;
6514 + regs->tnpc = addr+4;
6515 + return 2;
6516 + }
6517 + } while (0);
6518 +
6519 + do { /* PaX: patched PLT emulation #5 */
6520 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6521 +
6522 + err = get_user(sethi, (unsigned int *)regs->tpc);
6523 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6524 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6525 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6526 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6527 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6528 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6529 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6530 +
6531 + if (err)
6532 + break;
6533 +
6534 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6535 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6536 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6537 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6538 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6539 + sllx == 0x83287020U &&
6540 + jmpl == 0x81C04005U &&
6541 + nop == 0x01000000U)
6542 + {
6543 + unsigned long addr;
6544 +
6545 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6546 + regs->u_regs[UREG_G1] <<= 32;
6547 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6548 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6549 + regs->tpc = addr;
6550 + regs->tnpc = addr+4;
6551 + return 2;
6552 + }
6553 + } while (0);
6554 +
6555 + do { /* PaX: patched PLT emulation #6 */
6556 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6557 +
6558 + err = get_user(sethi, (unsigned int *)regs->tpc);
6559 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6560 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6561 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6562 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6563 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6564 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6565 +
6566 + if (err)
6567 + break;
6568 +
6569 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6570 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6571 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6572 + sllx == 0x83287020U &&
6573 + (or & 0xFFFFE000U) == 0x8A116000U &&
6574 + jmpl == 0x81C04005U &&
6575 + nop == 0x01000000U)
6576 + {
6577 + unsigned long addr;
6578 +
6579 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6580 + regs->u_regs[UREG_G1] <<= 32;
6581 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6582 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6583 + regs->tpc = addr;
6584 + regs->tnpc = addr+4;
6585 + return 2;
6586 + }
6587 + } while (0);
6588 +
6589 + do { /* PaX: unpatched PLT emulation step 1 */
6590 + unsigned int sethi, ba, nop;
6591 +
6592 + err = get_user(sethi, (unsigned int *)regs->tpc);
6593 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6594 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6595 +
6596 + if (err)
6597 + break;
6598 +
6599 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6600 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6601 + nop == 0x01000000U)
6602 + {
6603 + unsigned long addr;
6604 + unsigned int save, call;
6605 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6606 +
6607 + if ((ba & 0xFFC00000U) == 0x30800000U)
6608 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6609 + else
6610 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6611 +
6612 + if (test_thread_flag(TIF_32BIT))
6613 + addr &= 0xFFFFFFFFUL;
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->tpc = call_dl_resolve;
6666 + regs->tnpc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6680 +
6681 + if (test_thread_flag(TIF_32BIT))
6682 + addr &= 0xFFFFFFFFUL;
6683 +
6684 + regs->tpc = addr;
6685 + regs->tnpc = addr+4;
6686 + return 3;
6687 + }
6688 +
6689 + /* PaX: 64-bit PLT stub */
6690 + err = get_user(sethi1, (unsigned int *)addr);
6691 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6692 + err |= get_user(or1, (unsigned int *)(addr+8));
6693 + err |= get_user(or2, (unsigned int *)(addr+12));
6694 + err |= get_user(sllx, (unsigned int *)(addr+16));
6695 + err |= get_user(add, (unsigned int *)(addr+20));
6696 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6697 + err |= get_user(nop, (unsigned int *)(addr+28));
6698 + if (err)
6699 + break;
6700 +
6701 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6702 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6703 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6704 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6705 + sllx == 0x89293020U &&
6706 + add == 0x8A010005U &&
6707 + jmpl == 0x89C14000U &&
6708 + nop == 0x01000000U)
6709 + {
6710 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6712 + regs->u_regs[UREG_G4] <<= 32;
6713 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6714 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6715 + regs->u_regs[UREG_G4] = addr + 24;
6716 + addr = regs->u_regs[UREG_G5];
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 3;
6720 + }
6721 + }
6722 + } while (0);
6723 +
6724 +#ifdef CONFIG_PAX_DLRESOLVE
6725 + do { /* PaX: unpatched PLT emulation step 2 */
6726 + unsigned int save, call, nop;
6727 +
6728 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6729 + err |= get_user(call, (unsigned int *)regs->tpc);
6730 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6731 + if (err)
6732 + break;
6733 +
6734 + if (save == 0x9DE3BFA8U &&
6735 + (call & 0xC0000000U) == 0x40000000U &&
6736 + nop == 0x01000000U)
6737 + {
6738 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6739 +
6740 + if (test_thread_flag(TIF_32BIT))
6741 + dl_resolve &= 0xFFFFFFFFUL;
6742 +
6743 + regs->u_regs[UREG_RETPC] = regs->tpc;
6744 + regs->tpc = dl_resolve;
6745 + regs->tnpc = dl_resolve+4;
6746 + return 3;
6747 + }
6748 + } while (0);
6749 +#endif
6750 +
6751 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6752 + unsigned int sethi, ba, nop;
6753 +
6754 + err = get_user(sethi, (unsigned int *)regs->tpc);
6755 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6756 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6757 +
6758 + if (err)
6759 + break;
6760 +
6761 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6762 + (ba & 0xFFF00000U) == 0x30600000U &&
6763 + nop == 0x01000000U)
6764 + {
6765 + unsigned long addr;
6766 +
6767 + addr = (sethi & 0x003FFFFFU) << 10;
6768 + regs->u_regs[UREG_G1] = addr;
6769 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6770 +
6771 + if (test_thread_flag(TIF_32BIT))
6772 + addr &= 0xFFFFFFFFUL;
6773 +
6774 + regs->tpc = addr;
6775 + regs->tnpc = addr+4;
6776 + return 2;
6777 + }
6778 + } while (0);
6779 +
6780 +#endif
6781 +
6782 + return 1;
6783 +}
6784 +
6785 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6786 +{
6787 + unsigned long i;
6788 +
6789 + printk(KERN_ERR "PAX: bytes at PC: ");
6790 + for (i = 0; i < 8; i++) {
6791 + unsigned int c;
6792 + if (get_user(c, (unsigned int *)pc+i))
6793 + printk(KERN_CONT "???????? ");
6794 + else
6795 + printk(KERN_CONT "%08x ", c);
6796 + }
6797 + printk("\n");
6798 +}
6799 +#endif
6800 +
6801 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6802 {
6803 struct mm_struct *mm = current->mm;
6804 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6805 if (!vma)
6806 goto bad_area;
6807
6808 +#ifdef CONFIG_PAX_PAGEEXEC
6809 + /* PaX: detect ITLB misses on non-exec pages */
6810 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6811 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6812 + {
6813 + if (address != regs->tpc)
6814 + goto good_area;
6815 +
6816 + up_read(&mm->mmap_sem);
6817 + switch (pax_handle_fetch_fault(regs)) {
6818 +
6819 +#ifdef CONFIG_PAX_EMUPLT
6820 + case 2:
6821 + case 3:
6822 + return;
6823 +#endif
6824 +
6825 + }
6826 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6827 + do_group_exit(SIGKILL);
6828 + }
6829 +#endif
6830 +
6831 /* Pure DTLB misses do not tell us whether the fault causing
6832 * load/store/atomic was a write or not, it only says that there
6833 * was no match. So in such a case we (carefully) read the
6834 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6835 index f27d103..1b06377 100644
6836 --- a/arch/sparc/mm/hugetlbpage.c
6837 +++ b/arch/sparc/mm/hugetlbpage.c
6838 @@ -69,7 +69,7 @@ full_search:
6839 }
6840 return -ENOMEM;
6841 }
6842 - if (likely(!vma || addr + len <= vma->vm_start)) {
6843 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6844 /*
6845 * Remember the place where we stopped the search:
6846 */
6847 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 /* make sure it can fit in the remaining address space */
6849 if (likely(addr > len)) {
6850 vma = find_vma(mm, addr-len);
6851 - if (!vma || addr <= vma->vm_start) {
6852 + if (check_heap_stack_gap(vma, addr - len, len)) {
6853 /* remember the address as a hint for next time */
6854 return (mm->free_area_cache = addr-len);
6855 }
6856 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 if (unlikely(mm->mmap_base < len))
6858 goto bottomup;
6859
6860 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6861 + addr = mm->mmap_base - len;
6862
6863 do {
6864 + addr &= HPAGE_MASK;
6865 /*
6866 * Lookup failure means no vma is above this address,
6867 * else if new region fits below vma->vm_start,
6868 * return with success:
6869 */
6870 vma = find_vma(mm, addr);
6871 - if (likely(!vma || addr+len <= vma->vm_start)) {
6872 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6873 /* remember the address as a hint for next time */
6874 return (mm->free_area_cache = addr);
6875 }
6876 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6877 mm->cached_hole_size = vma->vm_start - addr;
6878
6879 /* try just below the current vma->vm_start */
6880 - addr = (vma->vm_start-len) & HPAGE_MASK;
6881 - } while (likely(len < vma->vm_start));
6882 + addr = skip_heap_stack_gap(vma, len);
6883 + } while (!IS_ERR_VALUE(addr));
6884
6885 bottomup:
6886 /*
6887 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6888 if (addr) {
6889 addr = ALIGN(addr, HPAGE_SIZE);
6890 vma = find_vma(mm, addr);
6891 - if (task_size - len >= addr &&
6892 - (!vma || addr + len <= vma->vm_start))
6893 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6894 return addr;
6895 }
6896 if (mm->get_unmapped_area == arch_get_unmapped_area)
6897 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6898 index dc7c3b1..34c0070 100644
6899 --- a/arch/sparc/mm/init_32.c
6900 +++ b/arch/sparc/mm/init_32.c
6901 @@ -317,6 +317,9 @@ extern void device_scan(void);
6902 pgprot_t PAGE_SHARED __read_mostly;
6903 EXPORT_SYMBOL(PAGE_SHARED);
6904
6905 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6906 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6907 +
6908 void __init paging_init(void)
6909 {
6910 switch(sparc_cpu_model) {
6911 @@ -345,17 +348,17 @@ void __init paging_init(void)
6912
6913 /* Initialize the protection map with non-constant, MMU dependent values. */
6914 protection_map[0] = PAGE_NONE;
6915 - protection_map[1] = PAGE_READONLY;
6916 - protection_map[2] = PAGE_COPY;
6917 - protection_map[3] = PAGE_COPY;
6918 + protection_map[1] = PAGE_READONLY_NOEXEC;
6919 + protection_map[2] = PAGE_COPY_NOEXEC;
6920 + protection_map[3] = PAGE_COPY_NOEXEC;
6921 protection_map[4] = PAGE_READONLY;
6922 protection_map[5] = PAGE_READONLY;
6923 protection_map[6] = PAGE_COPY;
6924 protection_map[7] = PAGE_COPY;
6925 protection_map[8] = PAGE_NONE;
6926 - protection_map[9] = PAGE_READONLY;
6927 - protection_map[10] = PAGE_SHARED;
6928 - protection_map[11] = PAGE_SHARED;
6929 + protection_map[9] = PAGE_READONLY_NOEXEC;
6930 + protection_map[10] = PAGE_SHARED_NOEXEC;
6931 + protection_map[11] = PAGE_SHARED_NOEXEC;
6932 protection_map[12] = PAGE_READONLY;
6933 protection_map[13] = PAGE_READONLY;
6934 protection_map[14] = PAGE_SHARED;
6935 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6936 index 509b1ff..bfd7118 100644
6937 --- a/arch/sparc/mm/srmmu.c
6938 +++ b/arch/sparc/mm/srmmu.c
6939 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6940 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6941 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6942 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6943 +
6944 +#ifdef CONFIG_PAX_PAGEEXEC
6945 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6946 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6947 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6948 +#endif
6949 +
6950 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6951 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6952
6953 diff --git a/arch/um/Makefile b/arch/um/Makefile
6954 index fc633db..5e1a1c2 100644
6955 --- a/arch/um/Makefile
6956 +++ b/arch/um/Makefile
6957 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6958 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6959 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6960
6961 +ifdef CONSTIFY_PLUGIN
6962 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6963 +endif
6964 +
6965 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6966
6967 #This will adjust *FLAGS accordingly to the platform.
6968 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969 index 6c03acd..a5e0215 100644
6970 --- a/arch/um/include/asm/kmap_types.h
6971 +++ b/arch/um/include/asm/kmap_types.h
6972 @@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976 + KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981 index 4cc9b6c..02e5029 100644
6982 --- a/arch/um/include/asm/page.h
6983 +++ b/arch/um/include/asm/page.h
6984 @@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988 +#define ktla_ktva(addr) (addr)
6989 +#define ktva_ktla(addr) (addr)
6990 +
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995 index 4a28a15..654dc2a 100644
6996 --- a/arch/um/kernel/process.c
6997 +++ b/arch/um/kernel/process.c
6998 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002 -/*
7003 - * Only x86 and x86_64 have an arch_align_stack().
7004 - * All other arches have "#define arch_align_stack(x) (x)"
7005 - * in their asm/system.h
7006 - * As this is included in UML from asm-um/system-generic.h,
7007 - * we can use it to behave as the subarch does.
7008 - */
7009 -#ifndef arch_align_stack
7010 -unsigned long arch_align_stack(unsigned long sp)
7011 -{
7012 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013 - sp -= get_random_int() % 8192;
7014 - return sp & ~0xf;
7015 -}
7016 -#endif
7017 -
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7022 index d1b93c4..ae1b7fd 100644
7023 --- a/arch/um/sys-i386/shared/sysdep/system.h
7024 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7025 @@ -17,7 +17,7 @@
7026 # define AT_VECTOR_SIZE_ARCH 1
7027 #endif
7028
7029 -extern unsigned long arch_align_stack(unsigned long sp);
7030 +#define arch_align_stack(x) ((x) & ~0xfUL)
7031
7032 void default_idle(void);
7033
7034 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7035 index 857ca0b..9a2669d 100644
7036 --- a/arch/um/sys-i386/syscalls.c
7037 +++ b/arch/um/sys-i386/syscalls.c
7038 @@ -11,6 +11,21 @@
7039 #include "asm/uaccess.h"
7040 #include "asm/unistd.h"
7041
7042 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7043 +{
7044 + unsigned long pax_task_size = TASK_SIZE;
7045 +
7046 +#ifdef CONFIG_PAX_SEGMEXEC
7047 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7048 + pax_task_size = SEGMEXEC_TASK_SIZE;
7049 +#endif
7050 +
7051 + if (len > pax_task_size || addr > pax_task_size - len)
7052 + return -EINVAL;
7053 +
7054 + return 0;
7055 +}
7056 +
7057 /*
7058 * Perform the select(nd, in, out, ex, tv) and mmap() system
7059 * calls. Linux/i386 didn't use to be able to handle more than
7060 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7061 index d1b93c4..ae1b7fd 100644
7062 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7063 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7064 @@ -17,7 +17,7 @@
7065 # define AT_VECTOR_SIZE_ARCH 1
7066 #endif
7067
7068 -extern unsigned long arch_align_stack(unsigned long sp);
7069 +#define arch_align_stack(x) ((x) & ~0xfUL)
7070
7071 void default_idle(void);
7072
7073 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7074 index 73ae02a..f932de5 100644
7075 --- a/arch/x86/Kconfig
7076 +++ b/arch/x86/Kconfig
7077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7078
7079 config X86_32_LAZY_GS
7080 def_bool y
7081 - depends on X86_32 && !CC_STACKPROTECTOR
7082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7083
7084 config KTIME_SCALAR
7085 def_bool X86_32
7086 @@ -1008,7 +1008,7 @@ choice
7087
7088 config NOHIGHMEM
7089 bool "off"
7090 - depends on !X86_NUMAQ
7091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7094 However, the address space of 32-bit x86 processors is only 4
7095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7096
7097 config HIGHMEM4G
7098 bool "4GB"
7099 - depends on !X86_NUMAQ
7100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7101 ---help---
7102 Select this if you have a 32-bit processor and between 1 and 4
7103 gigabytes of physical RAM.
7104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7105 hex
7106 default 0xB0000000 if VMSPLIT_3G_OPT
7107 default 0x80000000 if VMSPLIT_2G
7108 - default 0x78000000 if VMSPLIT_2G_OPT
7109 + default 0x70000000 if VMSPLIT_2G_OPT
7110 default 0x40000000 if VMSPLIT_1G
7111 default 0xC0000000
7112 depends on X86_32
7113 @@ -1460,6 +1460,7 @@ config SECCOMP
7114
7115 config CC_STACKPROTECTOR
7116 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7117 + depends on X86_64 || !PAX_MEMORY_UDEREF
7118 ---help---
7119 This option turns on the -fstack-protector GCC feature. This
7120 feature puts, at the beginning of functions, a canary value on
7121 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7122 config PHYSICAL_START
7123 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7124 default "0x1000000"
7125 + range 0x400000 0x40000000
7126 ---help---
7127 This gives the physical address where the kernel is loaded.
7128
7129 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7130 hex
7131 prompt "Alignment value to which kernel should be aligned" if X86_32
7132 default "0x1000000"
7133 + range 0x400000 0x1000000 if PAX_KERNEXEC
7134 range 0x2000 0x1000000
7135 ---help---
7136 This value puts the alignment restrictions on physical address
7137 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7138 Say N if you want to disable CPU hotplug.
7139
7140 config COMPAT_VDSO
7141 - def_bool y
7142 + def_bool n
7143 prompt "Compat VDSO support"
7144 depends on X86_32 || IA32_EMULATION
7145 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7146 ---help---
7147 Map the 32-bit VDSO to the predictable old-style address too.
7148 ---help---
7149 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7150 index 0e566103..1a6b57e 100644
7151 --- a/arch/x86/Kconfig.cpu
7152 +++ b/arch/x86/Kconfig.cpu
7153 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7154
7155 config X86_F00F_BUG
7156 def_bool y
7157 - depends on M586MMX || M586TSC || M586 || M486 || M386
7158 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7159
7160 config X86_WP_WORKS_OK
7161 def_bool y
7162 @@ -360,7 +360,7 @@ config X86_POPAD_OK
7163
7164 config X86_ALIGNMENT_16
7165 def_bool y
7166 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7167 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7168
7169 config X86_INTEL_USERCOPY
7170 def_bool y
7171 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
7172 # generates cmov.
7173 config X86_CMOV
7174 def_bool y
7175 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7176 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7177
7178 config X86_MINIMUM_CPU_FAMILY
7179 int
7180 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7181 index d105f29..c928727 100644
7182 --- a/arch/x86/Kconfig.debug
7183 +++ b/arch/x86/Kconfig.debug
7184 @@ -99,7 +99,7 @@ config X86_PTDUMP
7185 config DEBUG_RODATA
7186 bool "Write protect kernel read-only data structures"
7187 default y
7188 - depends on DEBUG_KERNEL
7189 + depends on DEBUG_KERNEL && BROKEN
7190 ---help---
7191 Mark the kernel read-only data as write-protected in the pagetables,
7192 in order to catch accidental (and incorrect) writes to such const
7193 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7194 index d2d24c9..0f21f8d 100644
7195 --- a/arch/x86/Makefile
7196 +++ b/arch/x86/Makefile
7197 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7198 else
7199 BITS := 64
7200 UTS_MACHINE := x86_64
7201 + biarch := $(call cc-option,-m64)
7202 CHECKFLAGS += -D__x86_64__ -m64
7203
7204 KBUILD_AFLAGS += -m64
7205 @@ -189,3 +190,12 @@ define archhelp
7206 echo ' FDARGS="..." arguments for the booted kernel'
7207 echo ' FDINITRD=file initrd for the booted kernel'
7208 endef
7209 +
7210 +define OLD_LD
7211 +
7212 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7213 +*** Please upgrade your binutils to 2.18 or newer
7214 +endef
7215 +
7216 +archprepare:
7217 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7218 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7219 index ec749c2..bbb5319 100644
7220 --- a/arch/x86/boot/Makefile
7221 +++ b/arch/x86/boot/Makefile
7222 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7223 $(call cc-option, -fno-stack-protector) \
7224 $(call cc-option, -mpreferred-stack-boundary=2)
7225 KBUILD_CFLAGS += $(call cc-option, -m32)
7226 +ifdef CONSTIFY_PLUGIN
7227 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7228 +endif
7229 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7230 GCOV_PROFILE := n
7231
7232 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7233 index 878e4b9..20537ab 100644
7234 --- a/arch/x86/boot/bitops.h
7235 +++ b/arch/x86/boot/bitops.h
7236 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237 u8 v;
7238 const u32 *p = (const u32 *)addr;
7239
7240 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7241 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7242 return v;
7243 }
7244
7245 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7246
7247 static inline void set_bit(int nr, void *addr)
7248 {
7249 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7250 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7251 }
7252
7253 #endif /* BOOT_BITOPS_H */
7254 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7255 index 98239d2..f40214c 100644
7256 --- a/arch/x86/boot/boot.h
7257 +++ b/arch/x86/boot/boot.h
7258 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7259 static inline u16 ds(void)
7260 {
7261 u16 seg;
7262 - asm("movw %%ds,%0" : "=rm" (seg));
7263 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7264 return seg;
7265 }
7266
7267 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7268 static inline int memcmp(const void *s1, const void *s2, size_t len)
7269 {
7270 u8 diff;
7271 - asm("repe; cmpsb; setnz %0"
7272 + asm volatile("repe; cmpsb; setnz %0"
7273 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7274 return diff;
7275 }
7276 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7277 index f8ed065..5bf5ff3 100644
7278 --- a/arch/x86/boot/compressed/Makefile
7279 +++ b/arch/x86/boot/compressed/Makefile
7280 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7281 KBUILD_CFLAGS += $(cflags-y)
7282 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7283 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7284 +ifdef CONSTIFY_PLUGIN
7285 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7286 +endif
7287
7288 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7289 GCOV_PROFILE := n
7290 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7291 index f543b70..b60fba8 100644
7292 --- a/arch/x86/boot/compressed/head_32.S
7293 +++ b/arch/x86/boot/compressed/head_32.S
7294 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7295 notl %eax
7296 andl %eax, %ebx
7297 #else
7298 - movl $LOAD_PHYSICAL_ADDR, %ebx
7299 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303 @@ -149,7 +149,7 @@ relocated:
7304 * and where it was actually loaded.
7305 */
7306 movl %ebp, %ebx
7307 - subl $LOAD_PHYSICAL_ADDR, %ebx
7308 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7309 jz 2f /* Nothing to be done if loaded at compiled addr. */
7310 /*
7311 * Process relocations.
7312 @@ -157,8 +157,7 @@ relocated:
7313
7314 1: subl $4, %edi
7315 movl (%edi), %ecx
7316 - testl %ecx, %ecx
7317 - jz 2f
7318 + jecxz 2f
7319 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7320 jmp 1b
7321 2:
7322 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7323 index 077e1b6..2c6b13b 100644
7324 --- a/arch/x86/boot/compressed/head_64.S
7325 +++ b/arch/x86/boot/compressed/head_64.S
7326 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7327 notl %eax
7328 andl %eax, %ebx
7329 #else
7330 - movl $LOAD_PHYSICAL_ADDR, %ebx
7331 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7332 #endif
7333
7334 /* Target address to relocate to for decompression */
7335 @@ -183,7 +183,7 @@ no_longmode:
7336 hlt
7337 jmp 1b
7338
7339 -#include "../../kernel/verify_cpu_64.S"
7340 +#include "../../kernel/verify_cpu.S"
7341
7342 /*
7343 * Be careful here startup_64 needs to be at a predictable
7344 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7345 notq %rax
7346 andq %rax, %rbp
7347 #else
7348 - movq $LOAD_PHYSICAL_ADDR, %rbp
7349 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7350 #endif
7351
7352 /* Target address to relocate to for decompression */
7353 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7354 index 842b2a3..f00178b 100644
7355 --- a/arch/x86/boot/compressed/misc.c
7356 +++ b/arch/x86/boot/compressed/misc.c
7357 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7358 case PT_LOAD:
7359 #ifdef CONFIG_RELOCATABLE
7360 dest = output;
7361 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7362 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7363 #else
7364 dest = (void *)(phdr->p_paddr);
7365 #endif
7366 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7367 error("Destination address too large");
7368 #endif
7369 #ifndef CONFIG_RELOCATABLE
7370 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7371 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7372 error("Wrong destination address");
7373 #endif
7374
7375 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7376 index bcbd36c..b1754af 100644
7377 --- a/arch/x86/boot/compressed/mkpiggy.c
7378 +++ b/arch/x86/boot/compressed/mkpiggy.c
7379 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7380
7381 offs = (olen > ilen) ? olen - ilen : 0;
7382 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7383 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7384 + offs += 64*1024; /* Add 64K bytes slack */
7385 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7386
7387 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7388 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7389 index bbeb0c3..f5167ab 100644
7390 --- a/arch/x86/boot/compressed/relocs.c
7391 +++ b/arch/x86/boot/compressed/relocs.c
7392 @@ -10,8 +10,11 @@
7393 #define USE_BSD
7394 #include <endian.h>
7395
7396 +#include "../../../../include/linux/autoconf.h"
7397 +
7398 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7399 static Elf32_Ehdr ehdr;
7400 +static Elf32_Phdr *phdr;
7401 static unsigned long reloc_count, reloc_idx;
7402 static unsigned long *relocs;
7403
7404 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7405
7406 static int is_safe_abs_reloc(const char* sym_name)
7407 {
7408 - int i;
7409 + unsigned int i;
7410
7411 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7412 if (!strcmp(sym_name, safe_abs_relocs[i]))
7413 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7414 }
7415 }
7416
7417 +static void read_phdrs(FILE *fp)
7418 +{
7419 + unsigned int i;
7420 +
7421 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7422 + if (!phdr) {
7423 + die("Unable to allocate %d program headers\n",
7424 + ehdr.e_phnum);
7425 + }
7426 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7427 + die("Seek to %d failed: %s\n",
7428 + ehdr.e_phoff, strerror(errno));
7429 + }
7430 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7431 + die("Cannot read ELF program headers: %s\n",
7432 + strerror(errno));
7433 + }
7434 + for(i = 0; i < ehdr.e_phnum; i++) {
7435 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7436 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7437 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7438 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7439 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7440 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7441 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7442 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7443 + }
7444 +
7445 +}
7446 +
7447 static void read_shdrs(FILE *fp)
7448 {
7449 - int i;
7450 + unsigned int i;
7451 Elf32_Shdr shdr;
7452
7453 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7454 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7455
7456 static void read_strtabs(FILE *fp)
7457 {
7458 - int i;
7459 + unsigned int i;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_STRTAB) {
7463 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7464
7465 static void read_symtabs(FILE *fp)
7466 {
7467 - int i,j;
7468 + unsigned int i,j;
7469 for (i = 0; i < ehdr.e_shnum; i++) {
7470 struct section *sec = &secs[i];
7471 if (sec->shdr.sh_type != SHT_SYMTAB) {
7472 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7473
7474 static void read_relocs(FILE *fp)
7475 {
7476 - int i,j;
7477 + unsigned int i,j;
7478 + uint32_t base;
7479 +
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 struct section *sec = &secs[i];
7482 if (sec->shdr.sh_type != SHT_REL) {
7483 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7484 die("Cannot read symbol table: %s\n",
7485 strerror(errno));
7486 }
7487 + base = 0;
7488 + for (j = 0; j < ehdr.e_phnum; j++) {
7489 + if (phdr[j].p_type != PT_LOAD )
7490 + continue;
7491 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7492 + continue;
7493 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7494 + break;
7495 + }
7496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7497 Elf32_Rel *rel = &sec->reltab[j];
7498 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7499 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7500 rel->r_info = elf32_to_cpu(rel->r_info);
7501 }
7502 }
7503 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7504
7505 static void print_absolute_symbols(void)
7506 {
7507 - int i;
7508 + unsigned int i;
7509 printf("Absolute symbols\n");
7510 printf(" Num: Value Size Type Bind Visibility Name\n");
7511 for (i = 0; i < ehdr.e_shnum; i++) {
7512 struct section *sec = &secs[i];
7513 char *sym_strtab;
7514 Elf32_Sym *sh_symtab;
7515 - int j;
7516 + unsigned int j;
7517
7518 if (sec->shdr.sh_type != SHT_SYMTAB) {
7519 continue;
7520 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7521
7522 static void print_absolute_relocs(void)
7523 {
7524 - int i, printed = 0;
7525 + unsigned int i, printed = 0;
7526
7527 for (i = 0; i < ehdr.e_shnum; i++) {
7528 struct section *sec = &secs[i];
7529 struct section *sec_applies, *sec_symtab;
7530 char *sym_strtab;
7531 Elf32_Sym *sh_symtab;
7532 - int j;
7533 + unsigned int j;
7534 if (sec->shdr.sh_type != SHT_REL) {
7535 continue;
7536 }
7537 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7538
7539 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7540 {
7541 - int i;
7542 + unsigned int i;
7543 /* Walk through the relocations */
7544 for (i = 0; i < ehdr.e_shnum; i++) {
7545 char *sym_strtab;
7546 Elf32_Sym *sh_symtab;
7547 struct section *sec_applies, *sec_symtab;
7548 - int j;
7549 + unsigned int j;
7550 struct section *sec = &secs[i];
7551
7552 if (sec->shdr.sh_type != SHT_REL) {
7553 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7554 if (sym->st_shndx == SHN_ABS) {
7555 continue;
7556 }
7557 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7558 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7559 + continue;
7560 +
7561 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7562 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7563 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7564 + continue;
7565 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7566 + continue;
7567 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7568 + continue;
7569 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7570 + continue;
7571 +#endif
7572 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7573 /*
7574 * NONE can be ignored and and PC relative
7575 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7576
7577 static void emit_relocs(int as_text)
7578 {
7579 - int i;
7580 + unsigned int i;
7581 /* Count how many relocations I have and allocate space for them. */
7582 reloc_count = 0;
7583 walk_relocs(count_reloc);
7584 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7585 fname, strerror(errno));
7586 }
7587 read_ehdr(fp);
7588 + read_phdrs(fp);
7589 read_shdrs(fp);
7590 read_strtabs(fp);
7591 read_symtabs(fp);
7592 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7593 index 4d3ff03..e4972ff 100644
7594 --- a/arch/x86/boot/cpucheck.c
7595 +++ b/arch/x86/boot/cpucheck.c
7596 @@ -74,7 +74,7 @@ static int has_fpu(void)
7597 u16 fcw = -1, fsw = -1;
7598 u32 cr0;
7599
7600 - asm("movl %%cr0,%0" : "=r" (cr0));
7601 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7605 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7606 {
7607 u32 f0, f1;
7608
7609 - asm("pushfl ; "
7610 + asm volatile("pushfl ; "
7611 "pushfl ; "
7612 "popl %0 ; "
7613 "movl %0,%1 ; "
7614 @@ -115,7 +115,7 @@ static void get_flags(void)
7615 set_bit(X86_FEATURE_FPU, cpu.flags);
7616
7617 if (has_eflag(X86_EFLAGS_ID)) {
7618 - asm("cpuid"
7619 + asm volatile("cpuid"
7620 : "=a" (max_intel_level),
7621 "=b" (cpu_vendor[0]),
7622 "=d" (cpu_vendor[1]),
7623 @@ -124,7 +124,7 @@ static void get_flags(void)
7624
7625 if (max_intel_level >= 0x00000001 &&
7626 max_intel_level <= 0x0000ffff) {
7627 - asm("cpuid"
7628 + asm volatile("cpuid"
7629 : "=a" (tfms),
7630 "=c" (cpu.flags[4]),
7631 "=d" (cpu.flags[0])
7632 @@ -136,7 +136,7 @@ static void get_flags(void)
7633 cpu.model += ((tfms >> 16) & 0xf) << 4;
7634 }
7635
7636 - asm("cpuid"
7637 + asm volatile("cpuid"
7638 : "=a" (max_amd_level)
7639 : "a" (0x80000000)
7640 : "ebx", "ecx", "edx");
7641 @@ -144,7 +144,7 @@ static void get_flags(void)
7642 if (max_amd_level >= 0x80000001 &&
7643 max_amd_level <= 0x8000ffff) {
7644 u32 eax = 0x80000001;
7645 - asm("cpuid"
7646 + asm volatile("cpuid"
7647 : "+a" (eax),
7648 "=c" (cpu.flags[6]),
7649 "=d" (cpu.flags[1])
7650 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7651 u32 ecx = MSR_K7_HWCR;
7652 u32 eax, edx;
7653
7654 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7655 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7656 eax &= ~(1 << 15);
7657 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7658 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7659
7660 get_flags(); /* Make sure it really did something */
7661 err = check_flags();
7662 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7663 u32 ecx = MSR_VIA_FCR;
7664 u32 eax, edx;
7665
7666 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7667 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7668 eax |= (1<<1)|(1<<7);
7669 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7670 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7671
7672 set_bit(X86_FEATURE_CX8, cpu.flags);
7673 err = check_flags();
7674 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7675 u32 eax, edx;
7676 u32 level = 1;
7677
7678 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7679 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7680 - asm("cpuid"
7681 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7682 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7683 + asm volatile("cpuid"
7684 : "+a" (level), "=d" (cpu.flags[0])
7685 : : "ecx", "ebx");
7686 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7687 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7688
7689 err = check_flags();
7690 }
7691 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7692 index b31cc54..8d69237 100644
7693 --- a/arch/x86/boot/header.S
7694 +++ b/arch/x86/boot/header.S
7695 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7696 # single linked list of
7697 # struct setup_data
7698
7699 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7700 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7701
7702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7703 #define VO_INIT_SIZE (VO__end - VO__text)
7704 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7705 index cae3feb..ff8ff2a 100644
7706 --- a/arch/x86/boot/memory.c
7707 +++ b/arch/x86/boot/memory.c
7708 @@ -19,7 +19,7 @@
7709
7710 static int detect_memory_e820(void)
7711 {
7712 - int count = 0;
7713 + unsigned int count = 0;
7714 struct biosregs ireg, oreg;
7715 struct e820entry *desc = boot_params.e820_map;
7716 static struct e820entry buf; /* static so it is zeroed */
7717 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7718 index 11e8c6e..fdbb1ed 100644
7719 --- a/arch/x86/boot/video-vesa.c
7720 +++ b/arch/x86/boot/video-vesa.c
7721 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7722
7723 boot_params.screen_info.vesapm_seg = oreg.es;
7724 boot_params.screen_info.vesapm_off = oreg.di;
7725 + boot_params.screen_info.vesapm_size = oreg.cx;
7726 }
7727
7728 /*
7729 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7730 index d42da38..787cdf3 100644
7731 --- a/arch/x86/boot/video.c
7732 +++ b/arch/x86/boot/video.c
7733 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7734 static unsigned int get_entry(void)
7735 {
7736 char entry_buf[4];
7737 - int i, len = 0;
7738 + unsigned int i, len = 0;
7739 int key;
7740 unsigned int v;
7741
7742 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7743 index 5b577d5..3c1fed4 100644
7744 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7745 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7746 @@ -8,6 +8,8 @@
7747 * including this sentence is retained in full.
7748 */
7749
7750 +#include <asm/alternative-asm.h>
7751 +
7752 .extern crypto_ft_tab
7753 .extern crypto_it_tab
7754 .extern crypto_fl_tab
7755 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7756 je B192; \
7757 leaq 32(r9),r9;
7758
7759 +#define ret pax_force_retaddr 0, 1; ret
7760 +
7761 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7762 movq r1,r2; \
7763 movq r3,r4; \
7764 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7765 index eb0566e..e3ebad8 100644
7766 --- a/arch/x86/crypto/aesni-intel_asm.S
7767 +++ b/arch/x86/crypto/aesni-intel_asm.S
7768 @@ -16,6 +16,7 @@
7769 */
7770
7771 #include <linux/linkage.h>
7772 +#include <asm/alternative-asm.h>
7773
7774 .text
7775
7776 @@ -52,6 +53,7 @@ _key_expansion_256a:
7777 pxor %xmm1, %xmm0
7778 movaps %xmm0, (%rcx)
7779 add $0x10, %rcx
7780 + pax_force_retaddr_bts
7781 ret
7782
7783 _key_expansion_192a:
7784 @@ -75,6 +77,7 @@ _key_expansion_192a:
7785 shufps $0b01001110, %xmm2, %xmm1
7786 movaps %xmm1, 16(%rcx)
7787 add $0x20, %rcx
7788 + pax_force_retaddr_bts
7789 ret
7790
7791 _key_expansion_192b:
7792 @@ -93,6 +96,7 @@ _key_expansion_192b:
7793
7794 movaps %xmm0, (%rcx)
7795 add $0x10, %rcx
7796 + pax_force_retaddr_bts
7797 ret
7798
7799 _key_expansion_256b:
7800 @@ -104,6 +108,7 @@ _key_expansion_256b:
7801 pxor %xmm1, %xmm2
7802 movaps %xmm2, (%rcx)
7803 add $0x10, %rcx
7804 + pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7809 cmp %rcx, %rdi
7810 jb .Ldec_key_loop
7811 xor %rax, %rax
7812 + pax_force_retaddr 0, 1
7813 ret
7814 +ENDPROC(aesni_set_key)
7815
7816 /*
7817 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7818 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7819 movups (INP), STATE # input
7820 call _aesni_enc1
7821 movups STATE, (OUTP) # output
7822 + pax_force_retaddr 0, 1
7823 ret
7824 +ENDPROC(aesni_enc)
7825
7826 /*
7827 * _aesni_enc1: internal ABI
7828 @@ -319,6 +328,7 @@ _aesni_enc1:
7829 movaps 0x70(TKEYP), KEY
7830 # aesenclast KEY, STATE # last round
7831 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -482,6 +492,7 @@ _aesni_enc4:
7837 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7838 # aesenclast KEY, STATE4
7839 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7840 + pax_force_retaddr_bts
7841 ret
7842
7843 /*
7844 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7845 movups (INP), STATE # input
7846 call _aesni_dec1
7847 movups STATE, (OUTP) #output
7848 + pax_force_retaddr 0, 1
7849 ret
7850 +ENDPROC(aesni_dec)
7851
7852 /*
7853 * _aesni_dec1: internal ABI
7854 @@ -563,6 +576,7 @@ _aesni_dec1:
7855 movaps 0x70(TKEYP), KEY
7856 # aesdeclast KEY, STATE # last round
7857 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7858 + pax_force_retaddr_bts
7859 ret
7860
7861 /*
7862 @@ -726,6 +740,7 @@ _aesni_dec4:
7863 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7864 # aesdeclast KEY, STATE4
7865 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7866 + pax_force_retaddr_bts
7867 ret
7868
7869 /*
7870 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7871 cmp $16, LEN
7872 jge .Lecb_enc_loop1
7873 .Lecb_enc_ret:
7874 + pax_force_retaddr 0, 1
7875 ret
7876 +ENDPROC(aesni_ecb_enc)
7877
7878 /*
7879 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7880 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7881 cmp $16, LEN
7882 jge .Lecb_dec_loop1
7883 .Lecb_dec_ret:
7884 + pax_force_retaddr 0, 1
7885 ret
7886 +ENDPROC(aesni_ecb_dec)
7887
7888 /*
7889 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7890 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7891 jge .Lcbc_enc_loop
7892 movups STATE, (IVP)
7893 .Lcbc_enc_ret:
7894 + pax_force_retaddr 0, 1
7895 ret
7896 +ENDPROC(aesni_cbc_enc)
7897
7898 /*
7899 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7900 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7901 .Lcbc_dec_ret:
7902 movups IV, (IVP)
7903 .Lcbc_dec_just_ret:
7904 + pax_force_retaddr 0, 1
7905 ret
7906 +ENDPROC(aesni_cbc_dec)
7907 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7908 index 6214a9b..1f4fc9a 100644
7909 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7910 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7911 @@ -1,3 +1,5 @@
7912 +#include <asm/alternative-asm.h>
7913 +
7914 # enter ECRYPT_encrypt_bytes
7915 .text
7916 .p2align 5
7917 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7918 add %r11,%rsp
7919 mov %rdi,%rax
7920 mov %rsi,%rdx
7921 + pax_force_retaddr 0, 1
7922 ret
7923 # bytesatleast65:
7924 ._bytesatleast65:
7925 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7926 add %r11,%rsp
7927 mov %rdi,%rax
7928 mov %rsi,%rdx
7929 + pax_force_retaddr
7930 ret
7931 # enter ECRYPT_ivsetup
7932 .text
7933 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7934 add %r11,%rsp
7935 mov %rdi,%rax
7936 mov %rsi,%rdx
7937 + pax_force_retaddr
7938 ret
7939 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7940 index 35974a5..5662ae2 100644
7941 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7942 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7943 @@ -21,6 +21,7 @@
7944 .text
7945
7946 #include <asm/asm-offsets.h>
7947 +#include <asm/alternative-asm.h>
7948
7949 #define a_offset 0
7950 #define b_offset 4
7951 @@ -269,6 +270,7 @@ twofish_enc_blk:
7952
7953 popq R1
7954 movq $1,%rax
7955 + pax_force_retaddr 0, 1
7956 ret
7957
7958 twofish_dec_blk:
7959 @@ -321,4 +323,5 @@ twofish_dec_blk:
7960
7961 popq R1
7962 movq $1,%rax
7963 + pax_force_retaddr 0, 1
7964 ret
7965 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7966 index 14531ab..a89a0c0 100644
7967 --- a/arch/x86/ia32/ia32_aout.c
7968 +++ b/arch/x86/ia32/ia32_aout.c
7969 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 unsigned long dump_start, dump_size;
7971 struct user32 dump;
7972
7973 + memset(&dump, 0, sizeof(dump));
7974 +
7975 fs = get_fs();
7976 set_fs(KERNEL_DS);
7977 has_dumped = 1;
7978 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7979 dump_size = dump.u_ssize << PAGE_SHIFT;
7980 DUMP_WRITE(dump_start, dump_size);
7981 }
7982 - /*
7983 - * Finally dump the task struct. Not be used by gdb, but
7984 - * could be useful
7985 - */
7986 - set_fs(KERNEL_DS);
7987 - DUMP_WRITE(current, sizeof(*current));
7988 end_coredump:
7989 set_fs(fs);
7990 return has_dumped;
7991 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7992 index 588a7aa..a3468b0 100644
7993 --- a/arch/x86/ia32/ia32_signal.c
7994 +++ b/arch/x86/ia32/ia32_signal.c
7995 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7996 }
7997 seg = get_fs();
7998 set_fs(KERNEL_DS);
7999 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8000 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8001 set_fs(seg);
8002 if (ret >= 0 && uoss_ptr) {
8003 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8004 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8005 */
8006 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8007 size_t frame_size,
8008 - void **fpstate)
8009 + void __user **fpstate)
8010 {
8011 unsigned long sp;
8012
8013 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8014
8015 if (used_math()) {
8016 sp = sp - sig_xstate_ia32_size;
8017 - *fpstate = (struct _fpstate_ia32 *) sp;
8018 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8019 if (save_i387_xstate_ia32(*fpstate) < 0)
8020 return (void __user *) -1L;
8021 }
8022 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8023 sp -= frame_size;
8024 /* Align the stack pointer according to the i386 ABI,
8025 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8026 - sp = ((sp + 4) & -16ul) - 4;
8027 + sp = ((sp - 12) & -16ul) - 4;
8028 return (void __user *) sp;
8029 }
8030
8031 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8032 * These are actually not used anymore, but left because some
8033 * gdb versions depend on them as a marker.
8034 */
8035 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8036 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8037 } put_user_catch(err);
8038
8039 if (err)
8040 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8041 0xb8,
8042 __NR_ia32_rt_sigreturn,
8043 0x80cd,
8044 - 0,
8045 + 0
8046 };
8047
8048 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8049 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8050
8051 if (ka->sa.sa_flags & SA_RESTORER)
8052 restorer = ka->sa.sa_restorer;
8053 + else if (current->mm->context.vdso)
8054 + /* Return stub is in 32bit vsyscall page */
8055 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8056 else
8057 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8058 - rt_sigreturn);
8059 + restorer = &frame->retcode;
8060 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8061
8062 /*
8063 * Not actually used anymore, but left because some gdb
8064 * versions need it.
8065 */
8066 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8067 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8068 } put_user_catch(err);
8069
8070 if (err)
8071 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8072 index 4edd8eb..29124b4 100644
8073 --- a/arch/x86/ia32/ia32entry.S
8074 +++ b/arch/x86/ia32/ia32entry.S
8075 @@ -13,7 +13,9 @@
8076 #include <asm/thread_info.h>
8077 #include <asm/segment.h>
8078 #include <asm/irqflags.h>
8079 +#include <asm/pgtable.h>
8080 #include <linux/linkage.h>
8081 +#include <asm/alternative-asm.h>
8082
8083 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8084 #include <linux/elf-em.h>
8085 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8086 ENDPROC(native_irq_enable_sysexit)
8087 #endif
8088
8089 + .macro pax_enter_kernel_user
8090 + pax_set_fptr_mask
8091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8092 + call pax_enter_kernel_user
8093 +#endif
8094 + .endm
8095 +
8096 + .macro pax_exit_kernel_user
8097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8098 + call pax_exit_kernel_user
8099 +#endif
8100 +#ifdef CONFIG_PAX_RANDKSTACK
8101 + pushq %rax
8102 + pushq %r11
8103 + call pax_randomize_kstack
8104 + popq %r11
8105 + popq %rax
8106 +#endif
8107 + .endm
8108 +
8109 +.macro pax_erase_kstack
8110 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8111 + call pax_erase_kstack
8112 +#endif
8113 +.endm
8114 +
8115 /*
8116 * 32bit SYSENTER instruction entry.
8117 *
8118 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8119 CFI_REGISTER rsp,rbp
8120 SWAPGS_UNSAFE_STACK
8121 movq PER_CPU_VAR(kernel_stack), %rsp
8122 - addq $(KERNEL_STACK_OFFSET),%rsp
8123 - /*
8124 - * No need to follow this irqs on/off section: the syscall
8125 - * disabled irqs, here we enable it straight after entry:
8126 - */
8127 - ENABLE_INTERRUPTS(CLBR_NONE)
8128 movl %ebp,%ebp /* zero extension */
8129 pushq $__USER32_DS
8130 CFI_ADJUST_CFA_OFFSET 8
8131 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8132 pushfq
8133 CFI_ADJUST_CFA_OFFSET 8
8134 /*CFI_REL_OFFSET rflags,0*/
8135 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8136 - CFI_REGISTER rip,r10
8137 + orl $X86_EFLAGS_IF,(%rsp)
8138 + GET_THREAD_INFO(%r11)
8139 + movl TI_sysenter_return(%r11), %r11d
8140 + CFI_REGISTER rip,r11
8141 pushq $__USER32_CS
8142 CFI_ADJUST_CFA_OFFSET 8
8143 /*CFI_REL_OFFSET cs,0*/
8144 movl %eax, %eax
8145 - pushq %r10
8146 + pushq %r11
8147 CFI_ADJUST_CFA_OFFSET 8
8148 CFI_REL_OFFSET rip,0
8149 pushq %rax
8150 CFI_ADJUST_CFA_OFFSET 8
8151 cld
8152 SAVE_ARGS 0,0,1
8153 + pax_enter_kernel_user
8154 + /*
8155 + * No need to follow this irqs on/off section: the syscall
8156 + * disabled irqs, here we enable it straight after entry:
8157 + */
8158 + ENABLE_INTERRUPTS(CLBR_NONE)
8159 /* no need to do an access_ok check here because rbp has been
8160 32bit zero extended */
8161 +
8162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8163 + mov $PAX_USER_SHADOW_BASE,%r11
8164 + add %r11,%rbp
8165 +#endif
8166 +
8167 1: movl (%rbp),%ebp
8168 .section __ex_table,"a"
8169 .quad 1b,ia32_badarg
8170 .previous
8171 - GET_THREAD_INFO(%r10)
8172 - orl $TS_COMPAT,TI_status(%r10)
8173 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8174 + GET_THREAD_INFO(%r11)
8175 + orl $TS_COMPAT,TI_status(%r11)
8176 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8177 CFI_REMEMBER_STATE
8178 jnz sysenter_tracesys
8179 cmpq $(IA32_NR_syscalls-1),%rax
8180 @@ -166,13 +202,15 @@ sysenter_do_call:
8181 sysenter_dispatch:
8182 call *ia32_sys_call_table(,%rax,8)
8183 movq %rax,RAX-ARGOFFSET(%rsp)
8184 - GET_THREAD_INFO(%r10)
8185 + GET_THREAD_INFO(%r11)
8186 DISABLE_INTERRUPTS(CLBR_NONE)
8187 TRACE_IRQS_OFF
8188 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8189 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8190 jnz sysexit_audit
8191 sysexit_from_sys_call:
8192 - andl $~TS_COMPAT,TI_status(%r10)
8193 + pax_exit_kernel_user
8194 + pax_erase_kstack
8195 + andl $~TS_COMPAT,TI_status(%r11)
8196 /* clear IF, that popfq doesn't enable interrupts early */
8197 andl $~0x200,EFLAGS-R11(%rsp)
8198 movl RIP-R11(%rsp),%edx /* User %eip */
8199 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
8200 movl %eax,%esi /* 2nd arg: syscall number */
8201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8202 call audit_syscall_entry
8203 +
8204 + pax_erase_kstack
8205 +
8206 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8207 cmpq $(IA32_NR_syscalls-1),%rax
8208 ja ia32_badsys
8209 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
8210 .endm
8211
8212 .macro auditsys_exit exit
8213 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8214 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8215 jnz ia32_ret_from_sys_call
8216 TRACE_IRQS_ON
8217 sti
8218 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
8219 movzbl %al,%edi /* zero-extend that into %edi */
8220 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8221 call audit_syscall_exit
8222 - GET_THREAD_INFO(%r10)
8223 + GET_THREAD_INFO(%r11)
8224 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8225 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8226 cli
8227 TRACE_IRQS_OFF
8228 - testl %edi,TI_flags(%r10)
8229 + testl %edi,TI_flags(%r11)
8230 jz \exit
8231 CLEAR_RREGS -ARGOFFSET
8232 jmp int_with_check
8233 @@ -244,7 +285,7 @@ sysexit_audit:
8234
8235 sysenter_tracesys:
8236 #ifdef CONFIG_AUDITSYSCALL
8237 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8238 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8239 jz sysenter_auditsys
8240 #endif
8241 SAVE_REST
8242 @@ -252,6 +293,9 @@ sysenter_tracesys:
8243 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8244 movq %rsp,%rdi /* &pt_regs -> arg1 */
8245 call syscall_trace_enter
8246 +
8247 + pax_erase_kstack
8248 +
8249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8250 RESTORE_REST
8251 cmpq $(IA32_NR_syscalls-1),%rax
8252 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8253 ENTRY(ia32_cstar_target)
8254 CFI_STARTPROC32 simple
8255 CFI_SIGNAL_FRAME
8256 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8257 + CFI_DEF_CFA rsp,0
8258 CFI_REGISTER rip,rcx
8259 /*CFI_REGISTER rflags,r11*/
8260 SWAPGS_UNSAFE_STACK
8261 movl %esp,%r8d
8262 CFI_REGISTER rsp,r8
8263 movq PER_CPU_VAR(kernel_stack),%rsp
8264 + SAVE_ARGS 8*6,1,1
8265 + pax_enter_kernel_user
8266 /*
8267 * No need to follow this irqs on/off section: the syscall
8268 * disabled irqs and here we enable it straight after entry:
8269 */
8270 ENABLE_INTERRUPTS(CLBR_NONE)
8271 - SAVE_ARGS 8,1,1
8272 movl %eax,%eax /* zero extension */
8273 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8274 movq %rcx,RIP-ARGOFFSET(%rsp)
8275 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8276 /* no need to do an access_ok check here because r8 has been
8277 32bit zero extended */
8278 /* hardware stack frame is complete now */
8279 +
8280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8281 + mov $PAX_USER_SHADOW_BASE,%r11
8282 + add %r11,%r8
8283 +#endif
8284 +
8285 1: movl (%r8),%r9d
8286 .section __ex_table,"a"
8287 .quad 1b,ia32_badarg
8288 .previous
8289 - GET_THREAD_INFO(%r10)
8290 - orl $TS_COMPAT,TI_status(%r10)
8291 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8292 + GET_THREAD_INFO(%r11)
8293 + orl $TS_COMPAT,TI_status(%r11)
8294 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8295 CFI_REMEMBER_STATE
8296 jnz cstar_tracesys
8297 cmpq $IA32_NR_syscalls-1,%rax
8298 @@ -327,13 +378,15 @@ cstar_do_call:
8299 cstar_dispatch:
8300 call *ia32_sys_call_table(,%rax,8)
8301 movq %rax,RAX-ARGOFFSET(%rsp)
8302 - GET_THREAD_INFO(%r10)
8303 + GET_THREAD_INFO(%r11)
8304 DISABLE_INTERRUPTS(CLBR_NONE)
8305 TRACE_IRQS_OFF
8306 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8307 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8308 jnz sysretl_audit
8309 sysretl_from_sys_call:
8310 - andl $~TS_COMPAT,TI_status(%r10)
8311 + pax_exit_kernel_user
8312 + pax_erase_kstack
8313 + andl $~TS_COMPAT,TI_status(%r11)
8314 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8315 movl RIP-ARGOFFSET(%rsp),%ecx
8316 CFI_REGISTER rip,rcx
8317 @@ -361,7 +414,7 @@ sysretl_audit:
8318
8319 cstar_tracesys:
8320 #ifdef CONFIG_AUDITSYSCALL
8321 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8322 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8323 jz cstar_auditsys
8324 #endif
8325 xchgl %r9d,%ebp
8326 @@ -370,6 +423,9 @@ cstar_tracesys:
8327 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8328 movq %rsp,%rdi /* &pt_regs -> arg1 */
8329 call syscall_trace_enter
8330 +
8331 + pax_erase_kstack
8332 +
8333 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8334 RESTORE_REST
8335 xchgl %ebp,%r9d
8336 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8337 CFI_REL_OFFSET rip,RIP-RIP
8338 PARAVIRT_ADJUST_EXCEPTION_FRAME
8339 SWAPGS
8340 - /*
8341 - * No need to follow this irqs on/off section: the syscall
8342 - * disabled irqs and here we enable it straight after entry:
8343 - */
8344 - ENABLE_INTERRUPTS(CLBR_NONE)
8345 movl %eax,%eax
8346 pushq %rax
8347 CFI_ADJUST_CFA_OFFSET 8
8348 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8349 /* note the registers are not zero extended to the sf.
8350 this could be a problem. */
8351 SAVE_ARGS 0,0,1
8352 - GET_THREAD_INFO(%r10)
8353 - orl $TS_COMPAT,TI_status(%r10)
8354 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8355 + pax_enter_kernel_user
8356 + /*
8357 + * No need to follow this irqs on/off section: the syscall
8358 + * disabled irqs and here we enable it straight after entry:
8359 + */
8360 + ENABLE_INTERRUPTS(CLBR_NONE)
8361 + GET_THREAD_INFO(%r11)
8362 + orl $TS_COMPAT,TI_status(%r11)
8363 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8364 jnz ia32_tracesys
8365 cmpq $(IA32_NR_syscalls-1),%rax
8366 ja ia32_badsys
8367 @@ -448,6 +505,9 @@ ia32_tracesys:
8368 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8369 movq %rsp,%rdi /* &pt_regs -> arg1 */
8370 call syscall_trace_enter
8371 +
8372 + pax_erase_kstack
8373 +
8374 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8375 RESTORE_REST
8376 cmpq $(IA32_NR_syscalls-1),%rax
8377 @@ -462,6 +522,7 @@ ia32_badsys:
8378
8379 quiet_ni_syscall:
8380 movq $-ENOSYS,%rax
8381 + pax_force_retaddr
8382 ret
8383 CFI_ENDPROC
8384
8385 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8386 index 016218c..47ccbdd 100644
8387 --- a/arch/x86/ia32/sys_ia32.c
8388 +++ b/arch/x86/ia32/sys_ia32.c
8389 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8390 */
8391 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8392 {
8393 - typeof(ubuf->st_uid) uid = 0;
8394 - typeof(ubuf->st_gid) gid = 0;
8395 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8396 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8397 SET_UID(uid, stat->uid);
8398 SET_GID(gid, stat->gid);
8399 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8400 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8401 }
8402 set_fs(KERNEL_DS);
8403 ret = sys_rt_sigprocmask(how,
8404 - set ? (sigset_t __user *)&s : NULL,
8405 - oset ? (sigset_t __user *)&s : NULL,
8406 + set ? (sigset_t __force_user *)&s : NULL,
8407 + oset ? (sigset_t __force_user *)&s : NULL,
8408 sigsetsize);
8409 set_fs(old_fs);
8410 if (ret)
8411 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8412 mm_segment_t old_fs = get_fs();
8413
8414 set_fs(KERNEL_DS);
8415 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8416 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8417 set_fs(old_fs);
8418 if (put_compat_timespec(&t, interval))
8419 return -EFAULT;
8420 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8421 mm_segment_t old_fs = get_fs();
8422
8423 set_fs(KERNEL_DS);
8424 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8425 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8426 set_fs(old_fs);
8427 if (!ret) {
8428 switch (_NSIG_WORDS) {
8429 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8430 if (copy_siginfo_from_user32(&info, uinfo))
8431 return -EFAULT;
8432 set_fs(KERNEL_DS);
8433 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8434 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8435 set_fs(old_fs);
8436 return ret;
8437 }
8438 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8439 return -EFAULT;
8440
8441 set_fs(KERNEL_DS);
8442 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8443 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8444 count);
8445 set_fs(old_fs);
8446
8447 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8448 index e2077d3..17d07ad 100644
8449 --- a/arch/x86/include/asm/alternative-asm.h
8450 +++ b/arch/x86/include/asm/alternative-asm.h
8451 @@ -8,10 +8,10 @@
8452
8453 #ifdef CONFIG_SMP
8454 .macro LOCK_PREFIX
8455 -1: lock
8456 +672: lock
8457 .section .smp_locks,"a"
8458 .align 4
8459 - X86_ALIGN 1b
8460 + X86_ALIGN 672b
8461 .previous
8462 .endm
8463 #else
8464 @@ -19,4 +19,43 @@
8465 .endm
8466 #endif
8467
8468 +#ifdef KERNEXEC_PLUGIN
8469 + .macro pax_force_retaddr_bts rip=0
8470 + btsq $63,\rip(%rsp)
8471 + .endm
8472 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8473 + .macro pax_force_retaddr rip=0, reload=0
8474 + btsq $63,\rip(%rsp)
8475 + .endm
8476 + .macro pax_force_fptr ptr
8477 + btsq $63,\ptr
8478 + .endm
8479 + .macro pax_set_fptr_mask
8480 + .endm
8481 +#endif
8482 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8483 + .macro pax_force_retaddr rip=0, reload=0
8484 + .if \reload
8485 + pax_set_fptr_mask
8486 + .endif
8487 + orq %r10,\rip(%rsp)
8488 + .endm
8489 + .macro pax_force_fptr ptr
8490 + orq %r10,\ptr
8491 + .endm
8492 + .macro pax_set_fptr_mask
8493 + movabs $0x8000000000000000,%r10
8494 + .endm
8495 +#endif
8496 +#else
8497 + .macro pax_force_retaddr rip=0, reload=0
8498 + .endm
8499 + .macro pax_force_fptr ptr
8500 + .endm
8501 + .macro pax_force_retaddr_bts rip=0
8502 + .endm
8503 + .macro pax_set_fptr_mask
8504 + .endm
8505 +#endif
8506 +
8507 #endif /* __ASSEMBLY__ */
8508 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8509 index c240efc..fdfadf3 100644
8510 --- a/arch/x86/include/asm/alternative.h
8511 +++ b/arch/x86/include/asm/alternative.h
8512 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8513 " .byte 662b-661b\n" /* sourcelen */ \
8514 " .byte 664f-663f\n" /* replacementlen */ \
8515 ".previous\n" \
8516 - ".section .altinstr_replacement, \"ax\"\n" \
8517 + ".section .altinstr_replacement, \"a\"\n" \
8518 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8519 ".previous"
8520
8521 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8522 index 474d80d..1f97d58 100644
8523 --- a/arch/x86/include/asm/apic.h
8524 +++ b/arch/x86/include/asm/apic.h
8525 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8526
8527 #ifdef CONFIG_X86_LOCAL_APIC
8528
8529 -extern unsigned int apic_verbosity;
8530 +extern int apic_verbosity;
8531 extern int local_apic_timer_c2_ok;
8532
8533 extern int disable_apic;
8534 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8535 index 20370c6..a2eb9b0 100644
8536 --- a/arch/x86/include/asm/apm.h
8537 +++ b/arch/x86/include/asm/apm.h
8538 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8539 __asm__ __volatile__(APM_DO_ZERO_SEGS
8540 "pushl %%edi\n\t"
8541 "pushl %%ebp\n\t"
8542 - "lcall *%%cs:apm_bios_entry\n\t"
8543 + "lcall *%%ss:apm_bios_entry\n\t"
8544 "setc %%al\n\t"
8545 "popl %%ebp\n\t"
8546 "popl %%edi\n\t"
8547 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8548 __asm__ __volatile__(APM_DO_ZERO_SEGS
8549 "pushl %%edi\n\t"
8550 "pushl %%ebp\n\t"
8551 - "lcall *%%cs:apm_bios_entry\n\t"
8552 + "lcall *%%ss:apm_bios_entry\n\t"
8553 "setc %%bl\n\t"
8554 "popl %%ebp\n\t"
8555 "popl %%edi\n\t"
8556 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8557 index dc5a667..939040c 100644
8558 --- a/arch/x86/include/asm/atomic_32.h
8559 +++ b/arch/x86/include/asm/atomic_32.h
8560 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8561 }
8562
8563 /**
8564 + * atomic_read_unchecked - read atomic variable
8565 + * @v: pointer of type atomic_unchecked_t
8566 + *
8567 + * Atomically reads the value of @v.
8568 + */
8569 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8570 +{
8571 + return v->counter;
8572 +}
8573 +
8574 +/**
8575 * atomic_set - set atomic variable
8576 * @v: pointer of type atomic_t
8577 * @i: required value
8578 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8579 }
8580
8581 /**
8582 + * atomic_set_unchecked - set atomic variable
8583 + * @v: pointer of type atomic_unchecked_t
8584 + * @i: required value
8585 + *
8586 + * Atomically sets the value of @v to @i.
8587 + */
8588 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8589 +{
8590 + v->counter = i;
8591 +}
8592 +
8593 +/**
8594 * atomic_add - add integer to atomic variable
8595 * @i: integer value to add
8596 * @v: pointer of type atomic_t
8597 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8598 */
8599 static inline void atomic_add(int i, atomic_t *v)
8600 {
8601 - asm volatile(LOCK_PREFIX "addl %1,%0"
8602 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8603 +
8604 +#ifdef CONFIG_PAX_REFCOUNT
8605 + "jno 0f\n"
8606 + LOCK_PREFIX "subl %1,%0\n"
8607 + "int $4\n0:\n"
8608 + _ASM_EXTABLE(0b, 0b)
8609 +#endif
8610 +
8611 + : "+m" (v->counter)
8612 + : "ir" (i));
8613 +}
8614 +
8615 +/**
8616 + * atomic_add_unchecked - add integer to atomic variable
8617 + * @i: integer value to add
8618 + * @v: pointer of type atomic_unchecked_t
8619 + *
8620 + * Atomically adds @i to @v.
8621 + */
8622 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8623 +{
8624 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8625 : "+m" (v->counter)
8626 : "ir" (i));
8627 }
8628 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8629 */
8630 static inline void atomic_sub(int i, atomic_t *v)
8631 {
8632 - asm volatile(LOCK_PREFIX "subl %1,%0"
8633 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8634 +
8635 +#ifdef CONFIG_PAX_REFCOUNT
8636 + "jno 0f\n"
8637 + LOCK_PREFIX "addl %1,%0\n"
8638 + "int $4\n0:\n"
8639 + _ASM_EXTABLE(0b, 0b)
8640 +#endif
8641 +
8642 + : "+m" (v->counter)
8643 + : "ir" (i));
8644 +}
8645 +
8646 +/**
8647 + * atomic_sub_unchecked - subtract integer from atomic variable
8648 + * @i: integer value to subtract
8649 + * @v: pointer of type atomic_unchecked_t
8650 + *
8651 + * Atomically subtracts @i from @v.
8652 + */
8653 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8654 +{
8655 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8656 : "+m" (v->counter)
8657 : "ir" (i));
8658 }
8659 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8660 {
8661 unsigned char c;
8662
8663 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8664 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8665 +
8666 +#ifdef CONFIG_PAX_REFCOUNT
8667 + "jno 0f\n"
8668 + LOCK_PREFIX "addl %2,%0\n"
8669 + "int $4\n0:\n"
8670 + _ASM_EXTABLE(0b, 0b)
8671 +#endif
8672 +
8673 + "sete %1\n"
8674 : "+m" (v->counter), "=qm" (c)
8675 : "ir" (i) : "memory");
8676 return c;
8677 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8678 */
8679 static inline void atomic_inc(atomic_t *v)
8680 {
8681 - asm volatile(LOCK_PREFIX "incl %0"
8682 + asm volatile(LOCK_PREFIX "incl %0\n"
8683 +
8684 +#ifdef CONFIG_PAX_REFCOUNT
8685 + "jno 0f\n"
8686 + LOCK_PREFIX "decl %0\n"
8687 + "int $4\n0:\n"
8688 + _ASM_EXTABLE(0b, 0b)
8689 +#endif
8690 +
8691 + : "+m" (v->counter));
8692 +}
8693 +
8694 +/**
8695 + * atomic_inc_unchecked - increment atomic variable
8696 + * @v: pointer of type atomic_unchecked_t
8697 + *
8698 + * Atomically increments @v by 1.
8699 + */
8700 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8701 +{
8702 + asm volatile(LOCK_PREFIX "incl %0\n"
8703 : "+m" (v->counter));
8704 }
8705
8706 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8707 */
8708 static inline void atomic_dec(atomic_t *v)
8709 {
8710 - asm volatile(LOCK_PREFIX "decl %0"
8711 + asm volatile(LOCK_PREFIX "decl %0\n"
8712 +
8713 +#ifdef CONFIG_PAX_REFCOUNT
8714 + "jno 0f\n"
8715 + LOCK_PREFIX "incl %0\n"
8716 + "int $4\n0:\n"
8717 + _ASM_EXTABLE(0b, 0b)
8718 +#endif
8719 +
8720 + : "+m" (v->counter));
8721 +}
8722 +
8723 +/**
8724 + * atomic_dec_unchecked - decrement atomic variable
8725 + * @v: pointer of type atomic_unchecked_t
8726 + *
8727 + * Atomically decrements @v by 1.
8728 + */
8729 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8730 +{
8731 + asm volatile(LOCK_PREFIX "decl %0\n"
8732 : "+m" (v->counter));
8733 }
8734
8735 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8736 {
8737 unsigned char c;
8738
8739 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8740 + asm volatile(LOCK_PREFIX "decl %0\n"
8741 +
8742 +#ifdef CONFIG_PAX_REFCOUNT
8743 + "jno 0f\n"
8744 + LOCK_PREFIX "incl %0\n"
8745 + "int $4\n0:\n"
8746 + _ASM_EXTABLE(0b, 0b)
8747 +#endif
8748 +
8749 + "sete %1\n"
8750 : "+m" (v->counter), "=qm" (c)
8751 : : "memory");
8752 return c != 0;
8753 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8754 {
8755 unsigned char c;
8756
8757 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8758 + asm volatile(LOCK_PREFIX "incl %0\n"
8759 +
8760 +#ifdef CONFIG_PAX_REFCOUNT
8761 + "jno 0f\n"
8762 + LOCK_PREFIX "decl %0\n"
8763 + "into\n0:\n"
8764 + _ASM_EXTABLE(0b, 0b)
8765 +#endif
8766 +
8767 + "sete %1\n"
8768 + : "+m" (v->counter), "=qm" (c)
8769 + : : "memory");
8770 + return c != 0;
8771 +}
8772 +
8773 +/**
8774 + * atomic_inc_and_test_unchecked - increment and test
8775 + * @v: pointer of type atomic_unchecked_t
8776 + *
8777 + * Atomically increments @v by 1
8778 + * and returns true if the result is zero, or false for all
8779 + * other cases.
8780 + */
8781 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8782 +{
8783 + unsigned char c;
8784 +
8785 + asm volatile(LOCK_PREFIX "incl %0\n"
8786 + "sete %1\n"
8787 : "+m" (v->counter), "=qm" (c)
8788 : : "memory");
8789 return c != 0;
8790 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8791 {
8792 unsigned char c;
8793
8794 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8795 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8796 +
8797 +#ifdef CONFIG_PAX_REFCOUNT
8798 + "jno 0f\n"
8799 + LOCK_PREFIX "subl %2,%0\n"
8800 + "int $4\n0:\n"
8801 + _ASM_EXTABLE(0b, 0b)
8802 +#endif
8803 +
8804 + "sets %1\n"
8805 : "+m" (v->counter), "=qm" (c)
8806 : "ir" (i) : "memory");
8807 return c;
8808 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8809 #endif
8810 /* Modern 486+ processor */
8811 __i = i;
8812 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8813 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8814 +
8815 +#ifdef CONFIG_PAX_REFCOUNT
8816 + "jno 0f\n"
8817 + "movl %0, %1\n"
8818 + "int $4\n0:\n"
8819 + _ASM_EXTABLE(0b, 0b)
8820 +#endif
8821 +
8822 : "+r" (i), "+m" (v->counter)
8823 : : "memory");
8824 return i + __i;
8825 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8826 }
8827
8828 /**
8829 + * atomic_add_return_unchecked - add integer and return
8830 + * @v: pointer of type atomic_unchecked_t
8831 + * @i: integer value to add
8832 + *
8833 + * Atomically adds @i to @v and returns @i + @v
8834 + */
8835 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8836 +{
8837 + int __i;
8838 +#ifdef CONFIG_M386
8839 + unsigned long flags;
8840 + if (unlikely(boot_cpu_data.x86 <= 3))
8841 + goto no_xadd;
8842 +#endif
8843 + /* Modern 486+ processor */
8844 + __i = i;
8845 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8846 + : "+r" (i), "+m" (v->counter)
8847 + : : "memory");
8848 + return i + __i;
8849 +
8850 +#ifdef CONFIG_M386
8851 +no_xadd: /* Legacy 386 processor */
8852 + local_irq_save(flags);
8853 + __i = atomic_read_unchecked(v);
8854 + atomic_set_unchecked(v, i + __i);
8855 + local_irq_restore(flags);
8856 + return i + __i;
8857 +#endif
8858 +}
8859 +
8860 +/**
8861 * atomic_sub_return - subtract integer and return
8862 * @v: pointer of type atomic_t
8863 * @i: integer value to subtract
8864 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8865 return cmpxchg(&v->counter, old, new);
8866 }
8867
8868 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8869 +{
8870 + return cmpxchg(&v->counter, old, new);
8871 +}
8872 +
8873 static inline int atomic_xchg(atomic_t *v, int new)
8874 {
8875 return xchg(&v->counter, new);
8876 }
8877
8878 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8879 +{
8880 + return xchg(&v->counter, new);
8881 +}
8882 +
8883 /**
8884 * atomic_add_unless - add unless the number is already a given value
8885 * @v: pointer of type atomic_t
8886 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8887 */
8888 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8889 {
8890 - int c, old;
8891 + int c, old, new;
8892 c = atomic_read(v);
8893 for (;;) {
8894 - if (unlikely(c == (u)))
8895 + if (unlikely(c == u))
8896 break;
8897 - old = atomic_cmpxchg((v), c, c + (a));
8898 +
8899 + asm volatile("addl %2,%0\n"
8900 +
8901 +#ifdef CONFIG_PAX_REFCOUNT
8902 + "jno 0f\n"
8903 + "subl %2,%0\n"
8904 + "int $4\n0:\n"
8905 + _ASM_EXTABLE(0b, 0b)
8906 +#endif
8907 +
8908 + : "=r" (new)
8909 + : "0" (c), "ir" (a));
8910 +
8911 + old = atomic_cmpxchg(v, c, new);
8912 if (likely(old == c))
8913 break;
8914 c = old;
8915 }
8916 - return c != (u);
8917 + return c != u;
8918 }
8919
8920 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8921
8922 #define atomic_inc_return(v) (atomic_add_return(1, v))
8923 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8924 +{
8925 + return atomic_add_return_unchecked(1, v);
8926 +}
8927 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8928
8929 /* These are x86-specific, used by some header files */
8930 @@ -266,9 +495,18 @@ typedef struct {
8931 u64 __aligned(8) counter;
8932 } atomic64_t;
8933
8934 +#ifdef CONFIG_PAX_REFCOUNT
8935 +typedef struct {
8936 + u64 __aligned(8) counter;
8937 +} atomic64_unchecked_t;
8938 +#else
8939 +typedef atomic64_t atomic64_unchecked_t;
8940 +#endif
8941 +
8942 #define ATOMIC64_INIT(val) { (val) }
8943
8944 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8945 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8946
8947 /**
8948 * atomic64_xchg - xchg atomic64 variable
8949 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8950 * the old value.
8951 */
8952 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8953 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8954
8955 /**
8956 * atomic64_set - set atomic64 variable
8957 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8958 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8959
8960 /**
8961 + * atomic64_unchecked_set - set atomic64 variable
8962 + * @ptr: pointer to type atomic64_unchecked_t
8963 + * @new_val: value to assign
8964 + *
8965 + * Atomically sets the value of @ptr to @new_val.
8966 + */
8967 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8968 +
8969 +/**
8970 * atomic64_read - read atomic64 variable
8971 * @ptr: pointer to type atomic64_t
8972 *
8973 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8974 return res;
8975 }
8976
8977 -extern u64 atomic64_read(atomic64_t *ptr);
8978 +/**
8979 + * atomic64_read_unchecked - read atomic64 variable
8980 + * @ptr: pointer to type atomic64_unchecked_t
8981 + *
8982 + * Atomically reads the value of @ptr and returns it.
8983 + */
8984 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8985 +{
8986 + u64 res;
8987 +
8988 + /*
8989 + * Note, we inline this atomic64_unchecked_t primitive because
8990 + * it only clobbers EAX/EDX and leaves the others
8991 + * untouched. We also (somewhat subtly) rely on the
8992 + * fact that cmpxchg8b returns the current 64-bit value
8993 + * of the memory location we are touching:
8994 + */
8995 + asm volatile(
8996 + "mov %%ebx, %%eax\n\t"
8997 + "mov %%ecx, %%edx\n\t"
8998 + LOCK_PREFIX "cmpxchg8b %1\n"
8999 + : "=&A" (res)
9000 + : "m" (*ptr)
9001 + );
9002 +
9003 + return res;
9004 +}
9005
9006 /**
9007 * atomic64_add_return - add and return
9008 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9009 * Other variants with different arithmetic operators:
9010 */
9011 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9012 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9013 extern u64 atomic64_inc_return(atomic64_t *ptr);
9014 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9015 extern u64 atomic64_dec_return(atomic64_t *ptr);
9016 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9017
9018 /**
9019 * atomic64_add - add integer to atomic64 variable
9020 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9021 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9022
9023 /**
9024 + * atomic64_add_unchecked - add integer to atomic64 variable
9025 + * @delta: integer value to add
9026 + * @ptr: pointer to type atomic64_unchecked_t
9027 + *
9028 + * Atomically adds @delta to @ptr.
9029 + */
9030 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9031 +
9032 +/**
9033 * atomic64_sub - subtract the atomic64 variable
9034 * @delta: integer value to subtract
9035 * @ptr: pointer to type atomic64_t
9036 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9037 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9038
9039 /**
9040 + * atomic64_sub_unchecked - subtract the atomic64 variable
9041 + * @delta: integer value to subtract
9042 + * @ptr: pointer to type atomic64_unchecked_t
9043 + *
9044 + * Atomically subtracts @delta from @ptr.
9045 + */
9046 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9047 +
9048 +/**
9049 * atomic64_sub_and_test - subtract value from variable and test result
9050 * @delta: integer value to subtract
9051 * @ptr: pointer to type atomic64_t
9052 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9053 extern void atomic64_inc(atomic64_t *ptr);
9054
9055 /**
9056 + * atomic64_inc_unchecked - increment atomic64 variable
9057 + * @ptr: pointer to type atomic64_unchecked_t
9058 + *
9059 + * Atomically increments @ptr by 1.
9060 + */
9061 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9062 +
9063 +/**
9064 * atomic64_dec - decrement atomic64 variable
9065 * @ptr: pointer to type atomic64_t
9066 *
9067 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9068 extern void atomic64_dec(atomic64_t *ptr);
9069
9070 /**
9071 + * atomic64_dec_unchecked - decrement atomic64 variable
9072 + * @ptr: pointer to type atomic64_unchecked_t
9073 + *
9074 + * Atomically decrements @ptr by 1.
9075 + */
9076 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9077 +
9078 +/**
9079 * atomic64_dec_and_test - decrement and test
9080 * @ptr: pointer to type atomic64_t
9081 *
9082 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9083 index d605dc2..fafd7bd 100644
9084 --- a/arch/x86/include/asm/atomic_64.h
9085 +++ b/arch/x86/include/asm/atomic_64.h
9086 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9087 }
9088
9089 /**
9090 + * atomic_read_unchecked - read atomic variable
9091 + * @v: pointer of type atomic_unchecked_t
9092 + *
9093 + * Atomically reads the value of @v.
9094 + */
9095 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9096 +{
9097 + return v->counter;
9098 +}
9099 +
9100 +/**
9101 * atomic_set - set atomic variable
9102 * @v: pointer of type atomic_t
9103 * @i: required value
9104 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9105 }
9106
9107 /**
9108 + * atomic_set_unchecked - set atomic variable
9109 + * @v: pointer of type atomic_unchecked_t
9110 + * @i: required value
9111 + *
9112 + * Atomically sets the value of @v to @i.
9113 + */
9114 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9115 +{
9116 + v->counter = i;
9117 +}
9118 +
9119 +/**
9120 * atomic_add - add integer to atomic variable
9121 * @i: integer value to add
9122 * @v: pointer of type atomic_t
9123 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9124 */
9125 static inline void atomic_add(int i, atomic_t *v)
9126 {
9127 - asm volatile(LOCK_PREFIX "addl %1,%0"
9128 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9129 +
9130 +#ifdef CONFIG_PAX_REFCOUNT
9131 + "jno 0f\n"
9132 + LOCK_PREFIX "subl %1,%0\n"
9133 + "int $4\n0:\n"
9134 + _ASM_EXTABLE(0b, 0b)
9135 +#endif
9136 +
9137 + : "=m" (v->counter)
9138 + : "ir" (i), "m" (v->counter));
9139 +}
9140 +
9141 +/**
9142 + * atomic_add_unchecked - add integer to atomic variable
9143 + * @i: integer value to add
9144 + * @v: pointer of type atomic_unchecked_t
9145 + *
9146 + * Atomically adds @i to @v.
9147 + */
9148 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9149 +{
9150 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9151 : "=m" (v->counter)
9152 : "ir" (i), "m" (v->counter));
9153 }
9154 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9155 */
9156 static inline void atomic_sub(int i, atomic_t *v)
9157 {
9158 - asm volatile(LOCK_PREFIX "subl %1,%0"
9159 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9160 +
9161 +#ifdef CONFIG_PAX_REFCOUNT
9162 + "jno 0f\n"
9163 + LOCK_PREFIX "addl %1,%0\n"
9164 + "int $4\n0:\n"
9165 + _ASM_EXTABLE(0b, 0b)
9166 +#endif
9167 +
9168 + : "=m" (v->counter)
9169 + : "ir" (i), "m" (v->counter));
9170 +}
9171 +
9172 +/**
9173 + * atomic_sub_unchecked - subtract the atomic variable
9174 + * @i: integer value to subtract
9175 + * @v: pointer of type atomic_unchecked_t
9176 + *
9177 + * Atomically subtracts @i from @v.
9178 + */
9179 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9180 +{
9181 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9182 : "=m" (v->counter)
9183 : "ir" (i), "m" (v->counter));
9184 }
9185 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9186 {
9187 unsigned char c;
9188
9189 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9190 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9191 +
9192 +#ifdef CONFIG_PAX_REFCOUNT
9193 + "jno 0f\n"
9194 + LOCK_PREFIX "addl %2,%0\n"
9195 + "int $4\n0:\n"
9196 + _ASM_EXTABLE(0b, 0b)
9197 +#endif
9198 +
9199 + "sete %1\n"
9200 : "=m" (v->counter), "=qm" (c)
9201 : "ir" (i), "m" (v->counter) : "memory");
9202 return c;
9203 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9204 */
9205 static inline void atomic_inc(atomic_t *v)
9206 {
9207 - asm volatile(LOCK_PREFIX "incl %0"
9208 + asm volatile(LOCK_PREFIX "incl %0\n"
9209 +
9210 +#ifdef CONFIG_PAX_REFCOUNT
9211 + "jno 0f\n"
9212 + LOCK_PREFIX "decl %0\n"
9213 + "int $4\n0:\n"
9214 + _ASM_EXTABLE(0b, 0b)
9215 +#endif
9216 +
9217 + : "=m" (v->counter)
9218 + : "m" (v->counter));
9219 +}
9220 +
9221 +/**
9222 + * atomic_inc_unchecked - increment atomic variable
9223 + * @v: pointer of type atomic_unchecked_t
9224 + *
9225 + * Atomically increments @v by 1.
9226 + */
9227 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9228 +{
9229 + asm volatile(LOCK_PREFIX "incl %0\n"
9230 : "=m" (v->counter)
9231 : "m" (v->counter));
9232 }
9233 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9234 */
9235 static inline void atomic_dec(atomic_t *v)
9236 {
9237 - asm volatile(LOCK_PREFIX "decl %0"
9238 + asm volatile(LOCK_PREFIX "decl %0\n"
9239 +
9240 +#ifdef CONFIG_PAX_REFCOUNT
9241 + "jno 0f\n"
9242 + LOCK_PREFIX "incl %0\n"
9243 + "int $4\n0:\n"
9244 + _ASM_EXTABLE(0b, 0b)
9245 +#endif
9246 +
9247 + : "=m" (v->counter)
9248 + : "m" (v->counter));
9249 +}
9250 +
9251 +/**
9252 + * atomic_dec_unchecked - decrement atomic variable
9253 + * @v: pointer of type atomic_unchecked_t
9254 + *
9255 + * Atomically decrements @v by 1.
9256 + */
9257 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9258 +{
9259 + asm volatile(LOCK_PREFIX "decl %0\n"
9260 : "=m" (v->counter)
9261 : "m" (v->counter));
9262 }
9263 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9264 {
9265 unsigned char c;
9266
9267 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9268 + asm volatile(LOCK_PREFIX "decl %0\n"
9269 +
9270 +#ifdef CONFIG_PAX_REFCOUNT
9271 + "jno 0f\n"
9272 + LOCK_PREFIX "incl %0\n"
9273 + "int $4\n0:\n"
9274 + _ASM_EXTABLE(0b, 0b)
9275 +#endif
9276 +
9277 + "sete %1\n"
9278 : "=m" (v->counter), "=qm" (c)
9279 : "m" (v->counter) : "memory");
9280 return c != 0;
9281 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9282 {
9283 unsigned char c;
9284
9285 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9286 + asm volatile(LOCK_PREFIX "incl %0\n"
9287 +
9288 +#ifdef CONFIG_PAX_REFCOUNT
9289 + "jno 0f\n"
9290 + LOCK_PREFIX "decl %0\n"
9291 + "int $4\n0:\n"
9292 + _ASM_EXTABLE(0b, 0b)
9293 +#endif
9294 +
9295 + "sete %1\n"
9296 + : "=m" (v->counter), "=qm" (c)
9297 + : "m" (v->counter) : "memory");
9298 + return c != 0;
9299 +}
9300 +
9301 +/**
9302 + * atomic_inc_and_test_unchecked - increment and test
9303 + * @v: pointer of type atomic_unchecked_t
9304 + *
9305 + * Atomically increments @v by 1
9306 + * and returns true if the result is zero, or false for all
9307 + * other cases.
9308 + */
9309 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9310 +{
9311 + unsigned char c;
9312 +
9313 + asm volatile(LOCK_PREFIX "incl %0\n"
9314 + "sete %1\n"
9315 : "=m" (v->counter), "=qm" (c)
9316 : "m" (v->counter) : "memory");
9317 return c != 0;
9318 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9319 {
9320 unsigned char c;
9321
9322 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9323 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9324 +
9325 +#ifdef CONFIG_PAX_REFCOUNT
9326 + "jno 0f\n"
9327 + LOCK_PREFIX "subl %2,%0\n"
9328 + "int $4\n0:\n"
9329 + _ASM_EXTABLE(0b, 0b)
9330 +#endif
9331 +
9332 + "sets %1\n"
9333 : "=m" (v->counter), "=qm" (c)
9334 : "ir" (i), "m" (v->counter) : "memory");
9335 return c;
9336 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9337 static inline int atomic_add_return(int i, atomic_t *v)
9338 {
9339 int __i = i;
9340 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9341 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9342 +
9343 +#ifdef CONFIG_PAX_REFCOUNT
9344 + "jno 0f\n"
9345 + "movl %0, %1\n"
9346 + "int $4\n0:\n"
9347 + _ASM_EXTABLE(0b, 0b)
9348 +#endif
9349 +
9350 + : "+r" (i), "+m" (v->counter)
9351 + : : "memory");
9352 + return i + __i;
9353 +}
9354 +
9355 +/**
9356 + * atomic_add_return_unchecked - add and return
9357 + * @i: integer value to add
9358 + * @v: pointer of type atomic_unchecked_t
9359 + *
9360 + * Atomically adds @i to @v and returns @i + @v
9361 + */
9362 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9363 +{
9364 + int __i = i;
9365 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9366 : "+r" (i), "+m" (v->counter)
9367 : : "memory");
9368 return i + __i;
9369 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9370 }
9371
9372 #define atomic_inc_return(v) (atomic_add_return(1, v))
9373 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9374 +{
9375 + return atomic_add_return_unchecked(1, v);
9376 +}
9377 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9378
9379 /* The 64-bit atomic type */
9380 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9381 }
9382
9383 /**
9384 + * atomic64_read_unchecked - read atomic64 variable
9385 + * @v: pointer of type atomic64_unchecked_t
9386 + *
9387 + * Atomically reads the value of @v.
9388 + * Doesn't imply a read memory barrier.
9389 + */
9390 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9391 +{
9392 + return v->counter;
9393 +}
9394 +
9395 +/**
9396 * atomic64_set - set atomic64 variable
9397 * @v: pointer to type atomic64_t
9398 * @i: required value
9399 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9400 }
9401
9402 /**
9403 + * atomic64_set_unchecked - set atomic64 variable
9404 + * @v: pointer to type atomic64_unchecked_t
9405 + * @i: required value
9406 + *
9407 + * Atomically sets the value of @v to @i.
9408 + */
9409 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9410 +{
9411 + v->counter = i;
9412 +}
9413 +
9414 +/**
9415 * atomic64_add - add integer to atomic64 variable
9416 * @i: integer value to add
9417 * @v: pointer to type atomic64_t
9418 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9419 */
9420 static inline void atomic64_add(long i, atomic64_t *v)
9421 {
9422 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9423 +
9424 +#ifdef CONFIG_PAX_REFCOUNT
9425 + "jno 0f\n"
9426 + LOCK_PREFIX "subq %1,%0\n"
9427 + "int $4\n0:\n"
9428 + _ASM_EXTABLE(0b, 0b)
9429 +#endif
9430 +
9431 + : "=m" (v->counter)
9432 + : "er" (i), "m" (v->counter));
9433 +}
9434 +
9435 +/**
9436 + * atomic64_add_unchecked - add integer to atomic64 variable
9437 + * @i: integer value to add
9438 + * @v: pointer to type atomic64_unchecked_t
9439 + *
9440 + * Atomically adds @i to @v.
9441 + */
9442 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9443 +{
9444 asm volatile(LOCK_PREFIX "addq %1,%0"
9445 : "=m" (v->counter)
9446 : "er" (i), "m" (v->counter));
9447 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9448 */
9449 static inline void atomic64_sub(long i, atomic64_t *v)
9450 {
9451 - asm volatile(LOCK_PREFIX "subq %1,%0"
9452 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9453 +
9454 +#ifdef CONFIG_PAX_REFCOUNT
9455 + "jno 0f\n"
9456 + LOCK_PREFIX "addq %1,%0\n"
9457 + "int $4\n0:\n"
9458 + _ASM_EXTABLE(0b, 0b)
9459 +#endif
9460 +
9461 : "=m" (v->counter)
9462 : "er" (i), "m" (v->counter));
9463 }
9464 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9465 {
9466 unsigned char c;
9467
9468 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9469 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9470 +
9471 +#ifdef CONFIG_PAX_REFCOUNT
9472 + "jno 0f\n"
9473 + LOCK_PREFIX "addq %2,%0\n"
9474 + "int $4\n0:\n"
9475 + _ASM_EXTABLE(0b, 0b)
9476 +#endif
9477 +
9478 + "sete %1\n"
9479 : "=m" (v->counter), "=qm" (c)
9480 : "er" (i), "m" (v->counter) : "memory");
9481 return c;
9482 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9483 */
9484 static inline void atomic64_inc(atomic64_t *v)
9485 {
9486 + asm volatile(LOCK_PREFIX "incq %0\n"
9487 +
9488 +#ifdef CONFIG_PAX_REFCOUNT
9489 + "jno 0f\n"
9490 + LOCK_PREFIX "decq %0\n"
9491 + "int $4\n0:\n"
9492 + _ASM_EXTABLE(0b, 0b)
9493 +#endif
9494 +
9495 + : "=m" (v->counter)
9496 + : "m" (v->counter));
9497 +}
9498 +
9499 +/**
9500 + * atomic64_inc_unchecked - increment atomic64 variable
9501 + * @v: pointer to type atomic64_unchecked_t
9502 + *
9503 + * Atomically increments @v by 1.
9504 + */
9505 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9506 +{
9507 asm volatile(LOCK_PREFIX "incq %0"
9508 : "=m" (v->counter)
9509 : "m" (v->counter));
9510 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9511 */
9512 static inline void atomic64_dec(atomic64_t *v)
9513 {
9514 - asm volatile(LOCK_PREFIX "decq %0"
9515 + asm volatile(LOCK_PREFIX "decq %0\n"
9516 +
9517 +#ifdef CONFIG_PAX_REFCOUNT
9518 + "jno 0f\n"
9519 + LOCK_PREFIX "incq %0\n"
9520 + "int $4\n0:\n"
9521 + _ASM_EXTABLE(0b, 0b)
9522 +#endif
9523 +
9524 + : "=m" (v->counter)
9525 + : "m" (v->counter));
9526 +}
9527 +
9528 +/**
9529 + * atomic64_dec_unchecked - decrement atomic64 variable
9530 + * @v: pointer to type atomic64_t
9531 + *
9532 + * Atomically decrements @v by 1.
9533 + */
9534 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9535 +{
9536 + asm volatile(LOCK_PREFIX "decq %0\n"
9537 : "=m" (v->counter)
9538 : "m" (v->counter));
9539 }
9540 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9541 {
9542 unsigned char c;
9543
9544 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9545 + asm volatile(LOCK_PREFIX "decq %0\n"
9546 +
9547 +#ifdef CONFIG_PAX_REFCOUNT
9548 + "jno 0f\n"
9549 + LOCK_PREFIX "incq %0\n"
9550 + "int $4\n0:\n"
9551 + _ASM_EXTABLE(0b, 0b)
9552 +#endif
9553 +
9554 + "sete %1\n"
9555 : "=m" (v->counter), "=qm" (c)
9556 : "m" (v->counter) : "memory");
9557 return c != 0;
9558 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9559 {
9560 unsigned char c;
9561
9562 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9563 + asm volatile(LOCK_PREFIX "incq %0\n"
9564 +
9565 +#ifdef CONFIG_PAX_REFCOUNT
9566 + "jno 0f\n"
9567 + LOCK_PREFIX "decq %0\n"
9568 + "int $4\n0:\n"
9569 + _ASM_EXTABLE(0b, 0b)
9570 +#endif
9571 +
9572 + "sete %1\n"
9573 : "=m" (v->counter), "=qm" (c)
9574 : "m" (v->counter) : "memory");
9575 return c != 0;
9576 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9577 {
9578 unsigned char c;
9579
9580 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9581 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9582 +
9583 +#ifdef CONFIG_PAX_REFCOUNT
9584 + "jno 0f\n"
9585 + LOCK_PREFIX "subq %2,%0\n"
9586 + "int $4\n0:\n"
9587 + _ASM_EXTABLE(0b, 0b)
9588 +#endif
9589 +
9590 + "sets %1\n"
9591 : "=m" (v->counter), "=qm" (c)
9592 : "er" (i), "m" (v->counter) : "memory");
9593 return c;
9594 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9595 static inline long atomic64_add_return(long i, atomic64_t *v)
9596 {
9597 long __i = i;
9598 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9599 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9600 +
9601 +#ifdef CONFIG_PAX_REFCOUNT
9602 + "jno 0f\n"
9603 + "movq %0, %1\n"
9604 + "int $4\n0:\n"
9605 + _ASM_EXTABLE(0b, 0b)
9606 +#endif
9607 +
9608 + : "+r" (i), "+m" (v->counter)
9609 + : : "memory");
9610 + return i + __i;
9611 +}
9612 +
9613 +/**
9614 + * atomic64_add_return_unchecked - add and return
9615 + * @i: integer value to add
9616 + * @v: pointer to type atomic64_unchecked_t
9617 + *
9618 + * Atomically adds @i to @v and returns @i + @v
9619 + */
9620 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9621 +{
9622 + long __i = i;
9623 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9624 : "+r" (i), "+m" (v->counter)
9625 : : "memory");
9626 return i + __i;
9627 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9628 }
9629
9630 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9631 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9632 +{
9633 + return atomic64_add_return_unchecked(1, v);
9634 +}
9635 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9636
9637 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9638 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9639 return cmpxchg(&v->counter, old, new);
9640 }
9641
9642 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9643 +{
9644 + return cmpxchg(&v->counter, old, new);
9645 +}
9646 +
9647 static inline long atomic64_xchg(atomic64_t *v, long new)
9648 {
9649 return xchg(&v->counter, new);
9650 }
9651
9652 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9653 +{
9654 + return xchg(&v->counter, new);
9655 +}
9656 +
9657 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9658 {
9659 return cmpxchg(&v->counter, old, new);
9660 }
9661
9662 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9663 +{
9664 + return cmpxchg(&v->counter, old, new);
9665 +}
9666 +
9667 static inline long atomic_xchg(atomic_t *v, int new)
9668 {
9669 return xchg(&v->counter, new);
9670 }
9671
9672 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9673 +{
9674 + return xchg(&v->counter, new);
9675 +}
9676 +
9677 /**
9678 * atomic_add_unless - add unless the number is a given value
9679 * @v: pointer of type atomic_t
9680 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9681 */
9682 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9683 {
9684 - int c, old;
9685 + int c, old, new;
9686 c = atomic_read(v);
9687 for (;;) {
9688 - if (unlikely(c == (u)))
9689 + if (unlikely(c == u))
9690 break;
9691 - old = atomic_cmpxchg((v), c, c + (a));
9692 +
9693 + asm volatile("addl %2,%0\n"
9694 +
9695 +#ifdef CONFIG_PAX_REFCOUNT
9696 + "jno 0f\n"
9697 + "subl %2,%0\n"
9698 + "int $4\n0:\n"
9699 + _ASM_EXTABLE(0b, 0b)
9700 +#endif
9701 +
9702 + : "=r" (new)
9703 + : "0" (c), "ir" (a));
9704 +
9705 + old = atomic_cmpxchg(v, c, new);
9706 if (likely(old == c))
9707 break;
9708 c = old;
9709 }
9710 - return c != (u);
9711 + return c != u;
9712 }
9713
9714 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9715 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9716 */
9717 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9718 {
9719 - long c, old;
9720 + long c, old, new;
9721 c = atomic64_read(v);
9722 for (;;) {
9723 - if (unlikely(c == (u)))
9724 + if (unlikely(c == u))
9725 break;
9726 - old = atomic64_cmpxchg((v), c, c + (a));
9727 +
9728 + asm volatile("addq %2,%0\n"
9729 +
9730 +#ifdef CONFIG_PAX_REFCOUNT
9731 + "jno 0f\n"
9732 + "subq %2,%0\n"
9733 + "int $4\n0:\n"
9734 + _ASM_EXTABLE(0b, 0b)
9735 +#endif
9736 +
9737 + : "=r" (new)
9738 + : "0" (c), "er" (a));
9739 +
9740 + old = atomic64_cmpxchg(v, c, new);
9741 if (likely(old == c))
9742 break;
9743 c = old;
9744 }
9745 - return c != (u);
9746 + return c != u;
9747 }
9748
9749 /**
9750 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9751 index 02b47a6..d5c4b15 100644
9752 --- a/arch/x86/include/asm/bitops.h
9753 +++ b/arch/x86/include/asm/bitops.h
9754 @@ -38,7 +38,7 @@
9755 * a mask operation on a byte.
9756 */
9757 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9758 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9759 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9760 #define CONST_MASK(nr) (1 << ((nr) & 7))
9761
9762 /**
9763 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9764 index 7a10659..8bbf355 100644
9765 --- a/arch/x86/include/asm/boot.h
9766 +++ b/arch/x86/include/asm/boot.h
9767 @@ -11,10 +11,15 @@
9768 #include <asm/pgtable_types.h>
9769
9770 /* Physical address where kernel should be loaded. */
9771 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9772 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9773 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9774 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9775
9776 +#ifndef __ASSEMBLY__
9777 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9778 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9779 +#endif
9780 +
9781 /* Minimum kernel alignment, as a power of two */
9782 #ifdef CONFIG_X86_64
9783 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9784 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9785 index 549860d..7d45f68 100644
9786 --- a/arch/x86/include/asm/cache.h
9787 +++ b/arch/x86/include/asm/cache.h
9788 @@ -5,9 +5,10 @@
9789
9790 /* L1 cache line size */
9791 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9792 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9793 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9794
9795 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9796 +#define __read_only __attribute__((__section__(".data.read_only")))
9797
9798 #ifdef CONFIG_X86_VSMP
9799 /* vSMP Internode cacheline shift */
9800 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9801 index b54f6af..5b376a6 100644
9802 --- a/arch/x86/include/asm/cacheflush.h
9803 +++ b/arch/x86/include/asm/cacheflush.h
9804 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9805 static inline unsigned long get_page_memtype(struct page *pg)
9806 {
9807 if (!PageUncached(pg) && !PageWC(pg))
9808 - return -1;
9809 + return ~0UL;
9810 else if (!PageUncached(pg) && PageWC(pg))
9811 return _PAGE_CACHE_WC;
9812 else if (PageUncached(pg) && !PageWC(pg))
9813 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9814 SetPageWC(pg);
9815 break;
9816 default:
9817 - case -1:
9818 + case ~0UL:
9819 ClearPageUncached(pg);
9820 ClearPageWC(pg);
9821 break;
9822 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9823 index 0e63c9a..ab8d972 100644
9824 --- a/arch/x86/include/asm/calling.h
9825 +++ b/arch/x86/include/asm/calling.h
9826 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9827 * for assembly code:
9828 */
9829
9830 -#define R15 0
9831 -#define R14 8
9832 -#define R13 16
9833 -#define R12 24
9834 -#define RBP 32
9835 -#define RBX 40
9836 +#define R15 (0)
9837 +#define R14 (8)
9838 +#define R13 (16)
9839 +#define R12 (24)
9840 +#define RBP (32)
9841 +#define RBX (40)
9842
9843 /* arguments: interrupts/non tracing syscalls only save up to here: */
9844 -#define R11 48
9845 -#define R10 56
9846 -#define R9 64
9847 -#define R8 72
9848 -#define RAX 80
9849 -#define RCX 88
9850 -#define RDX 96
9851 -#define RSI 104
9852 -#define RDI 112
9853 -#define ORIG_RAX 120 /* + error_code */
9854 +#define R11 (48)
9855 +#define R10 (56)
9856 +#define R9 (64)
9857 +#define R8 (72)
9858 +#define RAX (80)
9859 +#define RCX (88)
9860 +#define RDX (96)
9861 +#define RSI (104)
9862 +#define RDI (112)
9863 +#define ORIG_RAX (120) /* + error_code */
9864 /* end of arguments */
9865
9866 /* cpu exception frame or undefined in case of fast syscall: */
9867 -#define RIP 128
9868 -#define CS 136
9869 -#define EFLAGS 144
9870 -#define RSP 152
9871 -#define SS 160
9872 +#define RIP (128)
9873 +#define CS (136)
9874 +#define EFLAGS (144)
9875 +#define RSP (152)
9876 +#define SS (160)
9877
9878 #define ARGOFFSET R11
9879 #define SWFRAME ORIG_RAX
9880 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9881 index 46fc474..b02b0f9 100644
9882 --- a/arch/x86/include/asm/checksum_32.h
9883 +++ b/arch/x86/include/asm/checksum_32.h
9884 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9885 int len, __wsum sum,
9886 int *src_err_ptr, int *dst_err_ptr);
9887
9888 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9889 + int len, __wsum sum,
9890 + int *src_err_ptr, int *dst_err_ptr);
9891 +
9892 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9893 + int len, __wsum sum,
9894 + int *src_err_ptr, int *dst_err_ptr);
9895 +
9896 /*
9897 * Note: when you get a NULL pointer exception here this means someone
9898 * passed in an incorrect kernel address to one of these functions.
9899 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9900 int *err_ptr)
9901 {
9902 might_sleep();
9903 - return csum_partial_copy_generic((__force void *)src, dst,
9904 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9905 len, sum, err_ptr, NULL);
9906 }
9907
9908 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9909 {
9910 might_sleep();
9911 if (access_ok(VERIFY_WRITE, dst, len))
9912 - return csum_partial_copy_generic(src, (__force void *)dst,
9913 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9914 len, sum, NULL, err_ptr);
9915
9916 if (len)
9917 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9918 index 617bd56..7b047a1 100644
9919 --- a/arch/x86/include/asm/desc.h
9920 +++ b/arch/x86/include/asm/desc.h
9921 @@ -4,6 +4,7 @@
9922 #include <asm/desc_defs.h>
9923 #include <asm/ldt.h>
9924 #include <asm/mmu.h>
9925 +#include <asm/pgtable.h>
9926 #include <linux/smp.h>
9927
9928 static inline void fill_ldt(struct desc_struct *desc,
9929 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9930 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9931 desc->type = (info->read_exec_only ^ 1) << 1;
9932 desc->type |= info->contents << 2;
9933 + desc->type |= info->seg_not_present ^ 1;
9934 desc->s = 1;
9935 desc->dpl = 0x3;
9936 desc->p = info->seg_not_present ^ 1;
9937 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9938 }
9939
9940 extern struct desc_ptr idt_descr;
9941 -extern gate_desc idt_table[];
9942 -
9943 -struct gdt_page {
9944 - struct desc_struct gdt[GDT_ENTRIES];
9945 -} __attribute__((aligned(PAGE_SIZE)));
9946 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9947 +extern gate_desc idt_table[256];
9948
9949 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9950 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9951 {
9952 - return per_cpu(gdt_page, cpu).gdt;
9953 + return cpu_gdt_table[cpu];
9954 }
9955
9956 #ifdef CONFIG_X86_64
9957 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9958 unsigned long base, unsigned dpl, unsigned flags,
9959 unsigned short seg)
9960 {
9961 - gate->a = (seg << 16) | (base & 0xffff);
9962 - gate->b = (base & 0xffff0000) |
9963 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9964 + gate->gate.offset_low = base;
9965 + gate->gate.seg = seg;
9966 + gate->gate.reserved = 0;
9967 + gate->gate.type = type;
9968 + gate->gate.s = 0;
9969 + gate->gate.dpl = dpl;
9970 + gate->gate.p = 1;
9971 + gate->gate.offset_high = base >> 16;
9972 }
9973
9974 #endif
9975 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9976 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9977 const gate_desc *gate)
9978 {
9979 + pax_open_kernel();
9980 memcpy(&idt[entry], gate, sizeof(*gate));
9981 + pax_close_kernel();
9982 }
9983
9984 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9985 const void *desc)
9986 {
9987 + pax_open_kernel();
9988 memcpy(&ldt[entry], desc, 8);
9989 + pax_close_kernel();
9990 }
9991
9992 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9993 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9994 size = sizeof(struct desc_struct);
9995 break;
9996 }
9997 +
9998 + pax_open_kernel();
9999 memcpy(&gdt[entry], desc, size);
10000 + pax_close_kernel();
10001 }
10002
10003 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10004 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10005
10006 static inline void native_load_tr_desc(void)
10007 {
10008 + pax_open_kernel();
10009 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10010 + pax_close_kernel();
10011 }
10012
10013 static inline void native_load_gdt(const struct desc_ptr *dtr)
10014 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10015 unsigned int i;
10016 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10017
10018 + pax_open_kernel();
10019 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10020 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10021 + pax_close_kernel();
10022 }
10023
10024 #define _LDT_empty(info) \
10025 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10026 desc->limit = (limit >> 16) & 0xf;
10027 }
10028
10029 -static inline void _set_gate(int gate, unsigned type, void *addr,
10030 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10031 unsigned dpl, unsigned ist, unsigned seg)
10032 {
10033 gate_desc s;
10034 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10035 * Pentium F0 0F bugfix can have resulted in the mapped
10036 * IDT being write-protected.
10037 */
10038 -static inline void set_intr_gate(unsigned int n, void *addr)
10039 +static inline void set_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10043 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10044 /*
10045 * This routine sets up an interrupt gate at directory privilege level 3.
10046 */
10047 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10048 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10049 {
10050 BUG_ON((unsigned)n > 0xFF);
10051 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10052 }
10053
10054 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10055 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10056 {
10057 BUG_ON((unsigned)n > 0xFF);
10058 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10059 }
10060
10061 -static inline void set_trap_gate(unsigned int n, void *addr)
10062 +static inline void set_trap_gate(unsigned int n, const void *addr)
10063 {
10064 BUG_ON((unsigned)n > 0xFF);
10065 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10066 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10067 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10071 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10072 }
10073
10074 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10075 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10076 {
10077 BUG_ON((unsigned)n > 0xFF);
10078 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10079 }
10080
10081 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10082 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10083 {
10084 BUG_ON((unsigned)n > 0xFF);
10085 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10086 }
10087
10088 +#ifdef CONFIG_X86_32
10089 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10090 +{
10091 + struct desc_struct d;
10092 +
10093 + if (likely(limit))
10094 + limit = (limit - 1UL) >> PAGE_SHIFT;
10095 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10096 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10097 +}
10098 +#endif
10099 +
10100 #endif /* _ASM_X86_DESC_H */
10101 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10102 index 9d66848..6b4a691 100644
10103 --- a/arch/x86/include/asm/desc_defs.h
10104 +++ b/arch/x86/include/asm/desc_defs.h
10105 @@ -31,6 +31,12 @@ struct desc_struct {
10106 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10107 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10108 };
10109 + struct {
10110 + u16 offset_low;
10111 + u16 seg;
10112 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10113 + unsigned offset_high: 16;
10114 + } gate;
10115 };
10116 } __attribute__((packed));
10117
10118 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10119 index cee34e9..a7c3fa2 100644
10120 --- a/arch/x86/include/asm/device.h
10121 +++ b/arch/x86/include/asm/device.h
10122 @@ -6,7 +6,7 @@ struct dev_archdata {
10123 void *acpi_handle;
10124 #endif
10125 #ifdef CONFIG_X86_64
10126 -struct dma_map_ops *dma_ops;
10127 + const struct dma_map_ops *dma_ops;
10128 #endif
10129 #ifdef CONFIG_DMAR
10130 void *iommu; /* hook for IOMMU specific extension */
10131 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10132 index 6a25d5d..786b202 100644
10133 --- a/arch/x86/include/asm/dma-mapping.h
10134 +++ b/arch/x86/include/asm/dma-mapping.h
10135 @@ -25,9 +25,9 @@ extern int iommu_merge;
10136 extern struct device x86_dma_fallback_dev;
10137 extern int panic_on_overflow;
10138
10139 -extern struct dma_map_ops *dma_ops;
10140 +extern const struct dma_map_ops *dma_ops;
10141
10142 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10143 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return dma_ops;
10147 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10148 /* Make sure we keep the same behaviour */
10149 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10150 {
10151 - struct dma_map_ops *ops = get_dma_ops(dev);
10152 + const struct dma_map_ops *ops = get_dma_ops(dev);
10153 if (ops->mapping_error)
10154 return ops->mapping_error(dev, dma_addr);
10155
10156 @@ -122,7 +122,7 @@ static inline void *
10157 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10158 gfp_t gfp)
10159 {
10160 - struct dma_map_ops *ops = get_dma_ops(dev);
10161 + const struct dma_map_ops *ops = get_dma_ops(dev);
10162 void *memory;
10163
10164 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10165 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10166 static inline void dma_free_coherent(struct device *dev, size_t size,
10167 void *vaddr, dma_addr_t bus)
10168 {
10169 - struct dma_map_ops *ops = get_dma_ops(dev);
10170 + const struct dma_map_ops *ops = get_dma_ops(dev);
10171
10172 WARN_ON(irqs_disabled()); /* for portability */
10173
10174 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10175 index 40b4e61..40d8133 100644
10176 --- a/arch/x86/include/asm/e820.h
10177 +++ b/arch/x86/include/asm/e820.h
10178 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10179 #define ISA_END_ADDRESS 0x100000
10180 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10181
10182 -#define BIOS_BEGIN 0x000a0000
10183 +#define BIOS_BEGIN 0x000c0000
10184 #define BIOS_END 0x00100000
10185
10186 #ifdef __KERNEL__
10187 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10188 index 8ac9d9a..0a6c96e 100644
10189 --- a/arch/x86/include/asm/elf.h
10190 +++ b/arch/x86/include/asm/elf.h
10191 @@ -257,7 +257,25 @@ extern int force_personality32;
10192 the loader. We need to make sure that it is out of the way of the program
10193 that it will "exec", and that there is sufficient room for the brk. */
10194
10195 +#ifdef CONFIG_PAX_SEGMEXEC
10196 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10197 +#else
10198 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10199 +#endif
10200 +
10201 +#ifdef CONFIG_PAX_ASLR
10202 +#ifdef CONFIG_X86_32
10203 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10204 +
10205 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10206 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10207 +#else
10208 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10209 +
10210 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10211 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10212 +#endif
10213 +#endif
10214
10215 /* This yields a mask that user programs can use to figure out what
10216 instruction set this CPU supports. This could be done in user space,
10217 @@ -310,9 +328,7 @@ do { \
10218
10219 #define ARCH_DLINFO \
10220 do { \
10221 - if (vdso_enabled) \
10222 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10223 - (unsigned long)current->mm->context.vdso); \
10224 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10225 } while (0)
10226
10227 #define AT_SYSINFO 32
10228 @@ -323,7 +339,7 @@ do { \
10229
10230 #endif /* !CONFIG_X86_32 */
10231
10232 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10233 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10234
10235 #define VDSO_ENTRY \
10236 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10237 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10238 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10239 #define compat_arch_setup_additional_pages syscall32_setup_pages
10240
10241 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10242 -#define arch_randomize_brk arch_randomize_brk
10243 -
10244 #endif /* _ASM_X86_ELF_H */
10245 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10246 index cc70c1c..d96d011 100644
10247 --- a/arch/x86/include/asm/emergency-restart.h
10248 +++ b/arch/x86/include/asm/emergency-restart.h
10249 @@ -15,6 +15,6 @@ enum reboot_type {
10250
10251 extern enum reboot_type reboot_type;
10252
10253 -extern void machine_emergency_restart(void);
10254 +extern void machine_emergency_restart(void) __noreturn;
10255
10256 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10257 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10258 index 1f11ce4..7caabd1 100644
10259 --- a/arch/x86/include/asm/futex.h
10260 +++ b/arch/x86/include/asm/futex.h
10261 @@ -12,16 +12,18 @@
10262 #include <asm/system.h>
10263
10264 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10265 + typecheck(u32 __user *, uaddr); \
10266 asm volatile("1:\t" insn "\n" \
10267 "2:\t.section .fixup,\"ax\"\n" \
10268 "3:\tmov\t%3, %1\n" \
10269 "\tjmp\t2b\n" \
10270 "\t.previous\n" \
10271 _ASM_EXTABLE(1b, 3b) \
10272 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10273 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10274 : "i" (-EFAULT), "0" (oparg), "1" (0))
10275
10276 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10277 + typecheck(u32 __user *, uaddr); \
10278 asm volatile("1:\tmovl %2, %0\n" \
10279 "\tmovl\t%0, %3\n" \
10280 "\t" insn "\n" \
10281 @@ -34,10 +36,10 @@
10282 _ASM_EXTABLE(1b, 4b) \
10283 _ASM_EXTABLE(2b, 4b) \
10284 : "=&a" (oldval), "=&r" (ret), \
10285 - "+m" (*uaddr), "=&r" (tem) \
10286 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10287 : "r" (oparg), "i" (-EFAULT), "1" (0))
10288
10289 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10290 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10291 {
10292 int op = (encoded_op >> 28) & 7;
10293 int cmp = (encoded_op >> 24) & 15;
10294 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10295
10296 switch (op) {
10297 case FUTEX_OP_SET:
10298 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10299 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10300 break;
10301 case FUTEX_OP_ADD:
10302 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10303 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10304 uaddr, oparg);
10305 break;
10306 case FUTEX_OP_OR:
10307 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10308 return ret;
10309 }
10310
10311 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10312 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10313 int newval)
10314 {
10315
10316 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10317 return -ENOSYS;
10318 #endif
10319
10320 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10321 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10322 return -EFAULT;
10323
10324 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10325 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10326 "2:\t.section .fixup, \"ax\"\n"
10327 "3:\tmov %2, %0\n"
10328 "\tjmp 2b\n"
10329 "\t.previous\n"
10330 _ASM_EXTABLE(1b, 3b)
10331 - : "=a" (oldval), "+m" (*uaddr)
10332 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10333 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10334 : "memory"
10335 );
10336 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10337 index ba180d9..3bad351 100644
10338 --- a/arch/x86/include/asm/hw_irq.h
10339 +++ b/arch/x86/include/asm/hw_irq.h
10340 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10341 extern void enable_IO_APIC(void);
10342
10343 /* Statistics */
10344 -extern atomic_t irq_err_count;
10345 -extern atomic_t irq_mis_count;
10346 +extern atomic_unchecked_t irq_err_count;
10347 +extern atomic_unchecked_t irq_mis_count;
10348
10349 /* EISA */
10350 extern void eisa_set_level_irq(unsigned int irq);
10351 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10352 index 0b20bbb..4cb1396 100644
10353 --- a/arch/x86/include/asm/i387.h
10354 +++ b/arch/x86/include/asm/i387.h
10355 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10356 {
10357 int err;
10358
10359 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10360 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10361 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10362 +#endif
10363 +
10364 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10365 "2:\n"
10366 ".section .fixup,\"ax\"\n"
10367 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10368 {
10369 int err;
10370
10371 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10372 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10373 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10374 +#endif
10375 +
10376 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10377 "2:\n"
10378 ".section .fixup,\"ax\"\n"
10379 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10380 }
10381
10382 /* We need a safe address that is cheap to find and that is already
10383 - in L1 during context switch. The best choices are unfortunately
10384 - different for UP and SMP */
10385 -#ifdef CONFIG_SMP
10386 -#define safe_address (__per_cpu_offset[0])
10387 -#else
10388 -#define safe_address (kstat_cpu(0).cpustat.user)
10389 -#endif
10390 + in L1 during context switch. */
10391 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10392
10393 /*
10394 * These must be called with preempt disabled
10395 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10396 struct thread_info *me = current_thread_info();
10397 preempt_disable();
10398 if (me->status & TS_USEDFPU)
10399 - __save_init_fpu(me->task);
10400 + __save_init_fpu(current);
10401 else
10402 clts();
10403 }
10404 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10405 index a299900..15c5410 100644
10406 --- a/arch/x86/include/asm/io_32.h
10407 +++ b/arch/x86/include/asm/io_32.h
10408 @@ -3,6 +3,7 @@
10409
10410 #include <linux/string.h>
10411 #include <linux/compiler.h>
10412 +#include <asm/processor.h>
10413
10414 /*
10415 * This file contains the definitions for the x86 IO instructions
10416 @@ -42,6 +43,17 @@
10417
10418 #ifdef __KERNEL__
10419
10420 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10421 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10422 +{
10423 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10424 +}
10425 +
10426 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10427 +{
10428 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10429 +}
10430 +
10431 #include <asm-generic/iomap.h>
10432
10433 #include <linux/vmalloc.h>
10434 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10435 index 2440678..c158b88 100644
10436 --- a/arch/x86/include/asm/io_64.h
10437 +++ b/arch/x86/include/asm/io_64.h
10438 @@ -140,6 +140,17 @@ __OUTS(l)
10439
10440 #include <linux/vmalloc.h>
10441
10442 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10443 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10444 +{
10445 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10446 +}
10447 +
10448 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10449 +{
10450 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10451 +}
10452 +
10453 #include <asm-generic/iomap.h>
10454
10455 void __memcpy_fromio(void *, unsigned long, unsigned);
10456 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10457 index fd6d21b..8b13915 100644
10458 --- a/arch/x86/include/asm/iommu.h
10459 +++ b/arch/x86/include/asm/iommu.h
10460 @@ -3,7 +3,7 @@
10461
10462 extern void pci_iommu_shutdown(void);
10463 extern void no_iommu_init(void);
10464 -extern struct dma_map_ops nommu_dma_ops;
10465 +extern const struct dma_map_ops nommu_dma_ops;
10466 extern int force_iommu, no_iommu;
10467 extern int iommu_detected;
10468 extern int iommu_pass_through;
10469 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10470 index 9e2b952..557206e 100644
10471 --- a/arch/x86/include/asm/irqflags.h
10472 +++ b/arch/x86/include/asm/irqflags.h
10473 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10474 sti; \
10475 sysexit
10476
10477 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10478 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10479 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10480 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10481 +
10482 #else
10483 #define INTERRUPT_RETURN iret
10484 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10485 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10486 index 4fe681d..bb6d40c 100644
10487 --- a/arch/x86/include/asm/kprobes.h
10488 +++ b/arch/x86/include/asm/kprobes.h
10489 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10490 #define BREAKPOINT_INSTRUCTION 0xcc
10491 #define RELATIVEJUMP_INSTRUCTION 0xe9
10492 #define MAX_INSN_SIZE 16
10493 -#define MAX_STACK_SIZE 64
10494 -#define MIN_STACK_SIZE(ADDR) \
10495 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10496 - THREAD_SIZE - (unsigned long)(ADDR))) \
10497 - ? (MAX_STACK_SIZE) \
10498 - : (((unsigned long)current_thread_info()) + \
10499 - THREAD_SIZE - (unsigned long)(ADDR)))
10500 +#define MAX_STACK_SIZE 64UL
10501 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10502
10503 #define flush_insn_slot(p) do { } while (0)
10504
10505 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10506 index 08bc2ff..2e88d1f 100644
10507 --- a/arch/x86/include/asm/kvm_host.h
10508 +++ b/arch/x86/include/asm/kvm_host.h
10509 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10510 bool (*gb_page_enable)(void);
10511
10512 const struct trace_print_flags *exit_reasons_str;
10513 -};
10514 +} __do_const;
10515
10516 -extern struct kvm_x86_ops *kvm_x86_ops;
10517 +extern const struct kvm_x86_ops *kvm_x86_ops;
10518
10519 int kvm_mmu_module_init(void);
10520 void kvm_mmu_module_exit(void);
10521 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10522 index 47b9b6f..815aaa1 100644
10523 --- a/arch/x86/include/asm/local.h
10524 +++ b/arch/x86/include/asm/local.h
10525 @@ -18,26 +18,58 @@ typedef struct {
10526
10527 static inline void local_inc(local_t *l)
10528 {
10529 - asm volatile(_ASM_INC "%0"
10530 + asm volatile(_ASM_INC "%0\n"
10531 +
10532 +#ifdef CONFIG_PAX_REFCOUNT
10533 + "jno 0f\n"
10534 + _ASM_DEC "%0\n"
10535 + "int $4\n0:\n"
10536 + _ASM_EXTABLE(0b, 0b)
10537 +#endif
10538 +
10539 : "+m" (l->a.counter));
10540 }
10541
10542 static inline void local_dec(local_t *l)
10543 {
10544 - asm volatile(_ASM_DEC "%0"
10545 + asm volatile(_ASM_DEC "%0\n"
10546 +
10547 +#ifdef CONFIG_PAX_REFCOUNT
10548 + "jno 0f\n"
10549 + _ASM_INC "%0\n"
10550 + "int $4\n0:\n"
10551 + _ASM_EXTABLE(0b, 0b)
10552 +#endif
10553 +
10554 : "+m" (l->a.counter));
10555 }
10556
10557 static inline void local_add(long i, local_t *l)
10558 {
10559 - asm volatile(_ASM_ADD "%1,%0"
10560 + asm volatile(_ASM_ADD "%1,%0\n"
10561 +
10562 +#ifdef CONFIG_PAX_REFCOUNT
10563 + "jno 0f\n"
10564 + _ASM_SUB "%1,%0\n"
10565 + "int $4\n0:\n"
10566 + _ASM_EXTABLE(0b, 0b)
10567 +#endif
10568 +
10569 : "+m" (l->a.counter)
10570 : "ir" (i));
10571 }
10572
10573 static inline void local_sub(long i, local_t *l)
10574 {
10575 - asm volatile(_ASM_SUB "%1,%0"
10576 + asm volatile(_ASM_SUB "%1,%0\n"
10577 +
10578 +#ifdef CONFIG_PAX_REFCOUNT
10579 + "jno 0f\n"
10580 + _ASM_ADD "%1,%0\n"
10581 + "int $4\n0:\n"
10582 + _ASM_EXTABLE(0b, 0b)
10583 +#endif
10584 +
10585 : "+m" (l->a.counter)
10586 : "ir" (i));
10587 }
10588 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10589 {
10590 unsigned char c;
10591
10592 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10593 + asm volatile(_ASM_SUB "%2,%0\n"
10594 +
10595 +#ifdef CONFIG_PAX_REFCOUNT
10596 + "jno 0f\n"
10597 + _ASM_ADD "%2,%0\n"
10598 + "int $4\n0:\n"
10599 + _ASM_EXTABLE(0b, 0b)
10600 +#endif
10601 +
10602 + "sete %1\n"
10603 : "+m" (l->a.counter), "=qm" (c)
10604 : "ir" (i) : "memory");
10605 return c;
10606 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10607 {
10608 unsigned char c;
10609
10610 - asm volatile(_ASM_DEC "%0; sete %1"
10611 + asm volatile(_ASM_DEC "%0\n"
10612 +
10613 +#ifdef CONFIG_PAX_REFCOUNT
10614 + "jno 0f\n"
10615 + _ASM_INC "%0\n"
10616 + "int $4\n0:\n"
10617 + _ASM_EXTABLE(0b, 0b)
10618 +#endif
10619 +
10620 + "sete %1\n"
10621 : "+m" (l->a.counter), "=qm" (c)
10622 : : "memory");
10623 return c != 0;
10624 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10625 {
10626 unsigned char c;
10627
10628 - asm volatile(_ASM_INC "%0; sete %1"
10629 + asm volatile(_ASM_INC "%0\n"
10630 +
10631 +#ifdef CONFIG_PAX_REFCOUNT
10632 + "jno 0f\n"
10633 + _ASM_DEC "%0\n"
10634 + "int $4\n0:\n"
10635 + _ASM_EXTABLE(0b, 0b)
10636 +#endif
10637 +
10638 + "sete %1\n"
10639 : "+m" (l->a.counter), "=qm" (c)
10640 : : "memory");
10641 return c != 0;
10642 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10643 {
10644 unsigned char c;
10645
10646 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10647 + asm volatile(_ASM_ADD "%2,%0\n"
10648 +
10649 +#ifdef CONFIG_PAX_REFCOUNT
10650 + "jno 0f\n"
10651 + _ASM_SUB "%2,%0\n"
10652 + "int $4\n0:\n"
10653 + _ASM_EXTABLE(0b, 0b)
10654 +#endif
10655 +
10656 + "sets %1\n"
10657 : "+m" (l->a.counter), "=qm" (c)
10658 : "ir" (i) : "memory");
10659 return c;
10660 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10661 #endif
10662 /* Modern 486+ processor */
10663 __i = i;
10664 - asm volatile(_ASM_XADD "%0, %1;"
10665 + asm volatile(_ASM_XADD "%0, %1\n"
10666 +
10667 +#ifdef CONFIG_PAX_REFCOUNT
10668 + "jno 0f\n"
10669 + _ASM_MOV "%0,%1\n"
10670 + "int $4\n0:\n"
10671 + _ASM_EXTABLE(0b, 0b)
10672 +#endif
10673 +
10674 : "+r" (i), "+m" (l->a.counter)
10675 : : "memory");
10676 return i + __i;
10677 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10678 index ef51b50..514ba37 100644
10679 --- a/arch/x86/include/asm/microcode.h
10680 +++ b/arch/x86/include/asm/microcode.h
10681 @@ -12,13 +12,13 @@ struct device;
10682 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10683
10684 struct microcode_ops {
10685 - enum ucode_state (*request_microcode_user) (int cpu,
10686 + enum ucode_state (* const request_microcode_user) (int cpu,
10687 const void __user *buf, size_t size);
10688
10689 - enum ucode_state (*request_microcode_fw) (int cpu,
10690 + enum ucode_state (* const request_microcode_fw) (int cpu,
10691 struct device *device);
10692
10693 - void (*microcode_fini_cpu) (int cpu);
10694 + void (* const microcode_fini_cpu) (int cpu);
10695
10696 /*
10697 * The generic 'microcode_core' part guarantees that
10698 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10699 extern struct ucode_cpu_info ucode_cpu_info[];
10700
10701 #ifdef CONFIG_MICROCODE_INTEL
10702 -extern struct microcode_ops * __init init_intel_microcode(void);
10703 +extern const struct microcode_ops * __init init_intel_microcode(void);
10704 #else
10705 -static inline struct microcode_ops * __init init_intel_microcode(void)
10706 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10707 {
10708 return NULL;
10709 }
10710 #endif /* CONFIG_MICROCODE_INTEL */
10711
10712 #ifdef CONFIG_MICROCODE_AMD
10713 -extern struct microcode_ops * __init init_amd_microcode(void);
10714 +extern const struct microcode_ops * __init init_amd_microcode(void);
10715 #else
10716 -static inline struct microcode_ops * __init init_amd_microcode(void)
10717 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10718 {
10719 return NULL;
10720 }
10721 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10722 index 593e51d..fa69c9a 100644
10723 --- a/arch/x86/include/asm/mman.h
10724 +++ b/arch/x86/include/asm/mman.h
10725 @@ -5,4 +5,14 @@
10726
10727 #include <asm-generic/mman.h>
10728
10729 +#ifdef __KERNEL__
10730 +#ifndef __ASSEMBLY__
10731 +#ifdef CONFIG_X86_32
10732 +#define arch_mmap_check i386_mmap_check
10733 +int i386_mmap_check(unsigned long addr, unsigned long len,
10734 + unsigned long flags);
10735 +#endif
10736 +#endif
10737 +#endif
10738 +
10739 #endif /* _ASM_X86_MMAN_H */
10740 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10741 index 80a1dee..239c67d 100644
10742 --- a/arch/x86/include/asm/mmu.h
10743 +++ b/arch/x86/include/asm/mmu.h
10744 @@ -9,10 +9,23 @@
10745 * we put the segment information here.
10746 */
10747 typedef struct {
10748 - void *ldt;
10749 + struct desc_struct *ldt;
10750 int size;
10751 struct mutex lock;
10752 - void *vdso;
10753 + unsigned long vdso;
10754 +
10755 +#ifdef CONFIG_X86_32
10756 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10757 + unsigned long user_cs_base;
10758 + unsigned long user_cs_limit;
10759 +
10760 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10761 + cpumask_t cpu_user_cs_mask;
10762 +#endif
10763 +
10764 +#endif
10765 +#endif
10766 +
10767 } mm_context_t;
10768
10769 #ifdef CONFIG_SMP
10770 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10771 index 8b5393e..8143173 100644
10772 --- a/arch/x86/include/asm/mmu_context.h
10773 +++ b/arch/x86/include/asm/mmu_context.h
10774 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10775
10776 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10777 {
10778 +
10779 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10780 + unsigned int i;
10781 + pgd_t *pgd;
10782 +
10783 + pax_open_kernel();
10784 + pgd = get_cpu_pgd(smp_processor_id());
10785 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10786 + set_pgd_batched(pgd+i, native_make_pgd(0));
10787 + pax_close_kernel();
10788 +#endif
10789 +
10790 #ifdef CONFIG_SMP
10791 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10792 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10793 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10794 struct task_struct *tsk)
10795 {
10796 unsigned cpu = smp_processor_id();
10797 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10798 + int tlbstate = TLBSTATE_OK;
10799 +#endif
10800
10801 if (likely(prev != next)) {
10802 #ifdef CONFIG_SMP
10803 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10804 + tlbstate = percpu_read(cpu_tlbstate.state);
10805 +#endif
10806 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10807 percpu_write(cpu_tlbstate.active_mm, next);
10808 #endif
10809 cpumask_set_cpu(cpu, mm_cpumask(next));
10810
10811 /* Re-load page tables */
10812 +#ifdef CONFIG_PAX_PER_CPU_PGD
10813 + pax_open_kernel();
10814 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10815 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10816 + pax_close_kernel();
10817 + load_cr3(get_cpu_pgd(cpu));
10818 +#else
10819 load_cr3(next->pgd);
10820 +#endif
10821
10822 /* stop flush ipis for the previous mm */
10823 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10824 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10825 */
10826 if (unlikely(prev->context.ldt != next->context.ldt))
10827 load_LDT_nolock(&next->context);
10828 - }
10829 +
10830 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10831 + if (!nx_enabled) {
10832 + smp_mb__before_clear_bit();
10833 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10834 + smp_mb__after_clear_bit();
10835 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10836 + }
10837 +#endif
10838 +
10839 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10840 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10841 + prev->context.user_cs_limit != next->context.user_cs_limit))
10842 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10843 #ifdef CONFIG_SMP
10844 + else if (unlikely(tlbstate != TLBSTATE_OK))
10845 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10846 +#endif
10847 +#endif
10848 +
10849 + }
10850 else {
10851 +
10852 +#ifdef CONFIG_PAX_PER_CPU_PGD
10853 + pax_open_kernel();
10854 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10855 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10856 + pax_close_kernel();
10857 + load_cr3(get_cpu_pgd(cpu));
10858 +#endif
10859 +
10860 +#ifdef CONFIG_SMP
10861 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10862 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10863
10864 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 * tlb flush IPI delivery. We must reload CR3
10866 * to make sure to use no freed page tables.
10867 */
10868 +
10869 +#ifndef CONFIG_PAX_PER_CPU_PGD
10870 load_cr3(next->pgd);
10871 +#endif
10872 +
10873 load_LDT_nolock(&next->context);
10874 +
10875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10876 + if (!nx_enabled)
10877 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10878 +#endif
10879 +
10880 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10881 +#ifdef CONFIG_PAX_PAGEEXEC
10882 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10883 +#endif
10884 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10885 +#endif
10886 +
10887 }
10888 +#endif
10889 }
10890 -#endif
10891 }
10892
10893 #define activate_mm(prev, next) \
10894 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10895 index 3e2ce58..caaf478 100644
10896 --- a/arch/x86/include/asm/module.h
10897 +++ b/arch/x86/include/asm/module.h
10898 @@ -5,6 +5,7 @@
10899
10900 #ifdef CONFIG_X86_64
10901 /* X86_64 does not define MODULE_PROC_FAMILY */
10902 +#define MODULE_PROC_FAMILY ""
10903 #elif defined CONFIG_M386
10904 #define MODULE_PROC_FAMILY "386 "
10905 #elif defined CONFIG_M486
10906 @@ -59,13 +60,26 @@
10907 #error unknown processor family
10908 #endif
10909
10910 -#ifdef CONFIG_X86_32
10911 -# ifdef CONFIG_4KSTACKS
10912 -# define MODULE_STACKSIZE "4KSTACKS "
10913 -# else
10914 -# define MODULE_STACKSIZE ""
10915 -# endif
10916 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10917 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10918 +#define MODULE_STACKSIZE "4KSTACKS "
10919 +#else
10920 +#define MODULE_STACKSIZE ""
10921 #endif
10922
10923 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10924 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10925 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10926 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10927 +#else
10928 +#define MODULE_PAX_KERNEXEC ""
10929 +#endif
10930 +
10931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10932 +#define MODULE_PAX_UDEREF "UDEREF "
10933 +#else
10934 +#define MODULE_PAX_UDEREF ""
10935 +#endif
10936 +
10937 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10938 +
10939 #endif /* _ASM_X86_MODULE_H */
10940 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10941 index 7639dbf..e08a58c 100644
10942 --- a/arch/x86/include/asm/page_64_types.h
10943 +++ b/arch/x86/include/asm/page_64_types.h
10944 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10945
10946 /* duplicated to the one in bootmem.h */
10947 extern unsigned long max_pfn;
10948 -extern unsigned long phys_base;
10949 +extern const unsigned long phys_base;
10950
10951 extern unsigned long __phys_addr(unsigned long);
10952 #define __phys_reloc_hide(x) (x)
10953 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10954 index efb3899..ef30687 100644
10955 --- a/arch/x86/include/asm/paravirt.h
10956 +++ b/arch/x86/include/asm/paravirt.h
10957 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10958 val);
10959 }
10960
10961 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10962 +{
10963 + pgdval_t val = native_pgd_val(pgd);
10964 +
10965 + if (sizeof(pgdval_t) > sizeof(long))
10966 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10967 + val, (u64)val >> 32);
10968 + else
10969 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10970 + val);
10971 +}
10972 +
10973 static inline void pgd_clear(pgd_t *pgdp)
10974 {
10975 set_pgd(pgdp, __pgd(0));
10976 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10977 pv_mmu_ops.set_fixmap(idx, phys, flags);
10978 }
10979
10980 +#ifdef CONFIG_PAX_KERNEXEC
10981 +static inline unsigned long pax_open_kernel(void)
10982 +{
10983 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10984 +}
10985 +
10986 +static inline unsigned long pax_close_kernel(void)
10987 +{
10988 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10989 +}
10990 +#else
10991 +static inline unsigned long pax_open_kernel(void) { return 0; }
10992 +static inline unsigned long pax_close_kernel(void) { return 0; }
10993 +#endif
10994 +
10995 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10996
10997 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10998 @@ -945,7 +972,7 @@ extern void default_banner(void);
10999
11000 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11001 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11002 -#define PARA_INDIRECT(addr) *%cs:addr
11003 +#define PARA_INDIRECT(addr) *%ss:addr
11004 #endif
11005
11006 #define INTERRUPT_RETURN \
11007 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11008 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11009 CLBR_NONE, \
11010 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11011 +
11012 +#define GET_CR0_INTO_RDI \
11013 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11014 + mov %rax,%rdi
11015 +
11016 +#define SET_RDI_INTO_CR0 \
11017 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11018 +
11019 +#define GET_CR3_INTO_RDI \
11020 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11021 + mov %rax,%rdi
11022 +
11023 +#define SET_RDI_INTO_CR3 \
11024 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11025 +
11026 #endif /* CONFIG_X86_32 */
11027
11028 #endif /* __ASSEMBLY__ */
11029 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11030 index 9357473..aeb2de5 100644
11031 --- a/arch/x86/include/asm/paravirt_types.h
11032 +++ b/arch/x86/include/asm/paravirt_types.h
11033 @@ -78,19 +78,19 @@ struct pv_init_ops {
11034 */
11035 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11036 unsigned long addr, unsigned len);
11037 -};
11038 +} __no_const;
11039
11040
11041 struct pv_lazy_ops {
11042 /* Set deferred update mode, used for batching operations. */
11043 void (*enter)(void);
11044 void (*leave)(void);
11045 -};
11046 +} __no_const;
11047
11048 struct pv_time_ops {
11049 unsigned long long (*sched_clock)(void);
11050 unsigned long (*get_tsc_khz)(void);
11051 -};
11052 +} __no_const;
11053
11054 struct pv_cpu_ops {
11055 /* hooks for various privileged instructions */
11056 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11057
11058 void (*start_context_switch)(struct task_struct *prev);
11059 void (*end_context_switch)(struct task_struct *next);
11060 -};
11061 +} __no_const;
11062
11063 struct pv_irq_ops {
11064 /*
11065 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11066 unsigned long start_eip,
11067 unsigned long start_esp);
11068 #endif
11069 -};
11070 +} __no_const;
11071
11072 struct pv_mmu_ops {
11073 unsigned long (*read_cr2)(void);
11074 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11075 struct paravirt_callee_save make_pud;
11076
11077 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11078 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11079 #endif /* PAGETABLE_LEVELS == 4 */
11080 #endif /* PAGETABLE_LEVELS >= 3 */
11081
11082 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
11083 an mfn. We can tell which is which from the index. */
11084 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11085 phys_addr_t phys, pgprot_t flags);
11086 +
11087 +#ifdef CONFIG_PAX_KERNEXEC
11088 + unsigned long (*pax_open_kernel)(void);
11089 + unsigned long (*pax_close_kernel)(void);
11090 +#endif
11091 +
11092 };
11093
11094 struct raw_spinlock;
11095 @@ -326,7 +333,7 @@ struct pv_lock_ops {
11096 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11097 int (*spin_trylock)(struct raw_spinlock *lock);
11098 void (*spin_unlock)(struct raw_spinlock *lock);
11099 -};
11100 +} __no_const;
11101
11102 /* This contains all the paravirt structures: we get a convenient
11103 * number for each function using the offset which we use to indicate
11104 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11105 index b399988..3f47c38 100644
11106 --- a/arch/x86/include/asm/pci_x86.h
11107 +++ b/arch/x86/include/asm/pci_x86.h
11108 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11109 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11110
11111 struct pci_raw_ops {
11112 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11113 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11114 int reg, int len, u32 *val);
11115 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11116 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11117 int reg, int len, u32 val);
11118 };
11119
11120 -extern struct pci_raw_ops *raw_pci_ops;
11121 -extern struct pci_raw_ops *raw_pci_ext_ops;
11122 +extern const struct pci_raw_ops *raw_pci_ops;
11123 +extern const struct pci_raw_ops *raw_pci_ext_ops;
11124
11125 -extern struct pci_raw_ops pci_direct_conf1;
11126 +extern const struct pci_raw_ops pci_direct_conf1;
11127 extern bool port_cf9_safe;
11128
11129 /* arch_initcall level */
11130 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11131 index b65a36d..50345a4 100644
11132 --- a/arch/x86/include/asm/percpu.h
11133 +++ b/arch/x86/include/asm/percpu.h
11134 @@ -78,6 +78,7 @@ do { \
11135 if (0) { \
11136 T__ tmp__; \
11137 tmp__ = (val); \
11138 + (void)tmp__; \
11139 } \
11140 switch (sizeof(var)) { \
11141 case 1: \
11142 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11143 index 271de94..ef944d6 100644
11144 --- a/arch/x86/include/asm/pgalloc.h
11145 +++ b/arch/x86/include/asm/pgalloc.h
11146 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11147 pmd_t *pmd, pte_t *pte)
11148 {
11149 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11150 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11151 +}
11152 +
11153 +static inline void pmd_populate_user(struct mm_struct *mm,
11154 + pmd_t *pmd, pte_t *pte)
11155 +{
11156 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11157 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11158 }
11159
11160 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11161 index 2334982..70bc412 100644
11162 --- a/arch/x86/include/asm/pgtable-2level.h
11163 +++ b/arch/x86/include/asm/pgtable-2level.h
11164 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11165
11166 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11167 {
11168 + pax_open_kernel();
11169 *pmdp = pmd;
11170 + pax_close_kernel();
11171 }
11172
11173 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11174 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11175 index 33927d2..ccde329 100644
11176 --- a/arch/x86/include/asm/pgtable-3level.h
11177 +++ b/arch/x86/include/asm/pgtable-3level.h
11178 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11179
11180 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11181 {
11182 + pax_open_kernel();
11183 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11184 + pax_close_kernel();
11185 }
11186
11187 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11188 {
11189 + pax_open_kernel();
11190 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11191 + pax_close_kernel();
11192 }
11193
11194 /*
11195 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11196 index af6fd36..867ff74 100644
11197 --- a/arch/x86/include/asm/pgtable.h
11198 +++ b/arch/x86/include/asm/pgtable.h
11199 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11200
11201 #ifndef __PAGETABLE_PUD_FOLDED
11202 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11203 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11204 #define pgd_clear(pgd) native_pgd_clear(pgd)
11205 #endif
11206
11207 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11208
11209 #define arch_end_context_switch(prev) do {} while(0)
11210
11211 +#define pax_open_kernel() native_pax_open_kernel()
11212 +#define pax_close_kernel() native_pax_close_kernel()
11213 #endif /* CONFIG_PARAVIRT */
11214
11215 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11216 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11217 +
11218 +#ifdef CONFIG_PAX_KERNEXEC
11219 +static inline unsigned long native_pax_open_kernel(void)
11220 +{
11221 + unsigned long cr0;
11222 +
11223 + preempt_disable();
11224 + barrier();
11225 + cr0 = read_cr0() ^ X86_CR0_WP;
11226 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11227 + write_cr0(cr0);
11228 + return cr0 ^ X86_CR0_WP;
11229 +}
11230 +
11231 +static inline unsigned long native_pax_close_kernel(void)
11232 +{
11233 + unsigned long cr0;
11234 +
11235 + cr0 = read_cr0() ^ X86_CR0_WP;
11236 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11237 + write_cr0(cr0);
11238 + barrier();
11239 + preempt_enable_no_resched();
11240 + return cr0 ^ X86_CR0_WP;
11241 +}
11242 +#else
11243 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11244 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11245 +#endif
11246 +
11247 /*
11248 * The following only work if pte_present() is true.
11249 * Undefined behaviour if not..
11250 */
11251 +static inline int pte_user(pte_t pte)
11252 +{
11253 + return pte_val(pte) & _PAGE_USER;
11254 +}
11255 +
11256 static inline int pte_dirty(pte_t pte)
11257 {
11258 return pte_flags(pte) & _PAGE_DIRTY;
11259 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11260 return pte_clear_flags(pte, _PAGE_RW);
11261 }
11262
11263 +static inline pte_t pte_mkread(pte_t pte)
11264 +{
11265 + return __pte(pte_val(pte) | _PAGE_USER);
11266 +}
11267 +
11268 static inline pte_t pte_mkexec(pte_t pte)
11269 {
11270 - return pte_clear_flags(pte, _PAGE_NX);
11271 +#ifdef CONFIG_X86_PAE
11272 + if (__supported_pte_mask & _PAGE_NX)
11273 + return pte_clear_flags(pte, _PAGE_NX);
11274 + else
11275 +#endif
11276 + return pte_set_flags(pte, _PAGE_USER);
11277 +}
11278 +
11279 +static inline pte_t pte_exprotect(pte_t pte)
11280 +{
11281 +#ifdef CONFIG_X86_PAE
11282 + if (__supported_pte_mask & _PAGE_NX)
11283 + return pte_set_flags(pte, _PAGE_NX);
11284 + else
11285 +#endif
11286 + return pte_clear_flags(pte, _PAGE_USER);
11287 }
11288
11289 static inline pte_t pte_mkdirty(pte_t pte)
11290 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11291 #endif
11292
11293 #ifndef __ASSEMBLY__
11294 +
11295 +#ifdef CONFIG_PAX_PER_CPU_PGD
11296 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11297 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11298 +{
11299 + return cpu_pgd[cpu];
11300 +}
11301 +#endif
11302 +
11303 #include <linux/mm_types.h>
11304
11305 static inline int pte_none(pte_t pte)
11306 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11307
11308 static inline int pgd_bad(pgd_t pgd)
11309 {
11310 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11311 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11312 }
11313
11314 static inline int pgd_none(pgd_t pgd)
11315 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11316 * pgd_offset() returns a (pgd_t *)
11317 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11318 */
11319 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11320 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11321 +
11322 +#ifdef CONFIG_PAX_PER_CPU_PGD
11323 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11324 +#endif
11325 +
11326 /*
11327 * a shortcut which implies the use of the kernel's pgd, instead
11328 * of a process's
11329 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11330 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11331 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11332
11333 +#ifdef CONFIG_X86_32
11334 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11335 +#else
11336 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11337 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11338 +
11339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11340 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11341 +#else
11342 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11343 +#endif
11344 +
11345 +#endif
11346 +
11347 #ifndef __ASSEMBLY__
11348
11349 extern int direct_gbpages;
11350 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11351 * dst and src can be on the same page, but the range must not overlap,
11352 * and must not cross a page boundary.
11353 */
11354 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11355 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11356 {
11357 - memcpy(dst, src, count * sizeof(pgd_t));
11358 + pax_open_kernel();
11359 + while (count--)
11360 + *dst++ = *src++;
11361 + pax_close_kernel();
11362 }
11363
11364 +#ifdef CONFIG_PAX_PER_CPU_PGD
11365 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11366 +#endif
11367 +
11368 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11369 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11370 +#else
11371 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11372 +#endif
11373
11374 #include <asm-generic/pgtable.h>
11375 #endif /* __ASSEMBLY__ */
11376 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11377 index 750f1bf..971e839 100644
11378 --- a/arch/x86/include/asm/pgtable_32.h
11379 +++ b/arch/x86/include/asm/pgtable_32.h
11380 @@ -26,9 +26,6 @@
11381 struct mm_struct;
11382 struct vm_area_struct;
11383
11384 -extern pgd_t swapper_pg_dir[1024];
11385 -extern pgd_t trampoline_pg_dir[1024];
11386 -
11387 static inline void pgtable_cache_init(void) { }
11388 static inline void check_pgt_cache(void) { }
11389 void paging_init(void);
11390 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11391 # include <asm/pgtable-2level.h>
11392 #endif
11393
11394 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11395 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11396 +#ifdef CONFIG_X86_PAE
11397 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11398 +#endif
11399 +
11400 #if defined(CONFIG_HIGHPTE)
11401 #define __KM_PTE \
11402 (in_nmi() ? KM_NMI_PTE : \
11403 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11404 /* Clear a kernel PTE and flush it from the TLB */
11405 #define kpte_clear_flush(ptep, vaddr) \
11406 do { \
11407 + pax_open_kernel(); \
11408 pte_clear(&init_mm, (vaddr), (ptep)); \
11409 + pax_close_kernel(); \
11410 __flush_tlb_one((vaddr)); \
11411 } while (0)
11412
11413 @@ -85,6 +90,9 @@ do { \
11414
11415 #endif /* !__ASSEMBLY__ */
11416
11417 +#define HAVE_ARCH_UNMAPPED_AREA
11418 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11419 +
11420 /*
11421 * kern_addr_valid() is (1) for FLATMEM and (0) for
11422 * SPARSEMEM and DISCONTIGMEM
11423 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11424 index 5e67c15..12d5c47 100644
11425 --- a/arch/x86/include/asm/pgtable_32_types.h
11426 +++ b/arch/x86/include/asm/pgtable_32_types.h
11427 @@ -8,7 +8,7 @@
11428 */
11429 #ifdef CONFIG_X86_PAE
11430 # include <asm/pgtable-3level_types.h>
11431 -# define PMD_SIZE (1UL << PMD_SHIFT)
11432 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11433 # define PMD_MASK (~(PMD_SIZE - 1))
11434 #else
11435 # include <asm/pgtable-2level_types.h>
11436 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11437 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11438 #endif
11439
11440 +#ifdef CONFIG_PAX_KERNEXEC
11441 +#ifndef __ASSEMBLY__
11442 +extern unsigned char MODULES_EXEC_VADDR[];
11443 +extern unsigned char MODULES_EXEC_END[];
11444 +#endif
11445 +#include <asm/boot.h>
11446 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11447 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11448 +#else
11449 +#define ktla_ktva(addr) (addr)
11450 +#define ktva_ktla(addr) (addr)
11451 +#endif
11452 +
11453 #define MODULES_VADDR VMALLOC_START
11454 #define MODULES_END VMALLOC_END
11455 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11456 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11457 index c57a301..6b414ff 100644
11458 --- a/arch/x86/include/asm/pgtable_64.h
11459 +++ b/arch/x86/include/asm/pgtable_64.h
11460 @@ -16,10 +16,14 @@
11461
11462 extern pud_t level3_kernel_pgt[512];
11463 extern pud_t level3_ident_pgt[512];
11464 +extern pud_t level3_vmalloc_start_pgt[512];
11465 +extern pud_t level3_vmalloc_end_pgt[512];
11466 +extern pud_t level3_vmemmap_pgt[512];
11467 +extern pud_t level2_vmemmap_pgt[512];
11468 extern pmd_t level2_kernel_pgt[512];
11469 extern pmd_t level2_fixmap_pgt[512];
11470 -extern pmd_t level2_ident_pgt[512];
11471 -extern pgd_t init_level4_pgt[];
11472 +extern pmd_t level2_ident_pgt[512*2];
11473 +extern pgd_t init_level4_pgt[512];
11474
11475 #define swapper_pg_dir init_level4_pgt
11476
11477 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11478
11479 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11480 {
11481 + pax_open_kernel();
11482 *pmdp = pmd;
11483 + pax_close_kernel();
11484 }
11485
11486 static inline void native_pmd_clear(pmd_t *pmd)
11487 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11488
11489 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11490 {
11491 + pax_open_kernel();
11492 + *pgdp = pgd;
11493 + pax_close_kernel();
11494 +}
11495 +
11496 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11497 +{
11498 *pgdp = pgd;
11499 }
11500
11501 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11502 index 766ea16..5b96cb3 100644
11503 --- a/arch/x86/include/asm/pgtable_64_types.h
11504 +++ b/arch/x86/include/asm/pgtable_64_types.h
11505 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11506 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11507 #define MODULES_END _AC(0xffffffffff000000, UL)
11508 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11509 +#define MODULES_EXEC_VADDR MODULES_VADDR
11510 +#define MODULES_EXEC_END MODULES_END
11511 +
11512 +#define ktla_ktva(addr) (addr)
11513 +#define ktva_ktla(addr) (addr)
11514
11515 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11516 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11517 index d1f4a76..2f46ba1 100644
11518 --- a/arch/x86/include/asm/pgtable_types.h
11519 +++ b/arch/x86/include/asm/pgtable_types.h
11520 @@ -16,12 +16,11 @@
11521 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11522 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11523 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11524 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11525 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11526 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11527 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11528 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11529 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11530 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11531 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11532 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11533
11534 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11535 @@ -39,7 +38,6 @@
11536 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11537 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11538 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11539 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11540 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11541 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11542 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11543 @@ -55,8 +53,10 @@
11544
11545 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11546 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11547 -#else
11548 +#elif defined(CONFIG_KMEMCHECK)
11549 #define _PAGE_NX (_AT(pteval_t, 0))
11550 +#else
11551 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11552 #endif
11553
11554 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11555 @@ -93,6 +93,9 @@
11556 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11557 _PAGE_ACCESSED)
11558
11559 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11560 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11561 +
11562 #define __PAGE_KERNEL_EXEC \
11563 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11564 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11565 @@ -103,8 +106,8 @@
11566 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11567 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11568 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11569 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11570 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11571 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11572 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11573 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11574 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11575 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11576 @@ -163,8 +166,8 @@
11577 * bits are combined, this will alow user to access the high address mapped
11578 * VDSO in the presence of CONFIG_COMPAT_VDSO
11579 */
11580 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11581 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11582 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11583 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11584 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11585 #endif
11586
11587 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11588 {
11589 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11590 }
11591 +#endif
11592
11593 +#if PAGETABLE_LEVELS == 3
11594 +#include <asm-generic/pgtable-nopud.h>
11595 +#endif
11596 +
11597 +#if PAGETABLE_LEVELS == 2
11598 +#include <asm-generic/pgtable-nopmd.h>
11599 +#endif
11600 +
11601 +#ifndef __ASSEMBLY__
11602 #if PAGETABLE_LEVELS > 3
11603 typedef struct { pudval_t pud; } pud_t;
11604
11605 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11606 return pud.pud;
11607 }
11608 #else
11609 -#include <asm-generic/pgtable-nopud.h>
11610 -
11611 static inline pudval_t native_pud_val(pud_t pud)
11612 {
11613 return native_pgd_val(pud.pgd);
11614 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11615 return pmd.pmd;
11616 }
11617 #else
11618 -#include <asm-generic/pgtable-nopmd.h>
11619 -
11620 static inline pmdval_t native_pmd_val(pmd_t pmd)
11621 {
11622 return native_pgd_val(pmd.pud.pgd);
11623 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11624
11625 extern pteval_t __supported_pte_mask;
11626 extern void set_nx(void);
11627 +
11628 +#ifdef CONFIG_X86_32
11629 +#ifdef CONFIG_X86_PAE
11630 extern int nx_enabled;
11631 +#else
11632 +#define nx_enabled (0)
11633 +#endif
11634 +#else
11635 +#define nx_enabled (1)
11636 +#endif
11637
11638 #define pgprot_writecombine pgprot_writecombine
11639 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11640 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11641 index fa04dea..5f823fc 100644
11642 --- a/arch/x86/include/asm/processor.h
11643 +++ b/arch/x86/include/asm/processor.h
11644 @@ -272,7 +272,7 @@ struct tss_struct {
11645
11646 } ____cacheline_aligned;
11647
11648 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11649 +extern struct tss_struct init_tss[NR_CPUS];
11650
11651 /*
11652 * Save the original ist values for checking stack pointers during debugging
11653 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11654 */
11655 #define TASK_SIZE PAGE_OFFSET
11656 #define TASK_SIZE_MAX TASK_SIZE
11657 +
11658 +#ifdef CONFIG_PAX_SEGMEXEC
11659 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11660 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11661 +#else
11662 #define STACK_TOP TASK_SIZE
11663 -#define STACK_TOP_MAX STACK_TOP
11664 +#endif
11665 +
11666 +#define STACK_TOP_MAX TASK_SIZE
11667
11668 #define INIT_THREAD { \
11669 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11670 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11671 .vm86_info = NULL, \
11672 .sysenter_cs = __KERNEL_CS, \
11673 .io_bitmap_ptr = NULL, \
11674 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11675 */
11676 #define INIT_TSS { \
11677 .x86_tss = { \
11678 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11679 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11680 .ss0 = __KERNEL_DS, \
11681 .ss1 = __KERNEL_CS, \
11682 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11683 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11684 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11685
11686 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11687 -#define KSTK_TOP(info) \
11688 -({ \
11689 - unsigned long *__ptr = (unsigned long *)(info); \
11690 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11691 -})
11692 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11693
11694 /*
11695 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11696 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11697 #define task_pt_regs(task) \
11698 ({ \
11699 struct pt_regs *__regs__; \
11700 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11701 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11702 __regs__ - 1; \
11703 })
11704
11705 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11706 /*
11707 * User space process size. 47bits minus one guard page.
11708 */
11709 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11710 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11711
11712 /* This decides where the kernel will search for a free chunk of vm
11713 * space during mmap's.
11714 */
11715 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11716 - 0xc0000000 : 0xFFFFe000)
11717 + 0xc0000000 : 0xFFFFf000)
11718
11719 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11720 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11721 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11722 #define STACK_TOP_MAX TASK_SIZE_MAX
11723
11724 #define INIT_THREAD { \
11725 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11726 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11727 }
11728
11729 #define INIT_TSS { \
11730 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11731 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11732 }
11733
11734 /*
11735 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11736 */
11737 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11738
11739 +#ifdef CONFIG_PAX_SEGMEXEC
11740 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11741 +#endif
11742 +
11743 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11744
11745 /* Get/set a process' ability to use the timestamp counter instruction */
11746 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11747 index 0f0d908..f2e3da2 100644
11748 --- a/arch/x86/include/asm/ptrace.h
11749 +++ b/arch/x86/include/asm/ptrace.h
11750 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11751 }
11752
11753 /*
11754 - * user_mode_vm(regs) determines whether a register set came from user mode.
11755 + * user_mode(regs) determines whether a register set came from user mode.
11756 * This is true if V8086 mode was enabled OR if the register set was from
11757 * protected mode with RPL-3 CS value. This tricky test checks that with
11758 * one comparison. Many places in the kernel can bypass this full check
11759 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11760 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11761 + * be used.
11762 */
11763 -static inline int user_mode(struct pt_regs *regs)
11764 +static inline int user_mode_novm(struct pt_regs *regs)
11765 {
11766 #ifdef CONFIG_X86_32
11767 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11768 #else
11769 - return !!(regs->cs & 3);
11770 + return !!(regs->cs & SEGMENT_RPL_MASK);
11771 #endif
11772 }
11773
11774 -static inline int user_mode_vm(struct pt_regs *regs)
11775 +static inline int user_mode(struct pt_regs *regs)
11776 {
11777 #ifdef CONFIG_X86_32
11778 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11779 USER_RPL;
11780 #else
11781 - return user_mode(regs);
11782 + return user_mode_novm(regs);
11783 #endif
11784 }
11785
11786 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11787 index 562d4fd..6e39df1 100644
11788 --- a/arch/x86/include/asm/reboot.h
11789 +++ b/arch/x86/include/asm/reboot.h
11790 @@ -6,19 +6,19 @@
11791 struct pt_regs;
11792
11793 struct machine_ops {
11794 - void (*restart)(char *cmd);
11795 - void (*halt)(void);
11796 - void (*power_off)(void);
11797 + void (* __noreturn restart)(char *cmd);
11798 + void (* __noreturn halt)(void);
11799 + void (* __noreturn power_off)(void);
11800 void (*shutdown)(void);
11801 void (*crash_shutdown)(struct pt_regs *);
11802 - void (*emergency_restart)(void);
11803 -};
11804 + void (* __noreturn emergency_restart)(void);
11805 +} __no_const;
11806
11807 extern struct machine_ops machine_ops;
11808
11809 void native_machine_crash_shutdown(struct pt_regs *regs);
11810 void native_machine_shutdown(void);
11811 -void machine_real_restart(const unsigned char *code, int length);
11812 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11813
11814 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11815 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11816 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11817 index 606ede1..dbfff37 100644
11818 --- a/arch/x86/include/asm/rwsem.h
11819 +++ b/arch/x86/include/asm/rwsem.h
11820 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11821 {
11822 asm volatile("# beginning down_read\n\t"
11823 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11824 +
11825 +#ifdef CONFIG_PAX_REFCOUNT
11826 + "jno 0f\n"
11827 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11828 + "int $4\n0:\n"
11829 + _ASM_EXTABLE(0b, 0b)
11830 +#endif
11831 +
11832 /* adds 0x00000001, returns the old value */
11833 " jns 1f\n"
11834 " call call_rwsem_down_read_failed\n"
11835 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11836 "1:\n\t"
11837 " mov %1,%2\n\t"
11838 " add %3,%2\n\t"
11839 +
11840 +#ifdef CONFIG_PAX_REFCOUNT
11841 + "jno 0f\n"
11842 + "sub %3,%2\n"
11843 + "int $4\n0:\n"
11844 + _ASM_EXTABLE(0b, 0b)
11845 +#endif
11846 +
11847 " jle 2f\n\t"
11848 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11849 " jnz 1b\n\t"
11850 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11851 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11852 asm volatile("# beginning down_write\n\t"
11853 LOCK_PREFIX " xadd %1,(%2)\n\t"
11854 +
11855 +#ifdef CONFIG_PAX_REFCOUNT
11856 + "jno 0f\n"
11857 + "mov %1,(%2)\n"
11858 + "int $4\n0:\n"
11859 + _ASM_EXTABLE(0b, 0b)
11860 +#endif
11861 +
11862 /* subtract 0x0000ffff, returns the old value */
11863 " test %1,%1\n\t"
11864 /* was the count 0 before? */
11865 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11866 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11867 asm volatile("# beginning __up_read\n\t"
11868 LOCK_PREFIX " xadd %1,(%2)\n\t"
11869 +
11870 +#ifdef CONFIG_PAX_REFCOUNT
11871 + "jno 0f\n"
11872 + "mov %1,(%2)\n"
11873 + "int $4\n0:\n"
11874 + _ASM_EXTABLE(0b, 0b)
11875 +#endif
11876 +
11877 /* subtracts 1, returns the old value */
11878 " jns 1f\n\t"
11879 " call call_rwsem_wake\n"
11880 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11881 rwsem_count_t tmp;
11882 asm volatile("# beginning __up_write\n\t"
11883 LOCK_PREFIX " xadd %1,(%2)\n\t"
11884 +
11885 +#ifdef CONFIG_PAX_REFCOUNT
11886 + "jno 0f\n"
11887 + "mov %1,(%2)\n"
11888 + "int $4\n0:\n"
11889 + _ASM_EXTABLE(0b, 0b)
11890 +#endif
11891 +
11892 /* tries to transition
11893 0xffff0001 -> 0x00000000 */
11894 " jz 1f\n"
11895 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11896 {
11897 asm volatile("# beginning __downgrade_write\n\t"
11898 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11899 +
11900 +#ifdef CONFIG_PAX_REFCOUNT
11901 + "jno 0f\n"
11902 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11903 + "int $4\n0:\n"
11904 + _ASM_EXTABLE(0b, 0b)
11905 +#endif
11906 +
11907 /*
11908 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11909 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11910 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11911 static inline void rwsem_atomic_add(rwsem_count_t delta,
11912 struct rw_semaphore *sem)
11913 {
11914 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11915 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11916 +
11917 +#ifdef CONFIG_PAX_REFCOUNT
11918 + "jno 0f\n"
11919 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11920 + "int $4\n0:\n"
11921 + _ASM_EXTABLE(0b, 0b)
11922 +#endif
11923 +
11924 : "+m" (sem->count)
11925 : "er" (delta));
11926 }
11927 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11928 {
11929 rwsem_count_t tmp = delta;
11930
11931 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11932 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11933 +
11934 +#ifdef CONFIG_PAX_REFCOUNT
11935 + "jno 0f\n"
11936 + "mov %0,%1\n"
11937 + "int $4\n0:\n"
11938 + _ASM_EXTABLE(0b, 0b)
11939 +#endif
11940 +
11941 : "+r" (tmp), "+m" (sem->count)
11942 : : "memory");
11943
11944 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11945 index 14e0ed8..7f7dd5e 100644
11946 --- a/arch/x86/include/asm/segment.h
11947 +++ b/arch/x86/include/asm/segment.h
11948 @@ -62,10 +62,15 @@
11949 * 26 - ESPFIX small SS
11950 * 27 - per-cpu [ offset to per-cpu data area ]
11951 * 28 - stack_canary-20 [ for stack protector ]
11952 - * 29 - unused
11953 - * 30 - unused
11954 + * 29 - PCI BIOS CS
11955 + * 30 - PCI BIOS DS
11956 * 31 - TSS for double fault handler
11957 */
11958 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11959 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11960 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11961 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11962 +
11963 #define GDT_ENTRY_TLS_MIN 6
11964 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11965
11966 @@ -77,6 +82,8 @@
11967
11968 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11969
11970 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11971 +
11972 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11973
11974 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11975 @@ -88,7 +95,7 @@
11976 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11977 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11978
11979 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11980 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11981 #ifdef CONFIG_SMP
11982 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11983 #else
11984 @@ -102,6 +109,12 @@
11985 #define __KERNEL_STACK_CANARY 0
11986 #endif
11987
11988 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11989 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11990 +
11991 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11992 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11993 +
11994 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11995
11996 /*
11997 @@ -139,7 +152,7 @@
11998 */
11999
12000 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12001 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12002 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12003
12004
12005 #else
12006 @@ -163,6 +176,8 @@
12007 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12008 #define __USER32_DS __USER_DS
12009
12010 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12011 +
12012 #define GDT_ENTRY_TSS 8 /* needs two entries */
12013 #define GDT_ENTRY_LDT 10 /* needs two entries */
12014 #define GDT_ENTRY_TLS_MIN 12
12015 @@ -183,6 +198,7 @@
12016 #endif
12017
12018 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12019 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12020 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12021 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12022 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12023 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12024 index 4c2f63c..5685db2 100644
12025 --- a/arch/x86/include/asm/smp.h
12026 +++ b/arch/x86/include/asm/smp.h
12027 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12028 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12029 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12030 DECLARE_PER_CPU(u16, cpu_llc_id);
12031 -DECLARE_PER_CPU(int, cpu_number);
12032 +DECLARE_PER_CPU(unsigned int, cpu_number);
12033
12034 static inline struct cpumask *cpu_sibling_mask(int cpu)
12035 {
12036 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12037 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12038
12039 /* Static state in head.S used to set up a CPU */
12040 -extern struct {
12041 - void *sp;
12042 - unsigned short ss;
12043 -} stack_start;
12044 +extern unsigned long stack_start; /* Initial stack pointer address */
12045
12046 struct smp_ops {
12047 void (*smp_prepare_boot_cpu)(void);
12048 @@ -60,7 +57,7 @@ struct smp_ops {
12049
12050 void (*send_call_func_ipi)(const struct cpumask *mask);
12051 void (*send_call_func_single_ipi)(int cpu);
12052 -};
12053 +} __no_const;
12054
12055 /* Globals due to paravirt */
12056 extern void set_cpu_sibling_map(int cpu);
12057 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12058 extern int safe_smp_processor_id(void);
12059
12060 #elif defined(CONFIG_X86_64_SMP)
12061 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12062 -
12063 -#define stack_smp_processor_id() \
12064 -({ \
12065 - struct thread_info *ti; \
12066 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12067 - ti->cpu; \
12068 -})
12069 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12070 +#define stack_smp_processor_id() raw_smp_processor_id()
12071 #define safe_smp_processor_id() smp_processor_id()
12072
12073 #endif
12074 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12075 index 4e77853..4359783 100644
12076 --- a/arch/x86/include/asm/spinlock.h
12077 +++ b/arch/x86/include/asm/spinlock.h
12078 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12079 static inline void __raw_read_lock(raw_rwlock_t *rw)
12080 {
12081 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12082 +
12083 +#ifdef CONFIG_PAX_REFCOUNT
12084 + "jno 0f\n"
12085 + LOCK_PREFIX " addl $1,(%0)\n"
12086 + "int $4\n0:\n"
12087 + _ASM_EXTABLE(0b, 0b)
12088 +#endif
12089 +
12090 "jns 1f\n"
12091 "call __read_lock_failed\n\t"
12092 "1:\n"
12093 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12094 static inline void __raw_write_lock(raw_rwlock_t *rw)
12095 {
12096 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12097 +
12098 +#ifdef CONFIG_PAX_REFCOUNT
12099 + "jno 0f\n"
12100 + LOCK_PREFIX " addl %1,(%0)\n"
12101 + "int $4\n0:\n"
12102 + _ASM_EXTABLE(0b, 0b)
12103 +#endif
12104 +
12105 "jz 1f\n"
12106 "call __write_lock_failed\n\t"
12107 "1:\n"
12108 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12109
12110 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12111 {
12112 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12113 + asm volatile(LOCK_PREFIX "incl %0\n"
12114 +
12115 +#ifdef CONFIG_PAX_REFCOUNT
12116 + "jno 0f\n"
12117 + LOCK_PREFIX "decl %0\n"
12118 + "int $4\n0:\n"
12119 + _ASM_EXTABLE(0b, 0b)
12120 +#endif
12121 +
12122 + :"+m" (rw->lock) : : "memory");
12123 }
12124
12125 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12126 {
12127 - asm volatile(LOCK_PREFIX "addl %1, %0"
12128 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
12129 +
12130 +#ifdef CONFIG_PAX_REFCOUNT
12131 + "jno 0f\n"
12132 + LOCK_PREFIX "subl %1, %0\n"
12133 + "int $4\n0:\n"
12134 + _ASM_EXTABLE(0b, 0b)
12135 +#endif
12136 +
12137 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12138 }
12139
12140 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12141 index 1575177..cb23f52 100644
12142 --- a/arch/x86/include/asm/stackprotector.h
12143 +++ b/arch/x86/include/asm/stackprotector.h
12144 @@ -48,7 +48,7 @@
12145 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12146 */
12147 #define GDT_STACK_CANARY_INIT \
12148 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12149 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12150
12151 /*
12152 * Initialize the stackprotector canary value.
12153 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12154
12155 static inline void load_stack_canary_segment(void)
12156 {
12157 -#ifdef CONFIG_X86_32
12158 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12159 asm volatile ("mov %0, %%gs" : : "r" (0));
12160 #endif
12161 }
12162 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12163 index e0fbf29..858ef4a 100644
12164 --- a/arch/x86/include/asm/system.h
12165 +++ b/arch/x86/include/asm/system.h
12166 @@ -132,7 +132,7 @@ do { \
12167 "thread_return:\n\t" \
12168 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12169 __switch_canary \
12170 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12171 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12172 "movq %%rax,%%rdi\n\t" \
12173 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12174 "jnz ret_from_fork\n\t" \
12175 @@ -143,7 +143,7 @@ do { \
12176 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12177 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12178 [_tif_fork] "i" (_TIF_FORK), \
12179 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12180 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
12181 [current_task] "m" (per_cpu_var(current_task)) \
12182 __switch_canary_iparam \
12183 : "memory", "cc" __EXTRA_CLOBBER)
12184 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12185 {
12186 unsigned long __limit;
12187 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12188 - return __limit + 1;
12189 + return __limit;
12190 }
12191
12192 static inline void native_clts(void)
12193 @@ -340,12 +340,12 @@ void enable_hlt(void);
12194
12195 void cpu_idle_wait(void);
12196
12197 -extern unsigned long arch_align_stack(unsigned long sp);
12198 +#define arch_align_stack(x) ((x) & ~0xfUL)
12199 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12200
12201 void default_idle(void);
12202
12203 -void stop_this_cpu(void *dummy);
12204 +void stop_this_cpu(void *dummy) __noreturn;
12205
12206 /*
12207 * Force strict CPU ordering.
12208 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12209 index 19c3ce4..8962535 100644
12210 --- a/arch/x86/include/asm/thread_info.h
12211 +++ b/arch/x86/include/asm/thread_info.h
12212 @@ -10,6 +10,7 @@
12213 #include <linux/compiler.h>
12214 #include <asm/page.h>
12215 #include <asm/types.h>
12216 +#include <asm/percpu.h>
12217
12218 /*
12219 * low level task data that entry.S needs immediate access to
12220 @@ -24,7 +25,6 @@ struct exec_domain;
12221 #include <asm/atomic.h>
12222
12223 struct thread_info {
12224 - struct task_struct *task; /* main task structure */
12225 struct exec_domain *exec_domain; /* execution domain */
12226 __u32 flags; /* low level flags */
12227 __u32 status; /* thread synchronous flags */
12228 @@ -34,18 +34,12 @@ struct thread_info {
12229 mm_segment_t addr_limit;
12230 struct restart_block restart_block;
12231 void __user *sysenter_return;
12232 -#ifdef CONFIG_X86_32
12233 - unsigned long previous_esp; /* ESP of the previous stack in
12234 - case of nested (IRQ) stacks
12235 - */
12236 - __u8 supervisor_stack[0];
12237 -#endif
12238 + unsigned long lowest_stack;
12239 int uaccess_err;
12240 };
12241
12242 -#define INIT_THREAD_INFO(tsk) \
12243 +#define INIT_THREAD_INFO \
12244 { \
12245 - .task = &tsk, \
12246 .exec_domain = &default_exec_domain, \
12247 .flags = 0, \
12248 .cpu = 0, \
12249 @@ -56,7 +50,7 @@ struct thread_info {
12250 }, \
12251 }
12252
12253 -#define init_thread_info (init_thread_union.thread_info)
12254 +#define init_thread_info (init_thread_union.stack)
12255 #define init_stack (init_thread_union.stack)
12256
12257 #else /* !__ASSEMBLY__ */
12258 @@ -163,45 +157,40 @@ struct thread_info {
12259 #define alloc_thread_info(tsk) \
12260 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12261
12262 -#ifdef CONFIG_X86_32
12263 -
12264 -#define STACK_WARN (THREAD_SIZE/8)
12265 -/*
12266 - * macros/functions for gaining access to the thread information structure
12267 - *
12268 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12269 - */
12270 -#ifndef __ASSEMBLY__
12271 -
12272 -
12273 -/* how to get the current stack pointer from C */
12274 -register unsigned long current_stack_pointer asm("esp") __used;
12275 -
12276 -/* how to get the thread information struct from C */
12277 -static inline struct thread_info *current_thread_info(void)
12278 -{
12279 - return (struct thread_info *)
12280 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12281 -}
12282 -
12283 -#else /* !__ASSEMBLY__ */
12284 -
12285 +#ifdef __ASSEMBLY__
12286 /* how to get the thread information struct from ASM */
12287 #define GET_THREAD_INFO(reg) \
12288 - movl $-THREAD_SIZE, reg; \
12289 - andl %esp, reg
12290 + mov PER_CPU_VAR(current_tinfo), reg
12291
12292 /* use this one if reg already contains %esp */
12293 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12294 - andl $-THREAD_SIZE, reg
12295 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12296 +#else
12297 +/* how to get the thread information struct from C */
12298 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12299 +
12300 +static __always_inline struct thread_info *current_thread_info(void)
12301 +{
12302 + return percpu_read_stable(current_tinfo);
12303 +}
12304 +#endif
12305 +
12306 +#ifdef CONFIG_X86_32
12307 +
12308 +#define STACK_WARN (THREAD_SIZE/8)
12309 +/*
12310 + * macros/functions for gaining access to the thread information structure
12311 + *
12312 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12313 + */
12314 +#ifndef __ASSEMBLY__
12315 +
12316 +/* how to get the current stack pointer from C */
12317 +register unsigned long current_stack_pointer asm("esp") __used;
12318
12319 #endif
12320
12321 #else /* X86_32 */
12322
12323 -#include <asm/percpu.h>
12324 -#define KERNEL_STACK_OFFSET (5*8)
12325 -
12326 /*
12327 * macros/functions for gaining access to the thread information structure
12328 * preempt_count needs to be 1 initially, until the scheduler is functional.
12329 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12330 #ifndef __ASSEMBLY__
12331 DECLARE_PER_CPU(unsigned long, kernel_stack);
12332
12333 -static inline struct thread_info *current_thread_info(void)
12334 -{
12335 - struct thread_info *ti;
12336 - ti = (void *)(percpu_read_stable(kernel_stack) +
12337 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12338 - return ti;
12339 -}
12340 -
12341 -#else /* !__ASSEMBLY__ */
12342 -
12343 -/* how to get the thread information struct from ASM */
12344 -#define GET_THREAD_INFO(reg) \
12345 - movq PER_CPU_VAR(kernel_stack),reg ; \
12346 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12347 -
12348 +/* how to get the current stack pointer from C */
12349 +register unsigned long current_stack_pointer asm("rsp") __used;
12350 #endif
12351
12352 #endif /* !X86_32 */
12353 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12354 extern void free_thread_info(struct thread_info *ti);
12355 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12356 #define arch_task_cache_init arch_task_cache_init
12357 +
12358 +#define __HAVE_THREAD_FUNCTIONS
12359 +#define task_thread_info(task) (&(task)->tinfo)
12360 +#define task_stack_page(task) ((task)->stack)
12361 +#define setup_thread_stack(p, org) do {} while (0)
12362 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12363 +
12364 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12365 +extern struct task_struct *alloc_task_struct(void);
12366 +extern void free_task_struct(struct task_struct *);
12367 +
12368 #endif
12369 #endif /* _ASM_X86_THREAD_INFO_H */
12370 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12371 index 61c5874..8a046e9 100644
12372 --- a/arch/x86/include/asm/uaccess.h
12373 +++ b/arch/x86/include/asm/uaccess.h
12374 @@ -8,12 +8,15 @@
12375 #include <linux/thread_info.h>
12376 #include <linux/prefetch.h>
12377 #include <linux/string.h>
12378 +#include <linux/sched.h>
12379 #include <asm/asm.h>
12380 #include <asm/page.h>
12381
12382 #define VERIFY_READ 0
12383 #define VERIFY_WRITE 1
12384
12385 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12386 +
12387 /*
12388 * The fs value determines whether argument validity checking should be
12389 * performed or not. If get_fs() == USER_DS, checking is performed, with
12390 @@ -29,7 +32,12 @@
12391
12392 #define get_ds() (KERNEL_DS)
12393 #define get_fs() (current_thread_info()->addr_limit)
12394 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12395 +void __set_fs(mm_segment_t x);
12396 +void set_fs(mm_segment_t x);
12397 +#else
12398 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12399 +#endif
12400
12401 #define segment_eq(a, b) ((a).seg == (b).seg)
12402
12403 @@ -77,7 +85,33 @@
12404 * checks that the pointer is in the user space range - after calling
12405 * this function, memory access functions may still return -EFAULT.
12406 */
12407 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12408 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12409 +#define access_ok(type, addr, size) \
12410 +({ \
12411 + long __size = size; \
12412 + unsigned long __addr = (unsigned long)addr; \
12413 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12414 + unsigned long __end_ao = __addr + __size - 1; \
12415 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12416 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12417 + while(__addr_ao <= __end_ao) { \
12418 + char __c_ao; \
12419 + __addr_ao += PAGE_SIZE; \
12420 + if (__size > PAGE_SIZE) \
12421 + cond_resched(); \
12422 + if (__get_user(__c_ao, (char __user *)__addr)) \
12423 + break; \
12424 + if (type != VERIFY_WRITE) { \
12425 + __addr = __addr_ao; \
12426 + continue; \
12427 + } \
12428 + if (__put_user(__c_ao, (char __user *)__addr)) \
12429 + break; \
12430 + __addr = __addr_ao; \
12431 + } \
12432 + } \
12433 + __ret_ao; \
12434 +})
12435
12436 /*
12437 * The exception table consists of pairs of addresses: the first is the
12438 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12439 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12440 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12441
12442 -
12443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12444 +#define __copyuser_seg "gs;"
12445 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12446 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12447 +#else
12448 +#define __copyuser_seg
12449 +#define __COPYUSER_SET_ES
12450 +#define __COPYUSER_RESTORE_ES
12451 +#endif
12452
12453 #ifdef CONFIG_X86_32
12454 #define __put_user_asm_u64(x, addr, err, errret) \
12455 - asm volatile("1: movl %%eax,0(%2)\n" \
12456 - "2: movl %%edx,4(%2)\n" \
12457 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12458 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12459 "3:\n" \
12460 ".section .fixup,\"ax\"\n" \
12461 "4: movl %3,%0\n" \
12462 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12463 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12464
12465 #define __put_user_asm_ex_u64(x, addr) \
12466 - asm volatile("1: movl %%eax,0(%1)\n" \
12467 - "2: movl %%edx,4(%1)\n" \
12468 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12469 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12470 "3:\n" \
12471 _ASM_EXTABLE(1b, 2b - 1b) \
12472 _ASM_EXTABLE(2b, 3b - 2b) \
12473 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12474 __typeof__(*(ptr)) __pu_val; \
12475 __chk_user_ptr(ptr); \
12476 might_fault(); \
12477 - __pu_val = x; \
12478 + __pu_val = (x); \
12479 switch (sizeof(*(ptr))) { \
12480 case 1: \
12481 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12482 @@ -374,7 +416,7 @@ do { \
12483 } while (0)
12484
12485 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12486 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12487 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12488 "2:\n" \
12489 ".section .fixup,\"ax\"\n" \
12490 "3: mov %3,%0\n" \
12491 @@ -382,7 +424,7 @@ do { \
12492 " jmp 2b\n" \
12493 ".previous\n" \
12494 _ASM_EXTABLE(1b, 3b) \
12495 - : "=r" (err), ltype(x) \
12496 + : "=r" (err), ltype (x) \
12497 : "m" (__m(addr)), "i" (errret), "0" (err))
12498
12499 #define __get_user_size_ex(x, ptr, size) \
12500 @@ -407,7 +449,7 @@ do { \
12501 } while (0)
12502
12503 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12504 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12505 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12506 "2:\n" \
12507 _ASM_EXTABLE(1b, 2b - 1b) \
12508 : ltype(x) : "m" (__m(addr)))
12509 @@ -424,13 +466,24 @@ do { \
12510 int __gu_err; \
12511 unsigned long __gu_val; \
12512 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12513 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12514 + (x) = (__typeof__(*(ptr)))__gu_val; \
12515 __gu_err; \
12516 })
12517
12518 /* FIXME: this hack is definitely wrong -AK */
12519 struct __large_struct { unsigned long buf[100]; };
12520 -#define __m(x) (*(struct __large_struct __user *)(x))
12521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12522 +#define ____m(x) \
12523 +({ \
12524 + unsigned long ____x = (unsigned long)(x); \
12525 + if (____x < PAX_USER_SHADOW_BASE) \
12526 + ____x += PAX_USER_SHADOW_BASE; \
12527 + (void __user *)____x; \
12528 +})
12529 +#else
12530 +#define ____m(x) (x)
12531 +#endif
12532 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12533
12534 /*
12535 * Tell gcc we read from memory instead of writing: this is because
12536 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12537 * aliasing issues.
12538 */
12539 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12540 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12541 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12542 "2:\n" \
12543 ".section .fixup,\"ax\"\n" \
12544 "3: mov %3,%0\n" \
12545 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12546 ".previous\n" \
12547 _ASM_EXTABLE(1b, 3b) \
12548 : "=r"(err) \
12549 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12550 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12551
12552 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12553 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12554 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12555 "2:\n" \
12556 _ASM_EXTABLE(1b, 2b - 1b) \
12557 : : ltype(x), "m" (__m(addr)))
12558 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12559 * On error, the variable @x is set to zero.
12560 */
12561
12562 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12563 +#define __get_user(x, ptr) get_user((x), (ptr))
12564 +#else
12565 #define __get_user(x, ptr) \
12566 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12567 +#endif
12568
12569 /**
12570 * __put_user: - Write a simple value into user space, with less checking.
12571 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12572 * Returns zero on success, or -EFAULT on error.
12573 */
12574
12575 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12576 +#define __put_user(x, ptr) put_user((x), (ptr))
12577 +#else
12578 #define __put_user(x, ptr) \
12579 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12580 +#endif
12581
12582 #define __get_user_unaligned __get_user
12583 #define __put_user_unaligned __put_user
12584 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12585 #define get_user_ex(x, ptr) do { \
12586 unsigned long __gue_val; \
12587 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12588 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12589 + (x) = (__typeof__(*(ptr)))__gue_val; \
12590 } while (0)
12591
12592 #ifdef CONFIG_X86_WP_WORKS_OK
12593 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12594
12595 #define ARCH_HAS_NOCACHE_UACCESS 1
12596
12597 +#define ARCH_HAS_SORT_EXTABLE
12598 #ifdef CONFIG_X86_32
12599 # include "uaccess_32.h"
12600 #else
12601 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12602 index 632fb44..e30e334 100644
12603 --- a/arch/x86/include/asm/uaccess_32.h
12604 +++ b/arch/x86/include/asm/uaccess_32.h
12605 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12606 static __always_inline unsigned long __must_check
12607 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12608 {
12609 + pax_track_stack();
12610 +
12611 + if ((long)n < 0)
12612 + return n;
12613 +
12614 if (__builtin_constant_p(n)) {
12615 unsigned long ret;
12616
12617 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12618 return ret;
12619 }
12620 }
12621 + if (!__builtin_constant_p(n))
12622 + check_object_size(from, n, true);
12623 return __copy_to_user_ll(to, from, n);
12624 }
12625
12626 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12627 __copy_to_user(void __user *to, const void *from, unsigned long n)
12628 {
12629 might_fault();
12630 +
12631 return __copy_to_user_inatomic(to, from, n);
12632 }
12633
12634 static __always_inline unsigned long
12635 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12636 {
12637 + if ((long)n < 0)
12638 + return n;
12639 +
12640 /* Avoid zeroing the tail if the copy fails..
12641 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12642 * but as the zeroing behaviour is only significant when n is not
12643 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12644 __copy_from_user(void *to, const void __user *from, unsigned long n)
12645 {
12646 might_fault();
12647 +
12648 + pax_track_stack();
12649 +
12650 + if ((long)n < 0)
12651 + return n;
12652 +
12653 if (__builtin_constant_p(n)) {
12654 unsigned long ret;
12655
12656 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12657 return ret;
12658 }
12659 }
12660 + if (!__builtin_constant_p(n))
12661 + check_object_size(to, n, false);
12662 return __copy_from_user_ll(to, from, n);
12663 }
12664
12665 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12666 const void __user *from, unsigned long n)
12667 {
12668 might_fault();
12669 +
12670 + if ((long)n < 0)
12671 + return n;
12672 +
12673 if (__builtin_constant_p(n)) {
12674 unsigned long ret;
12675
12676 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12677 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12678 unsigned long n)
12679 {
12680 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12681 + if ((long)n < 0)
12682 + return n;
12683 +
12684 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12685 +}
12686 +
12687 +/**
12688 + * copy_to_user: - Copy a block of data into user space.
12689 + * @to: Destination address, in user space.
12690 + * @from: Source address, in kernel space.
12691 + * @n: Number of bytes to copy.
12692 + *
12693 + * Context: User context only. This function may sleep.
12694 + *
12695 + * Copy data from kernel space to user space.
12696 + *
12697 + * Returns number of bytes that could not be copied.
12698 + * On success, this will be zero.
12699 + */
12700 +static __always_inline unsigned long __must_check
12701 +copy_to_user(void __user *to, const void *from, unsigned long n)
12702 +{
12703 + if (access_ok(VERIFY_WRITE, to, n))
12704 + n = __copy_to_user(to, from, n);
12705 + return n;
12706 +}
12707 +
12708 +/**
12709 + * copy_from_user: - Copy a block of data from user space.
12710 + * @to: Destination address, in kernel space.
12711 + * @from: Source address, in user space.
12712 + * @n: Number of bytes to copy.
12713 + *
12714 + * Context: User context only. This function may sleep.
12715 + *
12716 + * Copy data from user space to kernel space.
12717 + *
12718 + * Returns number of bytes that could not be copied.
12719 + * On success, this will be zero.
12720 + *
12721 + * If some data could not be copied, this function will pad the copied
12722 + * data to the requested size using zero bytes.
12723 + */
12724 +static __always_inline unsigned long __must_check
12725 +copy_from_user(void *to, const void __user *from, unsigned long n)
12726 +{
12727 + if (access_ok(VERIFY_READ, from, n))
12728 + n = __copy_from_user(to, from, n);
12729 + else if ((long)n > 0) {
12730 + if (!__builtin_constant_p(n))
12731 + check_object_size(to, n, false);
12732 + memset(to, 0, n);
12733 + }
12734 + return n;
12735 }
12736
12737 -unsigned long __must_check copy_to_user(void __user *to,
12738 - const void *from, unsigned long n);
12739 -unsigned long __must_check copy_from_user(void *to,
12740 - const void __user *from,
12741 - unsigned long n);
12742 long __must_check strncpy_from_user(char *dst, const char __user *src,
12743 long count);
12744 long __must_check __strncpy_from_user(char *dst,
12745 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12746 index db24b21..f595ae7 100644
12747 --- a/arch/x86/include/asm/uaccess_64.h
12748 +++ b/arch/x86/include/asm/uaccess_64.h
12749 @@ -9,6 +9,9 @@
12750 #include <linux/prefetch.h>
12751 #include <linux/lockdep.h>
12752 #include <asm/page.h>
12753 +#include <asm/pgtable.h>
12754 +
12755 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12756
12757 /*
12758 * Copy To/From Userspace
12759 @@ -16,116 +19,205 @@
12760
12761 /* Handles exceptions in both to and from, but doesn't do access_ok */
12762 __must_check unsigned long
12763 -copy_user_generic(void *to, const void *from, unsigned len);
12764 +copy_user_generic(void *to, const void *from, unsigned long len);
12765
12766 __must_check unsigned long
12767 -copy_to_user(void __user *to, const void *from, unsigned len);
12768 -__must_check unsigned long
12769 -copy_from_user(void *to, const void __user *from, unsigned len);
12770 -__must_check unsigned long
12771 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12772 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12773
12774 static __always_inline __must_check
12775 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12776 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12777 {
12778 - int ret = 0;
12779 + unsigned ret = 0;
12780
12781 might_fault();
12782 - if (!__builtin_constant_p(size))
12783 - return copy_user_generic(dst, (__force void *)src, size);
12784 +
12785 + if (size > INT_MAX)
12786 + return size;
12787 +
12788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12789 + if (!__access_ok(VERIFY_READ, src, size))
12790 + return size;
12791 +#endif
12792 +
12793 + if (!__builtin_constant_p(size)) {
12794 + check_object_size(dst, size, false);
12795 +
12796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12797 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12798 + src += PAX_USER_SHADOW_BASE;
12799 +#endif
12800 +
12801 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12802 + }
12803 switch (size) {
12804 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12805 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12806 ret, "b", "b", "=q", 1);
12807 return ret;
12808 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12809 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12810 ret, "w", "w", "=r", 2);
12811 return ret;
12812 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12813 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12814 ret, "l", "k", "=r", 4);
12815 return ret;
12816 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12817 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12818 ret, "q", "", "=r", 8);
12819 return ret;
12820 case 10:
12821 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12822 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12823 ret, "q", "", "=r", 10);
12824 if (unlikely(ret))
12825 return ret;
12826 __get_user_asm(*(u16 *)(8 + (char *)dst),
12827 - (u16 __user *)(8 + (char __user *)src),
12828 + (const u16 __user *)(8 + (const char __user *)src),
12829 ret, "w", "w", "=r", 2);
12830 return ret;
12831 case 16:
12832 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12833 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12834 ret, "q", "", "=r", 16);
12835 if (unlikely(ret))
12836 return ret;
12837 __get_user_asm(*(u64 *)(8 + (char *)dst),
12838 - (u64 __user *)(8 + (char __user *)src),
12839 + (const u64 __user *)(8 + (const char __user *)src),
12840 ret, "q", "", "=r", 8);
12841 return ret;
12842 default:
12843 - return copy_user_generic(dst, (__force void *)src, size);
12844 +
12845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12846 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12847 + src += PAX_USER_SHADOW_BASE;
12848 +#endif
12849 +
12850 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12851 }
12852 }
12853
12854 static __always_inline __must_check
12855 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12856 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12857 {
12858 - int ret = 0;
12859 + unsigned ret = 0;
12860
12861 might_fault();
12862 - if (!__builtin_constant_p(size))
12863 - return copy_user_generic((__force void *)dst, src, size);
12864 +
12865 + pax_track_stack();
12866 +
12867 + if (size > INT_MAX)
12868 + return size;
12869 +
12870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12871 + if (!__access_ok(VERIFY_WRITE, dst, size))
12872 + return size;
12873 +#endif
12874 +
12875 + if (!__builtin_constant_p(size)) {
12876 + check_object_size(src, size, true);
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12880 + dst += PAX_USER_SHADOW_BASE;
12881 +#endif
12882 +
12883 + return copy_user_generic((__force_kernel void *)dst, src, size);
12884 + }
12885 switch (size) {
12886 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12887 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12888 ret, "b", "b", "iq", 1);
12889 return ret;
12890 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12891 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12892 ret, "w", "w", "ir", 2);
12893 return ret;
12894 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12895 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12896 ret, "l", "k", "ir", 4);
12897 return ret;
12898 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12899 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12900 ret, "q", "", "er", 8);
12901 return ret;
12902 case 10:
12903 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12904 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12905 ret, "q", "", "er", 10);
12906 if (unlikely(ret))
12907 return ret;
12908 asm("":::"memory");
12909 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12910 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12911 ret, "w", "w", "ir", 2);
12912 return ret;
12913 case 16:
12914 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12915 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12916 ret, "q", "", "er", 16);
12917 if (unlikely(ret))
12918 return ret;
12919 asm("":::"memory");
12920 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12921 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12922 ret, "q", "", "er", 8);
12923 return ret;
12924 default:
12925 - return copy_user_generic((__force void *)dst, src, size);
12926 +
12927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12928 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12929 + dst += PAX_USER_SHADOW_BASE;
12930 +#endif
12931 +
12932 + return copy_user_generic((__force_kernel void *)dst, src, size);
12933 + }
12934 +}
12935 +
12936 +static __always_inline __must_check
12937 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12938 +{
12939 + if (access_ok(VERIFY_WRITE, to, len))
12940 + len = __copy_to_user(to, from, len);
12941 + return len;
12942 +}
12943 +
12944 +static __always_inline __must_check
12945 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12946 +{
12947 + might_fault();
12948 +
12949 + if (access_ok(VERIFY_READ, from, len))
12950 + len = __copy_from_user(to, from, len);
12951 + else if (len < INT_MAX) {
12952 + if (!__builtin_constant_p(len))
12953 + check_object_size(to, len, false);
12954 + memset(to, 0, len);
12955 }
12956 + return len;
12957 }
12958
12959 static __always_inline __must_check
12960 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12961 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12962 {
12963 - int ret = 0;
12964 + unsigned ret = 0;
12965
12966 might_fault();
12967 - if (!__builtin_constant_p(size))
12968 - return copy_user_generic((__force void *)dst,
12969 - (__force void *)src, size);
12970 +
12971 + pax_track_stack();
12972 +
12973 + if (size > INT_MAX)
12974 + return size;
12975 +
12976 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12977 + if (!__access_ok(VERIFY_READ, src, size))
12978 + return size;
12979 + if (!__access_ok(VERIFY_WRITE, dst, size))
12980 + return size;
12981 +#endif
12982 +
12983 + if (!__builtin_constant_p(size)) {
12984 +
12985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12986 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12987 + src += PAX_USER_SHADOW_BASE;
12988 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12989 + dst += PAX_USER_SHADOW_BASE;
12990 +#endif
12991 +
12992 + return copy_user_generic((__force_kernel void *)dst,
12993 + (__force_kernel const void *)src, size);
12994 + }
12995 switch (size) {
12996 case 1: {
12997 u8 tmp;
12998 - __get_user_asm(tmp, (u8 __user *)src,
12999 + __get_user_asm(tmp, (const u8 __user *)src,
13000 ret, "b", "b", "=q", 1);
13001 if (likely(!ret))
13002 __put_user_asm(tmp, (u8 __user *)dst,
13003 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13004 }
13005 case 2: {
13006 u16 tmp;
13007 - __get_user_asm(tmp, (u16 __user *)src,
13008 + __get_user_asm(tmp, (const u16 __user *)src,
13009 ret, "w", "w", "=r", 2);
13010 if (likely(!ret))
13011 __put_user_asm(tmp, (u16 __user *)dst,
13012 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13013
13014 case 4: {
13015 u32 tmp;
13016 - __get_user_asm(tmp, (u32 __user *)src,
13017 + __get_user_asm(tmp, (const u32 __user *)src,
13018 ret, "l", "k", "=r", 4);
13019 if (likely(!ret))
13020 __put_user_asm(tmp, (u32 __user *)dst,
13021 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13022 }
13023 case 8: {
13024 u64 tmp;
13025 - __get_user_asm(tmp, (u64 __user *)src,
13026 + __get_user_asm(tmp, (const u64 __user *)src,
13027 ret, "q", "", "=r", 8);
13028 if (likely(!ret))
13029 __put_user_asm(tmp, (u64 __user *)dst,
13030 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13031 return ret;
13032 }
13033 default:
13034 - return copy_user_generic((__force void *)dst,
13035 - (__force void *)src, size);
13036 +
13037 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13038 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13039 + src += PAX_USER_SHADOW_BASE;
13040 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13041 + dst += PAX_USER_SHADOW_BASE;
13042 +#endif
13043 +
13044 + return copy_user_generic((__force_kernel void *)dst,
13045 + (__force_kernel const void *)src, size);
13046 }
13047 }
13048
13049 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13050 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13051 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13052
13053 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13054 - unsigned size);
13055 +static __must_check __always_inline unsigned long
13056 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13057 +{
13058 + pax_track_stack();
13059 +
13060 + if (size > INT_MAX)
13061 + return size;
13062 +
13063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13064 + if (!__access_ok(VERIFY_READ, src, size))
13065 + return size;
13066
13067 -static __must_check __always_inline int
13068 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13069 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13070 + src += PAX_USER_SHADOW_BASE;
13071 +#endif
13072 +
13073 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13074 +}
13075 +
13076 +static __must_check __always_inline unsigned long
13077 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13078 {
13079 - return copy_user_generic((__force void *)dst, src, size);
13080 + if (size > INT_MAX)
13081 + return size;
13082 +
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + if (!__access_ok(VERIFY_WRITE, dst, size))
13085 + return size;
13086 +
13087 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13088 + dst += PAX_USER_SHADOW_BASE;
13089 +#endif
13090 +
13091 + return copy_user_generic((__force_kernel void *)dst, src, size);
13092 }
13093
13094 -extern long __copy_user_nocache(void *dst, const void __user *src,
13095 - unsigned size, int zerorest);
13096 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13097 + unsigned long size, int zerorest);
13098
13099 -static inline int
13100 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13101 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13102 {
13103 might_sleep();
13104 +
13105 + if (size > INT_MAX)
13106 + return size;
13107 +
13108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13109 + if (!__access_ok(VERIFY_READ, src, size))
13110 + return size;
13111 +#endif
13112 +
13113 return __copy_user_nocache(dst, src, size, 1);
13114 }
13115
13116 -static inline int
13117 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13118 - unsigned size)
13119 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13120 + unsigned long size)
13121 {
13122 + if (size > INT_MAX)
13123 + return size;
13124 +
13125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13126 + if (!__access_ok(VERIFY_READ, src, size))
13127 + return size;
13128 +#endif
13129 +
13130 return __copy_user_nocache(dst, src, size, 0);
13131 }
13132
13133 -unsigned long
13134 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13135 +extern unsigned long
13136 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13137
13138 #endif /* _ASM_X86_UACCESS_64_H */
13139 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13140 index 9064052..786cfbc 100644
13141 --- a/arch/x86/include/asm/vdso.h
13142 +++ b/arch/x86/include/asm/vdso.h
13143 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13144 #define VDSO32_SYMBOL(base, name) \
13145 ({ \
13146 extern const char VDSO32_##name[]; \
13147 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13148 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13149 })
13150 #endif
13151
13152 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13153 index 3d61e20..9507180 100644
13154 --- a/arch/x86/include/asm/vgtod.h
13155 +++ b/arch/x86/include/asm/vgtod.h
13156 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13157 int sysctl_enabled;
13158 struct timezone sys_tz;
13159 struct { /* extract of a clocksource struct */
13160 + char name[8];
13161 cycle_t (*vread)(void);
13162 cycle_t cycle_last;
13163 cycle_t mask;
13164 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13165 index 61e08c0..b0da582 100644
13166 --- a/arch/x86/include/asm/vmi.h
13167 +++ b/arch/x86/include/asm/vmi.h
13168 @@ -191,6 +191,7 @@ struct vrom_header {
13169 u8 reserved[96]; /* Reserved for headers */
13170 char vmi_init[8]; /* VMI_Init jump point */
13171 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13172 + char rom_data[8048]; /* rest of the option ROM */
13173 } __attribute__((packed));
13174
13175 struct pnp_header {
13176 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13177 index c6e0bee..fcb9f74 100644
13178 --- a/arch/x86/include/asm/vmi_time.h
13179 +++ b/arch/x86/include/asm/vmi_time.h
13180 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13181 int (*wallclock_updated)(void);
13182 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13183 void (*cancel_alarm)(u32 flags);
13184 -} vmi_timer_ops;
13185 +} __no_const vmi_timer_ops;
13186
13187 /* Prototypes */
13188 extern void __init vmi_time_init(void);
13189 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13190 index d0983d2..1f7c9e9 100644
13191 --- a/arch/x86/include/asm/vsyscall.h
13192 +++ b/arch/x86/include/asm/vsyscall.h
13193 @@ -15,9 +15,10 @@ enum vsyscall_num {
13194
13195 #ifdef __KERNEL__
13196 #include <linux/seqlock.h>
13197 +#include <linux/getcpu.h>
13198 +#include <linux/time.h>
13199
13200 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13201 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13202
13203 /* Definitions for CONFIG_GENERIC_TIME definitions */
13204 #define __section_vsyscall_gtod_data __attribute__ \
13205 @@ -31,7 +32,6 @@ enum vsyscall_num {
13206 #define VGETCPU_LSL 2
13207
13208 extern int __vgetcpu_mode;
13209 -extern volatile unsigned long __jiffies;
13210
13211 /* kernel space (writeable) */
13212 extern int vgetcpu_mode;
13213 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13214
13215 extern void map_vsyscall(void);
13216
13217 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13218 +extern time_t vtime(time_t *t);
13219 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13220 #endif /* __KERNEL__ */
13221
13222 #endif /* _ASM_X86_VSYSCALL_H */
13223 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13224 index 2c756fd..3377e37 100644
13225 --- a/arch/x86/include/asm/x86_init.h
13226 +++ b/arch/x86/include/asm/x86_init.h
13227 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13228 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13229 void (*find_smp_config)(unsigned int reserve);
13230 void (*get_smp_config)(unsigned int early);
13231 -};
13232 +} __no_const;
13233
13234 /**
13235 * struct x86_init_resources - platform specific resource related ops
13236 @@ -42,7 +42,7 @@ struct x86_init_resources {
13237 void (*probe_roms)(void);
13238 void (*reserve_resources)(void);
13239 char *(*memory_setup)(void);
13240 -};
13241 +} __no_const;
13242
13243 /**
13244 * struct x86_init_irqs - platform specific interrupt setup
13245 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13246 void (*pre_vector_init)(void);
13247 void (*intr_init)(void);
13248 void (*trap_init)(void);
13249 -};
13250 +} __no_const;
13251
13252 /**
13253 * struct x86_init_oem - oem platform specific customizing functions
13254 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13255 struct x86_init_oem {
13256 void (*arch_setup)(void);
13257 void (*banner)(void);
13258 -};
13259 +} __no_const;
13260
13261 /**
13262 * struct x86_init_paging - platform specific paging functions
13263 @@ -75,7 +75,7 @@ struct x86_init_oem {
13264 struct x86_init_paging {
13265 void (*pagetable_setup_start)(pgd_t *base);
13266 void (*pagetable_setup_done)(pgd_t *base);
13267 -};
13268 +} __no_const;
13269
13270 /**
13271 * struct x86_init_timers - platform specific timer setup
13272 @@ -88,7 +88,7 @@ struct x86_init_timers {
13273 void (*setup_percpu_clockev)(void);
13274 void (*tsc_pre_init)(void);
13275 void (*timer_init)(void);
13276 -};
13277 +} __no_const;
13278
13279 /**
13280 * struct x86_init_ops - functions for platform specific setup
13281 @@ -101,7 +101,7 @@ struct x86_init_ops {
13282 struct x86_init_oem oem;
13283 struct x86_init_paging paging;
13284 struct x86_init_timers timers;
13285 -};
13286 +} __no_const;
13287
13288 /**
13289 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13290 @@ -109,7 +109,7 @@ struct x86_init_ops {
13291 */
13292 struct x86_cpuinit_ops {
13293 void (*setup_percpu_clockev)(void);
13294 -};
13295 +} __no_const;
13296
13297 /**
13298 * struct x86_platform_ops - platform specific runtime functions
13299 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13300 unsigned long (*calibrate_tsc)(void);
13301 unsigned long (*get_wallclock)(void);
13302 int (*set_wallclock)(unsigned long nowtime);
13303 -};
13304 +} __no_const;
13305
13306 extern struct x86_init_ops x86_init;
13307 extern struct x86_cpuinit_ops x86_cpuinit;
13308 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13309 index 727acc1..554f3eb 100644
13310 --- a/arch/x86/include/asm/xsave.h
13311 +++ b/arch/x86/include/asm/xsave.h
13312 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13313 static inline int xsave_user(struct xsave_struct __user *buf)
13314 {
13315 int err;
13316 +
13317 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13318 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13319 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13320 +#endif
13321 +
13322 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13323 "2:\n"
13324 ".section .fixup,\"ax\"\n"
13325 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13326 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13327 {
13328 int err;
13329 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13330 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13331 u32 lmask = mask;
13332 u32 hmask = mask >> 32;
13333
13334 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13335 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13336 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13337 +#endif
13338 +
13339 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13340 "2:\n"
13341 ".section .fixup,\"ax\"\n"
13342 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13343 index 6a564ac..9b1340c 100644
13344 --- a/arch/x86/kernel/acpi/realmode/Makefile
13345 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13346 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13347 $(call cc-option, -fno-stack-protector) \
13348 $(call cc-option, -mpreferred-stack-boundary=2)
13349 KBUILD_CFLAGS += $(call cc-option, -m32)
13350 +ifdef CONSTIFY_PLUGIN
13351 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13352 +endif
13353 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13354 GCOV_PROFILE := n
13355
13356 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13357 index 580b4e2..d4129e4 100644
13358 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13359 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13360 @@ -91,6 +91,9 @@ _start:
13361 /* Do any other stuff... */
13362
13363 #ifndef CONFIG_64BIT
13364 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13365 + call verify_cpu
13366 +
13367 /* This could also be done in C code... */
13368 movl pmode_cr3, %eax
13369 movl %eax, %cr3
13370 @@ -104,7 +107,7 @@ _start:
13371 movl %eax, %ecx
13372 orl %edx, %ecx
13373 jz 1f
13374 - movl $0xc0000080, %ecx
13375 + mov $MSR_EFER, %ecx
13376 wrmsr
13377 1:
13378
13379 @@ -114,6 +117,7 @@ _start:
13380 movl pmode_cr0, %eax
13381 movl %eax, %cr0
13382 jmp pmode_return
13383 +# include "../../verify_cpu.S"
13384 #else
13385 pushw $0
13386 pushw trampoline_segment
13387 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13388 index ca93638..7042f24 100644
13389 --- a/arch/x86/kernel/acpi/sleep.c
13390 +++ b/arch/x86/kernel/acpi/sleep.c
13391 @@ -11,11 +11,12 @@
13392 #include <linux/cpumask.h>
13393 #include <asm/segment.h>
13394 #include <asm/desc.h>
13395 +#include <asm/e820.h>
13396
13397 #include "realmode/wakeup.h"
13398 #include "sleep.h"
13399
13400 -unsigned long acpi_wakeup_address;
13401 +unsigned long acpi_wakeup_address = 0x2000;
13402 unsigned long acpi_realmode_flags;
13403
13404 /* address in low memory of the wakeup routine. */
13405 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13406 #else /* CONFIG_64BIT */
13407 header->trampoline_segment = setup_trampoline() >> 4;
13408 #ifdef CONFIG_SMP
13409 - stack_start.sp = temp_stack + sizeof(temp_stack);
13410 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13411 +
13412 + pax_open_kernel();
13413 early_gdt_descr.address =
13414 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13415 + pax_close_kernel();
13416 +
13417 initial_gs = per_cpu_offset(smp_processor_id());
13418 #endif
13419 initial_code = (unsigned long)wakeup_long64;
13420 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13421 return;
13422 }
13423
13424 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13425 -
13426 - if (!acpi_realmode) {
13427 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13428 - return;
13429 - }
13430 -
13431 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13432 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13433 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13434 }
13435
13436
13437 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13438 index 8ded418..079961e 100644
13439 --- a/arch/x86/kernel/acpi/wakeup_32.S
13440 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13441 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13442 # and restore the stack ... but you need gdt for this to work
13443 movl saved_context_esp, %esp
13444
13445 - movl %cs:saved_magic, %eax
13446 - cmpl $0x12345678, %eax
13447 + cmpl $0x12345678, saved_magic
13448 jne bogus_magic
13449
13450 # jump to place where we left off
13451 - movl saved_eip, %eax
13452 - jmp *%eax
13453 + jmp *(saved_eip)
13454
13455 bogus_magic:
13456 jmp bogus_magic
13457 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13458 index de7353c..075da5f 100644
13459 --- a/arch/x86/kernel/alternative.c
13460 +++ b/arch/x86/kernel/alternative.c
13461 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13462
13463 BUG_ON(p->len > MAX_PATCH_LEN);
13464 /* prep the buffer with the original instructions */
13465 - memcpy(insnbuf, p->instr, p->len);
13466 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13467 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13468 (unsigned long)p->instr, p->len);
13469
13470 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13471 if (smp_alt_once)
13472 free_init_pages("SMP alternatives",
13473 (unsigned long)__smp_locks,
13474 - (unsigned long)__smp_locks_end);
13475 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13476
13477 restart_nmi();
13478 }
13479 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13480 * instructions. And on the local CPU you need to be protected again NMI or MCE
13481 * handlers seeing an inconsistent instruction while you patch.
13482 */
13483 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13484 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13485 size_t len)
13486 {
13487 unsigned long flags;
13488 local_irq_save(flags);
13489 - memcpy(addr, opcode, len);
13490 +
13491 + pax_open_kernel();
13492 + memcpy(ktla_ktva(addr), opcode, len);
13493 sync_core();
13494 + pax_close_kernel();
13495 +
13496 local_irq_restore(flags);
13497 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13498 that causes hangs on some VIA CPUs. */
13499 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13500 */
13501 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13502 {
13503 - unsigned long flags;
13504 - char *vaddr;
13505 + unsigned char *vaddr = ktla_ktva(addr);
13506 struct page *pages[2];
13507 - int i;
13508 + size_t i;
13509
13510 if (!core_kernel_text((unsigned long)addr)) {
13511 - pages[0] = vmalloc_to_page(addr);
13512 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13513 + pages[0] = vmalloc_to_page(vaddr);
13514 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13515 } else {
13516 - pages[0] = virt_to_page(addr);
13517 + pages[0] = virt_to_page(vaddr);
13518 WARN_ON(!PageReserved(pages[0]));
13519 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13520 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13521 }
13522 BUG_ON(!pages[0]);
13523 - local_irq_save(flags);
13524 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13525 - if (pages[1])
13526 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13527 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13528 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13529 - clear_fixmap(FIX_TEXT_POKE0);
13530 - if (pages[1])
13531 - clear_fixmap(FIX_TEXT_POKE1);
13532 - local_flush_tlb();
13533 - sync_core();
13534 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13535 - that causes hangs on some VIA CPUs. */
13536 + text_poke_early(addr, opcode, len);
13537 for (i = 0; i < len; i++)
13538 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13539 - local_irq_restore(flags);
13540 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13541 return addr;
13542 }
13543 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13544 index 3a44b75..1601800 100644
13545 --- a/arch/x86/kernel/amd_iommu.c
13546 +++ b/arch/x86/kernel/amd_iommu.c
13547 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13548 }
13549 }
13550
13551 -static struct dma_map_ops amd_iommu_dma_ops = {
13552 +static const struct dma_map_ops amd_iommu_dma_ops = {
13553 .alloc_coherent = alloc_coherent,
13554 .free_coherent = free_coherent,
13555 .map_page = map_page,
13556 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13557 index 1d2d670..8e3f477 100644
13558 --- a/arch/x86/kernel/apic/apic.c
13559 +++ b/arch/x86/kernel/apic/apic.c
13560 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13561 /*
13562 * Debug level, exported for io_apic.c
13563 */
13564 -unsigned int apic_verbosity;
13565 +int apic_verbosity;
13566
13567 int pic_mode;
13568
13569 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13570 apic_write(APIC_ESR, 0);
13571 v1 = apic_read(APIC_ESR);
13572 ack_APIC_irq();
13573 - atomic_inc(&irq_err_count);
13574 + atomic_inc_unchecked(&irq_err_count);
13575
13576 /*
13577 * Here is what the APIC error bits mean:
13578 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13579 u16 *bios_cpu_apicid;
13580 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13581
13582 + pax_track_stack();
13583 +
13584 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13585 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13586
13587 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13588 index 8928d97..f799cea 100644
13589 --- a/arch/x86/kernel/apic/io_apic.c
13590 +++ b/arch/x86/kernel/apic/io_apic.c
13591 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13592 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13593 GFP_ATOMIC);
13594 if (!ioapic_entries)
13595 - return 0;
13596 + return NULL;
13597
13598 for (apic = 0; apic < nr_ioapics; apic++) {
13599 ioapic_entries[apic] =
13600 @@ -733,7 +733,7 @@ nomem:
13601 kfree(ioapic_entries[apic]);
13602 kfree(ioapic_entries);
13603
13604 - return 0;
13605 + return NULL;
13606 }
13607
13608 /*
13609 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13610 }
13611 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13612
13613 -void lock_vector_lock(void)
13614 +void lock_vector_lock(void) __acquires(vector_lock)
13615 {
13616 /* Used to the online set of cpus does not change
13617 * during assign_irq_vector.
13618 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13619 spin_lock(&vector_lock);
13620 }
13621
13622 -void unlock_vector_lock(void)
13623 +void unlock_vector_lock(void) __releases(vector_lock)
13624 {
13625 spin_unlock(&vector_lock);
13626 }
13627 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13628 ack_APIC_irq();
13629 }
13630
13631 -atomic_t irq_mis_count;
13632 +atomic_unchecked_t irq_mis_count;
13633
13634 static void ack_apic_level(unsigned int irq)
13635 {
13636 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13637
13638 /* Tail end of version 0x11 I/O APIC bug workaround */
13639 if (!(v & (1 << (i & 0x1f)))) {
13640 - atomic_inc(&irq_mis_count);
13641 + atomic_inc_unchecked(&irq_mis_count);
13642 spin_lock(&ioapic_lock);
13643 __mask_and_edge_IO_APIC_irq(cfg);
13644 __unmask_and_level_IO_APIC_irq(cfg);
13645 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13646 index 151ace6..f317474 100644
13647 --- a/arch/x86/kernel/apm_32.c
13648 +++ b/arch/x86/kernel/apm_32.c
13649 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13650 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13651 * even though they are called in protected mode.
13652 */
13653 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13654 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13655 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13656
13657 static const char driver_version[] = "1.16ac"; /* no spaces */
13658 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13659 BUG_ON(cpu != 0);
13660 gdt = get_cpu_gdt_table(cpu);
13661 save_desc_40 = gdt[0x40 / 8];
13662 +
13663 + pax_open_kernel();
13664 gdt[0x40 / 8] = bad_bios_desc;
13665 + pax_close_kernel();
13666
13667 apm_irq_save(flags);
13668 APM_DO_SAVE_SEGS;
13669 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13670 &call->esi);
13671 APM_DO_RESTORE_SEGS;
13672 apm_irq_restore(flags);
13673 +
13674 + pax_open_kernel();
13675 gdt[0x40 / 8] = save_desc_40;
13676 + pax_close_kernel();
13677 +
13678 put_cpu();
13679
13680 return call->eax & 0xff;
13681 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13682 BUG_ON(cpu != 0);
13683 gdt = get_cpu_gdt_table(cpu);
13684 save_desc_40 = gdt[0x40 / 8];
13685 +
13686 + pax_open_kernel();
13687 gdt[0x40 / 8] = bad_bios_desc;
13688 + pax_close_kernel();
13689
13690 apm_irq_save(flags);
13691 APM_DO_SAVE_SEGS;
13692 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13693 &call->eax);
13694 APM_DO_RESTORE_SEGS;
13695 apm_irq_restore(flags);
13696 +
13697 + pax_open_kernel();
13698 gdt[0x40 / 8] = save_desc_40;
13699 + pax_close_kernel();
13700 +
13701 put_cpu();
13702 return error;
13703 }
13704 @@ -975,7 +989,7 @@ recalc:
13705
13706 static void apm_power_off(void)
13707 {
13708 - unsigned char po_bios_call[] = {
13709 + const unsigned char po_bios_call[] = {
13710 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13711 0x8e, 0xd0, /* movw ax,ss */
13712 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13713 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13714 * code to that CPU.
13715 */
13716 gdt = get_cpu_gdt_table(0);
13717 +
13718 + pax_open_kernel();
13719 set_desc_base(&gdt[APM_CS >> 3],
13720 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13721 set_desc_base(&gdt[APM_CS_16 >> 3],
13722 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13723 set_desc_base(&gdt[APM_DS >> 3],
13724 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13725 + pax_close_kernel();
13726
13727 proc_create("apm", 0, NULL, &apm_file_ops);
13728
13729 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13730 index dfdbf64..9b2b6ce 100644
13731 --- a/arch/x86/kernel/asm-offsets_32.c
13732 +++ b/arch/x86/kernel/asm-offsets_32.c
13733 @@ -51,7 +51,6 @@ void foo(void)
13734 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13735 BLANK();
13736
13737 - OFFSET(TI_task, thread_info, task);
13738 OFFSET(TI_exec_domain, thread_info, exec_domain);
13739 OFFSET(TI_flags, thread_info, flags);
13740 OFFSET(TI_status, thread_info, status);
13741 @@ -60,6 +59,8 @@ void foo(void)
13742 OFFSET(TI_restart_block, thread_info, restart_block);
13743 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13744 OFFSET(TI_cpu, thread_info, cpu);
13745 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13746 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13747 BLANK();
13748
13749 OFFSET(GDS_size, desc_ptr, size);
13750 @@ -99,6 +100,7 @@ void foo(void)
13751
13752 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13753 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13754 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13755 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13756 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13757 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13758 @@ -115,6 +117,11 @@ void foo(void)
13759 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13760 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13761 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13762 +
13763 +#ifdef CONFIG_PAX_KERNEXEC
13764 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13765 +#endif
13766 +
13767 #endif
13768
13769 #ifdef CONFIG_XEN
13770 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13771 index 4a6aeed..371de20 100644
13772 --- a/arch/x86/kernel/asm-offsets_64.c
13773 +++ b/arch/x86/kernel/asm-offsets_64.c
13774 @@ -44,6 +44,8 @@ int main(void)
13775 ENTRY(addr_limit);
13776 ENTRY(preempt_count);
13777 ENTRY(status);
13778 + ENTRY(lowest_stack);
13779 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13780 #ifdef CONFIG_IA32_EMULATION
13781 ENTRY(sysenter_return);
13782 #endif
13783 @@ -63,6 +65,18 @@ int main(void)
13784 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13785 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13786 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13787 +
13788 +#ifdef CONFIG_PAX_KERNEXEC
13789 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13790 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13791 +#endif
13792 +
13793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13794 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13795 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13796 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13797 +#endif
13798 +
13799 #endif
13800
13801
13802 @@ -115,6 +129,7 @@ int main(void)
13803 ENTRY(cr8);
13804 BLANK();
13805 #undef ENTRY
13806 + DEFINE(TSS_size, sizeof(struct tss_struct));
13807 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13808 BLANK();
13809 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13810 @@ -130,6 +145,7 @@ int main(void)
13811
13812 BLANK();
13813 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13814 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13815 #ifdef CONFIG_XEN
13816 BLANK();
13817 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13818 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13819 index ff502cc..dc5133e 100644
13820 --- a/arch/x86/kernel/cpu/Makefile
13821 +++ b/arch/x86/kernel/cpu/Makefile
13822 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13823 CFLAGS_REMOVE_common.o = -pg
13824 endif
13825
13826 -# Make sure load_percpu_segment has no stackprotector
13827 -nostackp := $(call cc-option, -fno-stack-protector)
13828 -CFLAGS_common.o := $(nostackp)
13829 -
13830 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13831 obj-y += proc.o capflags.o powerflags.o common.o
13832 obj-y += vmware.o hypervisor.o sched.o
13833 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13834 index 6e082dc..a0b5f36 100644
13835 --- a/arch/x86/kernel/cpu/amd.c
13836 +++ b/arch/x86/kernel/cpu/amd.c
13837 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13838 unsigned int size)
13839 {
13840 /* AMD errata T13 (order #21922) */
13841 - if ((c->x86 == 6)) {
13842 + if (c->x86 == 6) {
13843 /* Duron Rev A0 */
13844 if (c->x86_model == 3 && c->x86_mask == 0)
13845 size = 64;
13846 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13847 index 4e34d10..ba6bc97 100644
13848 --- a/arch/x86/kernel/cpu/common.c
13849 +++ b/arch/x86/kernel/cpu/common.c
13850 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13851
13852 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13853
13854 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13855 -#ifdef CONFIG_X86_64
13856 - /*
13857 - * We need valid kernel segments for data and code in long mode too
13858 - * IRET will check the segment types kkeil 2000/10/28
13859 - * Also sysret mandates a special GDT layout
13860 - *
13861 - * TLS descriptors are currently at a different place compared to i386.
13862 - * Hopefully nobody expects them at a fixed place (Wine?)
13863 - */
13864 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13865 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13866 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13867 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13868 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13869 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13870 -#else
13871 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13872 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13873 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13874 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13875 - /*
13876 - * Segments used for calling PnP BIOS have byte granularity.
13877 - * They code segments and data segments have fixed 64k limits,
13878 - * the transfer segment sizes are set at run time.
13879 - */
13880 - /* 32-bit code */
13881 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13882 - /* 16-bit code */
13883 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13884 - /* 16-bit data */
13885 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13886 - /* 16-bit data */
13887 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13888 - /* 16-bit data */
13889 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13890 - /*
13891 - * The APM segments have byte granularity and their bases
13892 - * are set at run time. All have 64k limits.
13893 - */
13894 - /* 32-bit code */
13895 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13896 - /* 16-bit code */
13897 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13898 - /* data */
13899 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13900 -
13901 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13902 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13903 - GDT_STACK_CANARY_INIT
13904 -#endif
13905 -} };
13906 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13907 -
13908 static int __init x86_xsave_setup(char *s)
13909 {
13910 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13911 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13912 {
13913 struct desc_ptr gdt_descr;
13914
13915 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13916 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13917 gdt_descr.size = GDT_SIZE - 1;
13918 load_gdt(&gdt_descr);
13919 /* Reload the per-cpu base */
13920 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13921 /* Filter out anything that depends on CPUID levels we don't have */
13922 filter_cpuid_features(c, true);
13923
13924 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13925 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13926 +#endif
13927 +
13928 /* If the model name is still unset, do table lookup. */
13929 if (!c->x86_model_id[0]) {
13930 const char *p;
13931 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13932 }
13933 __setup("clearcpuid=", setup_disablecpuid);
13934
13935 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13936 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13937 +
13938 #ifdef CONFIG_X86_64
13939 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13940
13941 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13942 EXPORT_PER_CPU_SYMBOL(current_task);
13943
13944 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13945 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13946 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13947 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13948
13949 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13950 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13951 {
13952 memset(regs, 0, sizeof(struct pt_regs));
13953 regs->fs = __KERNEL_PERCPU;
13954 - regs->gs = __KERNEL_STACK_CANARY;
13955 + savesegment(gs, regs->gs);
13956
13957 return regs;
13958 }
13959 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13960 int i;
13961
13962 cpu = stack_smp_processor_id();
13963 - t = &per_cpu(init_tss, cpu);
13964 + t = init_tss + cpu;
13965 orig_ist = &per_cpu(orig_ist, cpu);
13966
13967 #ifdef CONFIG_NUMA
13968 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13969 switch_to_new_gdt(cpu);
13970 loadsegment(fs, 0);
13971
13972 - load_idt((const struct desc_ptr *)&idt_descr);
13973 + load_idt(&idt_descr);
13974
13975 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13976 syscall_init();
13977 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13978 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13979 barrier();
13980
13981 - check_efer();
13982 if (cpu != 0)
13983 enable_x2apic();
13984
13985 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13986 {
13987 int cpu = smp_processor_id();
13988 struct task_struct *curr = current;
13989 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13990 + struct tss_struct *t = init_tss + cpu;
13991 struct thread_struct *thread = &curr->thread;
13992
13993 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13994 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13995 index 6a77cca..4f4fca0 100644
13996 --- a/arch/x86/kernel/cpu/intel.c
13997 +++ b/arch/x86/kernel/cpu/intel.c
13998 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13999 * Update the IDT descriptor and reload the IDT so that
14000 * it uses the read-only mapped virtual address.
14001 */
14002 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14003 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14004 load_idt(&idt_descr);
14005 }
14006 #endif
14007 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14008 index 417990f..96dc36b 100644
14009 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14010 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14011 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14012 return ret;
14013 }
14014
14015 -static struct sysfs_ops sysfs_ops = {
14016 +static const struct sysfs_ops sysfs_ops = {
14017 .show = show,
14018 .store = store,
14019 };
14020 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14021 index 472763d..9831e11 100644
14022 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14023 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14024 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14025 static int inject_init(void)
14026 {
14027 printk(KERN_INFO "Machine check injector initialized\n");
14028 - mce_chrdev_ops.write = mce_write;
14029 + pax_open_kernel();
14030 + *(void **)&mce_chrdev_ops.write = mce_write;
14031 + pax_close_kernel();
14032 register_die_notifier(&mce_raise_nb);
14033 return 0;
14034 }
14035 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14036 index 0f16a2b..21740f5 100644
14037 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14038 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14039 @@ -43,6 +43,7 @@
14040 #include <asm/ipi.h>
14041 #include <asm/mce.h>
14042 #include <asm/msr.h>
14043 +#include <asm/local.h>
14044
14045 #include "mce-internal.h"
14046
14047 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14048 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14049 m->cs, m->ip);
14050
14051 - if (m->cs == __KERNEL_CS)
14052 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14053 print_symbol("{%s}", m->ip);
14054 pr_cont("\n");
14055 }
14056 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
14057
14058 #define PANIC_TIMEOUT 5 /* 5 seconds */
14059
14060 -static atomic_t mce_paniced;
14061 +static atomic_unchecked_t mce_paniced;
14062
14063 static int fake_panic;
14064 -static atomic_t mce_fake_paniced;
14065 +static atomic_unchecked_t mce_fake_paniced;
14066
14067 /* Panic in progress. Enable interrupts and wait for final IPI */
14068 static void wait_for_panic(void)
14069 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14070 /*
14071 * Make sure only one CPU runs in machine check panic
14072 */
14073 - if (atomic_inc_return(&mce_paniced) > 1)
14074 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14075 wait_for_panic();
14076 barrier();
14077
14078 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14079 console_verbose();
14080 } else {
14081 /* Don't log too much for fake panic */
14082 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14083 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14084 return;
14085 }
14086 print_mce_head();
14087 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14088 * might have been modified by someone else.
14089 */
14090 rmb();
14091 - if (atomic_read(&mce_paniced))
14092 + if (atomic_read_unchecked(&mce_paniced))
14093 wait_for_panic();
14094 if (!monarch_timeout)
14095 goto out;
14096 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14097 }
14098
14099 /* Call the installed machine check handler for this CPU setup. */
14100 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14101 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14102 unexpected_machine_check;
14103
14104 /*
14105 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14106 return;
14107 }
14108
14109 + pax_open_kernel();
14110 machine_check_vector = do_machine_check;
14111 + pax_close_kernel();
14112
14113 mce_init();
14114 mce_cpu_features(c);
14115 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14116 */
14117
14118 static DEFINE_SPINLOCK(mce_state_lock);
14119 -static int open_count; /* #times opened */
14120 +static local_t open_count; /* #times opened */
14121 static int open_exclu; /* already open exclusive? */
14122
14123 static int mce_open(struct inode *inode, struct file *file)
14124 {
14125 spin_lock(&mce_state_lock);
14126
14127 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14128 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14129 spin_unlock(&mce_state_lock);
14130
14131 return -EBUSY;
14132 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14133
14134 if (file->f_flags & O_EXCL)
14135 open_exclu = 1;
14136 - open_count++;
14137 + local_inc(&open_count);
14138
14139 spin_unlock(&mce_state_lock);
14140
14141 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14142 {
14143 spin_lock(&mce_state_lock);
14144
14145 - open_count--;
14146 + local_dec(&open_count);
14147 open_exclu = 0;
14148
14149 spin_unlock(&mce_state_lock);
14150 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14151 static void mce_reset(void)
14152 {
14153 cpu_missing = 0;
14154 - atomic_set(&mce_fake_paniced, 0);
14155 + atomic_set_unchecked(&mce_fake_paniced, 0);
14156 atomic_set(&mce_executing, 0);
14157 atomic_set(&mce_callin, 0);
14158 atomic_set(&global_nwo, 0);
14159 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14160 index ef3cd31..9d2f6ab 100644
14161 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14162 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14163 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14164 return ret;
14165 }
14166
14167 -static struct sysfs_ops threshold_ops = {
14168 +static const struct sysfs_ops threshold_ops = {
14169 .show = show,
14170 .store = store,
14171 };
14172 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14173 index 5c0e653..0882b0a 100644
14174 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14175 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14176 @@ -12,6 +12,7 @@
14177 #include <asm/system.h>
14178 #include <asm/mce.h>
14179 #include <asm/msr.h>
14180 +#include <asm/pgtable.h>
14181
14182 /* By default disabled */
14183 int mce_p5_enabled __read_mostly;
14184 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14185 if (!cpu_has(c, X86_FEATURE_MCE))
14186 return;
14187
14188 + pax_open_kernel();
14189 machine_check_vector = pentium_machine_check;
14190 + pax_close_kernel();
14191 /* Make sure the vector pointer is visible before we enable MCEs: */
14192 wmb();
14193
14194 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14195 index 54060f5..c1a7577 100644
14196 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14197 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14198 @@ -11,6 +11,7 @@
14199 #include <asm/system.h>
14200 #include <asm/mce.h>
14201 #include <asm/msr.h>
14202 +#include <asm/pgtable.h>
14203
14204 /* Machine check handler for WinChip C6: */
14205 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14206 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14207 {
14208 u32 lo, hi;
14209
14210 + pax_open_kernel();
14211 machine_check_vector = winchip_machine_check;
14212 + pax_close_kernel();
14213 /* Make sure the vector pointer is visible before we enable MCEs: */
14214 wmb();
14215
14216 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14217 index 33af141..92ba9cd 100644
14218 --- a/arch/x86/kernel/cpu/mtrr/amd.c
14219 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
14220 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14221 return 0;
14222 }
14223
14224 -static struct mtrr_ops amd_mtrr_ops = {
14225 +static const struct mtrr_ops amd_mtrr_ops = {
14226 .vendor = X86_VENDOR_AMD,
14227 .set = amd_set_mtrr,
14228 .get = amd_get_mtrr,
14229 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14230 index de89f14..316fe3e 100644
14231 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
14232 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14233 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14234 return 0;
14235 }
14236
14237 -static struct mtrr_ops centaur_mtrr_ops = {
14238 +static const struct mtrr_ops centaur_mtrr_ops = {
14239 .vendor = X86_VENDOR_CENTAUR,
14240 .set = centaur_set_mcr,
14241 .get = centaur_get_mcr,
14242 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14243 index 228d982..68a3343 100644
14244 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14245 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14246 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14247 post_set();
14248 }
14249
14250 -static struct mtrr_ops cyrix_mtrr_ops = {
14251 +static const struct mtrr_ops cyrix_mtrr_ops = {
14252 .vendor = X86_VENDOR_CYRIX,
14253 .set_all = cyrix_set_all,
14254 .set = cyrix_set_arr,
14255 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14256 index 55da0c5..4d75584 100644
14257 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14258 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14259 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14260 /*
14261 * Generic structure...
14262 */
14263 -struct mtrr_ops generic_mtrr_ops = {
14264 +const struct mtrr_ops generic_mtrr_ops = {
14265 .use_intel_if = 1,
14266 .set_all = generic_set_all,
14267 .get = generic_get_mtrr,
14268 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14269 index fd60f09..c94ef52 100644
14270 --- a/arch/x86/kernel/cpu/mtrr/main.c
14271 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14272 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14273 u64 size_or_mask, size_and_mask;
14274 static bool mtrr_aps_delayed_init;
14275
14276 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14277 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14278
14279 -struct mtrr_ops *mtrr_if;
14280 +const struct mtrr_ops *mtrr_if;
14281
14282 static void set_mtrr(unsigned int reg, unsigned long base,
14283 unsigned long size, mtrr_type type);
14284
14285 -void set_mtrr_ops(struct mtrr_ops *ops)
14286 +void set_mtrr_ops(const struct mtrr_ops *ops)
14287 {
14288 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14289 mtrr_ops[ops->vendor] = ops;
14290 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14291 index a501dee..816c719 100644
14292 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14293 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14294 @@ -25,14 +25,14 @@ struct mtrr_ops {
14295 int (*validate_add_page)(unsigned long base, unsigned long size,
14296 unsigned int type);
14297 int (*have_wrcomb)(void);
14298 -};
14299 +} __do_const;
14300
14301 extern int generic_get_free_region(unsigned long base, unsigned long size,
14302 int replace_reg);
14303 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14304 unsigned int type);
14305
14306 -extern struct mtrr_ops generic_mtrr_ops;
14307 +extern const struct mtrr_ops generic_mtrr_ops;
14308
14309 extern int positive_have_wrcomb(void);
14310
14311 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14312 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14313 void get_mtrr_state(void);
14314
14315 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14316 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14317
14318 extern u64 size_or_mask, size_and_mask;
14319 -extern struct mtrr_ops *mtrr_if;
14320 +extern const struct mtrr_ops *mtrr_if;
14321
14322 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14323 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14324 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14325 index 0ff02ca..fc49a60 100644
14326 --- a/arch/x86/kernel/cpu/perf_event.c
14327 +++ b/arch/x86/kernel/cpu/perf_event.c
14328 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14329 * count to the generic event atomically:
14330 */
14331 again:
14332 - prev_raw_count = atomic64_read(&hwc->prev_count);
14333 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14334 rdmsrl(hwc->event_base + idx, new_raw_count);
14335
14336 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14337 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14338 new_raw_count) != prev_raw_count)
14339 goto again;
14340
14341 @@ -741,7 +741,7 @@ again:
14342 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14343 delta >>= shift;
14344
14345 - atomic64_add(delta, &event->count);
14346 + atomic64_add_unchecked(delta, &event->count);
14347 atomic64_sub(delta, &hwc->period_left);
14348
14349 return new_raw_count;
14350 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14351 * The hw event starts counting from this event offset,
14352 * mark it to be able to extra future deltas:
14353 */
14354 - atomic64_set(&hwc->prev_count, (u64)-left);
14355 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14356
14357 err = checking_wrmsrl(hwc->event_base + idx,
14358 (u64)(-left) & x86_pmu.event_mask);
14359 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14360 break;
14361
14362 callchain_store(entry, frame.return_address);
14363 - fp = frame.next_frame;
14364 + fp = (__force const void __user *)frame.next_frame;
14365 }
14366 }
14367
14368 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14369 index 898df97..9e82503 100644
14370 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14371 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14372 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14373
14374 /* Interface defining a CPU specific perfctr watchdog */
14375 struct wd_ops {
14376 - int (*reserve)(void);
14377 - void (*unreserve)(void);
14378 - int (*setup)(unsigned nmi_hz);
14379 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14380 - void (*stop)(void);
14381 + int (* const reserve)(void);
14382 + void (* const unreserve)(void);
14383 + int (* const setup)(unsigned nmi_hz);
14384 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14385 + void (* const stop)(void);
14386 unsigned perfctr;
14387 unsigned evntsel;
14388 u64 checkbit;
14389 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14390 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14391 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14392
14393 +/* cannot be const */
14394 static struct wd_ops intel_arch_wd_ops;
14395
14396 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14397 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14398 return 1;
14399 }
14400
14401 +/* cannot be const */
14402 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14403 .reserve = single_msr_reserve,
14404 .unreserve = single_msr_unreserve,
14405 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14406 index ff95824..2ffdcb5 100644
14407 --- a/arch/x86/kernel/crash.c
14408 +++ b/arch/x86/kernel/crash.c
14409 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14410 regs = args->regs;
14411
14412 #ifdef CONFIG_X86_32
14413 - if (!user_mode_vm(regs)) {
14414 + if (!user_mode(regs)) {
14415 crash_fixup_ss_esp(&fixed_regs, regs);
14416 regs = &fixed_regs;
14417 }
14418 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14419 index 37250fe..bf2ec74 100644
14420 --- a/arch/x86/kernel/doublefault_32.c
14421 +++ b/arch/x86/kernel/doublefault_32.c
14422 @@ -11,7 +11,7 @@
14423
14424 #define DOUBLEFAULT_STACKSIZE (1024)
14425 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14426 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14427 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14428
14429 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14430
14431 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14432 unsigned long gdt, tss;
14433
14434 store_gdt(&gdt_desc);
14435 - gdt = gdt_desc.address;
14436 + gdt = (unsigned long)gdt_desc.address;
14437
14438 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14439
14440 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14441 /* 0x2 bit is always set */
14442 .flags = X86_EFLAGS_SF | 0x2,
14443 .sp = STACK_START,
14444 - .es = __USER_DS,
14445 + .es = __KERNEL_DS,
14446 .cs = __KERNEL_CS,
14447 .ss = __KERNEL_DS,
14448 - .ds = __USER_DS,
14449 + .ds = __KERNEL_DS,
14450 .fs = __KERNEL_PERCPU,
14451
14452 .__cr3 = __pa_nodebug(swapper_pg_dir),
14453 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14454 index 2d8a371..4fa6ae6 100644
14455 --- a/arch/x86/kernel/dumpstack.c
14456 +++ b/arch/x86/kernel/dumpstack.c
14457 @@ -2,6 +2,9 @@
14458 * Copyright (C) 1991, 1992 Linus Torvalds
14459 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14460 */
14461 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14462 +#define __INCLUDED_BY_HIDESYM 1
14463 +#endif
14464 #include <linux/kallsyms.h>
14465 #include <linux/kprobes.h>
14466 #include <linux/uaccess.h>
14467 @@ -28,7 +31,7 @@ static int die_counter;
14468
14469 void printk_address(unsigned long address, int reliable)
14470 {
14471 - printk(" [<%p>] %s%pS\n", (void *) address,
14472 + printk(" [<%p>] %s%pA\n", (void *) address,
14473 reliable ? "" : "? ", (void *) address);
14474 }
14475
14476 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14477 static void
14478 print_ftrace_graph_addr(unsigned long addr, void *data,
14479 const struct stacktrace_ops *ops,
14480 - struct thread_info *tinfo, int *graph)
14481 + struct task_struct *task, int *graph)
14482 {
14483 - struct task_struct *task = tinfo->task;
14484 unsigned long ret_addr;
14485 int index = task->curr_ret_stack;
14486
14487 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14488 static inline void
14489 print_ftrace_graph_addr(unsigned long addr, void *data,
14490 const struct stacktrace_ops *ops,
14491 - struct thread_info *tinfo, int *graph)
14492 + struct task_struct *task, int *graph)
14493 { }
14494 #endif
14495
14496 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14497 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14498 */
14499
14500 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14501 - void *p, unsigned int size, void *end)
14502 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14503 {
14504 - void *t = tinfo;
14505 if (end) {
14506 if (p < end && p >= (end-THREAD_SIZE))
14507 return 1;
14508 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14509 }
14510
14511 unsigned long
14512 -print_context_stack(struct thread_info *tinfo,
14513 +print_context_stack(struct task_struct *task, void *stack_start,
14514 unsigned long *stack, unsigned long bp,
14515 const struct stacktrace_ops *ops, void *data,
14516 unsigned long *end, int *graph)
14517 {
14518 struct stack_frame *frame = (struct stack_frame *)bp;
14519
14520 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14521 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14522 unsigned long addr;
14523
14524 addr = *stack;
14525 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14526 } else {
14527 ops->address(data, addr, 0);
14528 }
14529 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14530 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14531 }
14532 stack++;
14533 }
14534 @@ -180,7 +180,7 @@ void dump_stack(void)
14535 #endif
14536
14537 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14538 - current->pid, current->comm, print_tainted(),
14539 + task_pid_nr(current), current->comm, print_tainted(),
14540 init_utsname()->release,
14541 (int)strcspn(init_utsname()->version, " "),
14542 init_utsname()->version);
14543 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14544 return flags;
14545 }
14546
14547 +extern void gr_handle_kernel_exploit(void);
14548 +
14549 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14550 {
14551 if (regs && kexec_should_crash(current))
14552 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14553 panic("Fatal exception in interrupt");
14554 if (panic_on_oops)
14555 panic("Fatal exception");
14556 - do_exit(signr);
14557 +
14558 + gr_handle_kernel_exploit();
14559 +
14560 + do_group_exit(signr);
14561 }
14562
14563 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14564 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14565 unsigned long flags = oops_begin();
14566 int sig = SIGSEGV;
14567
14568 - if (!user_mode_vm(regs))
14569 + if (!user_mode(regs))
14570 report_bug(regs->ip, regs);
14571
14572 if (__die(str, regs, err))
14573 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14574 index 81086c2..13e8b17 100644
14575 --- a/arch/x86/kernel/dumpstack.h
14576 +++ b/arch/x86/kernel/dumpstack.h
14577 @@ -15,7 +15,7 @@
14578 #endif
14579
14580 extern unsigned long
14581 -print_context_stack(struct thread_info *tinfo,
14582 +print_context_stack(struct task_struct *task, void *stack_start,
14583 unsigned long *stack, unsigned long bp,
14584 const struct stacktrace_ops *ops, void *data,
14585 unsigned long *end, int *graph);
14586 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14587 index f7dd2a7..504f53b 100644
14588 --- a/arch/x86/kernel/dumpstack_32.c
14589 +++ b/arch/x86/kernel/dumpstack_32.c
14590 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14591 #endif
14592
14593 for (;;) {
14594 - struct thread_info *context;
14595 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14596 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14597
14598 - context = (struct thread_info *)
14599 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14600 - bp = print_context_stack(context, stack, bp, ops,
14601 - data, NULL, &graph);
14602 -
14603 - stack = (unsigned long *)context->previous_esp;
14604 - if (!stack)
14605 + if (stack_start == task_stack_page(task))
14606 break;
14607 + stack = *(unsigned long **)stack_start;
14608 if (ops->stack(data, "IRQ") < 0)
14609 break;
14610 touch_nmi_watchdog();
14611 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14612 * When in-kernel, we also print out the stack and code at the
14613 * time of the fault..
14614 */
14615 - if (!user_mode_vm(regs)) {
14616 + if (!user_mode(regs)) {
14617 unsigned int code_prologue = code_bytes * 43 / 64;
14618 unsigned int code_len = code_bytes;
14619 unsigned char c;
14620 u8 *ip;
14621 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14622
14623 printk(KERN_EMERG "Stack:\n");
14624 show_stack_log_lvl(NULL, regs, &regs->sp,
14625 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14626
14627 printk(KERN_EMERG "Code: ");
14628
14629 - ip = (u8 *)regs->ip - code_prologue;
14630 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14631 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14632 /* try starting at IP */
14633 - ip = (u8 *)regs->ip;
14634 + ip = (u8 *)regs->ip + cs_base;
14635 code_len = code_len - code_prologue + 1;
14636 }
14637 for (i = 0; i < code_len; i++, ip++) {
14638 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14639 printk(" Bad EIP value.");
14640 break;
14641 }
14642 - if (ip == (u8 *)regs->ip)
14643 + if (ip == (u8 *)regs->ip + cs_base)
14644 printk("<%02x> ", c);
14645 else
14646 printk("%02x ", c);
14647 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14648 printk("\n");
14649 }
14650
14651 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14652 +void pax_check_alloca(unsigned long size)
14653 +{
14654 + unsigned long sp = (unsigned long)&sp, stack_left;
14655 +
14656 + /* all kernel stacks are of the same size */
14657 + stack_left = sp & (THREAD_SIZE - 1);
14658 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14659 +}
14660 +EXPORT_SYMBOL(pax_check_alloca);
14661 +#endif
14662 +
14663 int is_valid_bugaddr(unsigned long ip)
14664 {
14665 unsigned short ud2;
14666
14667 + ip = ktla_ktva(ip);
14668 if (ip < PAGE_OFFSET)
14669 return 0;
14670 if (probe_kernel_address((unsigned short *)ip, ud2))
14671 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14672 index a071e6b..36cd585 100644
14673 --- a/arch/x86/kernel/dumpstack_64.c
14674 +++ b/arch/x86/kernel/dumpstack_64.c
14675 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14676 unsigned long *irq_stack_end =
14677 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14678 unsigned used = 0;
14679 - struct thread_info *tinfo;
14680 int graph = 0;
14681 + void *stack_start;
14682
14683 if (!task)
14684 task = current;
14685 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14686 * current stack address. If the stacks consist of nested
14687 * exceptions
14688 */
14689 - tinfo = task_thread_info(task);
14690 for (;;) {
14691 char *id;
14692 unsigned long *estack_end;
14693 +
14694 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14695 &used, &id);
14696
14697 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14698 if (ops->stack(data, id) < 0)
14699 break;
14700
14701 - bp = print_context_stack(tinfo, stack, bp, ops,
14702 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14703 data, estack_end, &graph);
14704 ops->stack(data, "<EOE>");
14705 /*
14706 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14707 if (stack >= irq_stack && stack < irq_stack_end) {
14708 if (ops->stack(data, "IRQ") < 0)
14709 break;
14710 - bp = print_context_stack(tinfo, stack, bp,
14711 + bp = print_context_stack(task, irq_stack, stack, bp,
14712 ops, data, irq_stack_end, &graph);
14713 /*
14714 * We link to the next stack (which would be
14715 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 /*
14717 * This handles the process stack:
14718 */
14719 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14720 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14721 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14722 put_cpu();
14723 }
14724 EXPORT_SYMBOL(dump_trace);
14725 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14726 return ud2 == 0x0b0f;
14727 }
14728
14729 +
14730 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14731 +void pax_check_alloca(unsigned long size)
14732 +{
14733 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14734 + unsigned cpu, used;
14735 + char *id;
14736 +
14737 + /* check the process stack first */
14738 + stack_start = (unsigned long)task_stack_page(current);
14739 + stack_end = stack_start + THREAD_SIZE;
14740 + if (likely(stack_start <= sp && sp < stack_end)) {
14741 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14742 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14743 + return;
14744 + }
14745 +
14746 + cpu = get_cpu();
14747 +
14748 + /* check the irq stacks */
14749 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14750 + stack_start = stack_end - IRQ_STACK_SIZE;
14751 + if (stack_start <= sp && sp < stack_end) {
14752 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14753 + put_cpu();
14754 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14755 + return;
14756 + }
14757 +
14758 + /* check the exception stacks */
14759 + used = 0;
14760 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14761 + stack_start = stack_end - EXCEPTION_STKSZ;
14762 + if (stack_end && stack_start <= sp && sp < stack_end) {
14763 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14764 + put_cpu();
14765 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14766 + return;
14767 + }
14768 +
14769 + put_cpu();
14770 +
14771 + /* unknown stack */
14772 + BUG();
14773 +}
14774 +EXPORT_SYMBOL(pax_check_alloca);
14775 +#endif
14776 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14777 index a89739a..95e0c48 100644
14778 --- a/arch/x86/kernel/e820.c
14779 +++ b/arch/x86/kernel/e820.c
14780 @@ -733,7 +733,7 @@ struct early_res {
14781 };
14782 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14783 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14784 - {}
14785 + { 0, 0, {0}, 0 }
14786 };
14787
14788 static int __init find_overlapped_early(u64 start, u64 end)
14789 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14790 index b9c830c..1e41a96 100644
14791 --- a/arch/x86/kernel/early_printk.c
14792 +++ b/arch/x86/kernel/early_printk.c
14793 @@ -7,6 +7,7 @@
14794 #include <linux/pci_regs.h>
14795 #include <linux/pci_ids.h>
14796 #include <linux/errno.h>
14797 +#include <linux/sched.h>
14798 #include <asm/io.h>
14799 #include <asm/processor.h>
14800 #include <asm/fcntl.h>
14801 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14802 int n;
14803 va_list ap;
14804
14805 + pax_track_stack();
14806 +
14807 va_start(ap, fmt);
14808 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14809 early_console->write(early_console, buf, n);
14810 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14811 index 5cab48e..b025f9b 100644
14812 --- a/arch/x86/kernel/efi_32.c
14813 +++ b/arch/x86/kernel/efi_32.c
14814 @@ -38,70 +38,56 @@
14815 */
14816
14817 static unsigned long efi_rt_eflags;
14818 -static pgd_t efi_bak_pg_dir_pointer[2];
14819 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14820
14821 -void efi_call_phys_prelog(void)
14822 +void __init efi_call_phys_prelog(void)
14823 {
14824 - unsigned long cr4;
14825 - unsigned long temp;
14826 struct desc_ptr gdt_descr;
14827
14828 +#ifdef CONFIG_PAX_KERNEXEC
14829 + struct desc_struct d;
14830 +#endif
14831 +
14832 local_irq_save(efi_rt_eflags);
14833
14834 - /*
14835 - * If I don't have PAE, I should just duplicate two entries in page
14836 - * directory. If I have PAE, I just need to duplicate one entry in
14837 - * page directory.
14838 - */
14839 - cr4 = read_cr4_safe();
14840 -
14841 - if (cr4 & X86_CR4_PAE) {
14842 - efi_bak_pg_dir_pointer[0].pgd =
14843 - swapper_pg_dir[pgd_index(0)].pgd;
14844 - swapper_pg_dir[0].pgd =
14845 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14846 - } else {
14847 - efi_bak_pg_dir_pointer[0].pgd =
14848 - swapper_pg_dir[pgd_index(0)].pgd;
14849 - efi_bak_pg_dir_pointer[1].pgd =
14850 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14851 - swapper_pg_dir[pgd_index(0)].pgd =
14852 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14853 - temp = PAGE_OFFSET + 0x400000;
14854 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14855 - swapper_pg_dir[pgd_index(temp)].pgd;
14856 - }
14857 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14858 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14859 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14860
14861 /*
14862 * After the lock is released, the original page table is restored.
14863 */
14864 __flush_tlb_all();
14865
14866 +#ifdef CONFIG_PAX_KERNEXEC
14867 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14868 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14869 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14870 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14871 +#endif
14872 +
14873 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14874 gdt_descr.size = GDT_SIZE - 1;
14875 load_gdt(&gdt_descr);
14876 }
14877
14878 -void efi_call_phys_epilog(void)
14879 +void __init efi_call_phys_epilog(void)
14880 {
14881 - unsigned long cr4;
14882 struct desc_ptr gdt_descr;
14883
14884 +#ifdef CONFIG_PAX_KERNEXEC
14885 + struct desc_struct d;
14886 +
14887 + memset(&d, 0, sizeof d);
14888 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14889 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14890 +#endif
14891 +
14892 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14893 gdt_descr.size = GDT_SIZE - 1;
14894 load_gdt(&gdt_descr);
14895
14896 - cr4 = read_cr4_safe();
14897 -
14898 - if (cr4 & X86_CR4_PAE) {
14899 - swapper_pg_dir[pgd_index(0)].pgd =
14900 - efi_bak_pg_dir_pointer[0].pgd;
14901 - } else {
14902 - swapper_pg_dir[pgd_index(0)].pgd =
14903 - efi_bak_pg_dir_pointer[0].pgd;
14904 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14905 - efi_bak_pg_dir_pointer[1].pgd;
14906 - }
14907 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14908
14909 /*
14910 * After the lock is released, the original page table is restored.
14911 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14912 index fbe66e6..c5c0dd2 100644
14913 --- a/arch/x86/kernel/efi_stub_32.S
14914 +++ b/arch/x86/kernel/efi_stub_32.S
14915 @@ -6,7 +6,9 @@
14916 */
14917
14918 #include <linux/linkage.h>
14919 +#include <linux/init.h>
14920 #include <asm/page_types.h>
14921 +#include <asm/segment.h>
14922
14923 /*
14924 * efi_call_phys(void *, ...) is a function with variable parameters.
14925 @@ -20,7 +22,7 @@
14926 * service functions will comply with gcc calling convention, too.
14927 */
14928
14929 -.text
14930 +__INIT
14931 ENTRY(efi_call_phys)
14932 /*
14933 * 0. The function can only be called in Linux kernel. So CS has been
14934 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14935 * The mapping of lower virtual memory has been created in prelog and
14936 * epilog.
14937 */
14938 - movl $1f, %edx
14939 - subl $__PAGE_OFFSET, %edx
14940 - jmp *%edx
14941 + movl $(__KERNEXEC_EFI_DS), %edx
14942 + mov %edx, %ds
14943 + mov %edx, %es
14944 + mov %edx, %ss
14945 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14946 1:
14947
14948 /*
14949 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14950 * parameter 2, ..., param n. To make things easy, we save the return
14951 * address of efi_call_phys in a global variable.
14952 */
14953 - popl %edx
14954 - movl %edx, saved_return_addr
14955 - /* get the function pointer into ECX*/
14956 - popl %ecx
14957 - movl %ecx, efi_rt_function_ptr
14958 - movl $2f, %edx
14959 - subl $__PAGE_OFFSET, %edx
14960 - pushl %edx
14961 + popl (saved_return_addr)
14962 + popl (efi_rt_function_ptr)
14963
14964 /*
14965 * 3. Clear PG bit in %CR0.
14966 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14967 /*
14968 * 5. Call the physical function.
14969 */
14970 - jmp *%ecx
14971 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14972
14973 -2:
14974 /*
14975 * 6. After EFI runtime service returns, control will return to
14976 * following instruction. We'd better readjust stack pointer first.
14977 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14978 movl %cr0, %edx
14979 orl $0x80000000, %edx
14980 movl %edx, %cr0
14981 - jmp 1f
14982 -1:
14983 +
14984 /*
14985 * 8. Now restore the virtual mode from flat mode by
14986 * adding EIP with PAGE_OFFSET.
14987 */
14988 - movl $1f, %edx
14989 - jmp *%edx
14990 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14991 1:
14992 + movl $(__KERNEL_DS), %edx
14993 + mov %edx, %ds
14994 + mov %edx, %es
14995 + mov %edx, %ss
14996
14997 /*
14998 * 9. Balance the stack. And because EAX contain the return value,
14999 * we'd better not clobber it.
15000 */
15001 - leal efi_rt_function_ptr, %edx
15002 - movl (%edx), %ecx
15003 - pushl %ecx
15004 + pushl (efi_rt_function_ptr)
15005
15006 /*
15007 - * 10. Push the saved return address onto the stack and return.
15008 + * 10. Return to the saved return address.
15009 */
15010 - leal saved_return_addr, %edx
15011 - movl (%edx), %ecx
15012 - pushl %ecx
15013 - ret
15014 + jmpl *(saved_return_addr)
15015 ENDPROC(efi_call_phys)
15016 .previous
15017
15018 -.data
15019 +__INITDATA
15020 saved_return_addr:
15021 .long 0
15022 efi_rt_function_ptr:
15023 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15024 index 4c07cca..2c8427d 100644
15025 --- a/arch/x86/kernel/efi_stub_64.S
15026 +++ b/arch/x86/kernel/efi_stub_64.S
15027 @@ -7,6 +7,7 @@
15028 */
15029
15030 #include <linux/linkage.h>
15031 +#include <asm/alternative-asm.h>
15032
15033 #define SAVE_XMM \
15034 mov %rsp, %rax; \
15035 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
15036 call *%rdi
15037 addq $32, %rsp
15038 RESTORE_XMM
15039 + pax_force_retaddr 0, 1
15040 ret
15041 ENDPROC(efi_call0)
15042
15043 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
15044 call *%rdi
15045 addq $32, %rsp
15046 RESTORE_XMM
15047 + pax_force_retaddr 0, 1
15048 ret
15049 ENDPROC(efi_call1)
15050
15051 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
15052 call *%rdi
15053 addq $32, %rsp
15054 RESTORE_XMM
15055 + pax_force_retaddr 0, 1
15056 ret
15057 ENDPROC(efi_call2)
15058
15059 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
15060 call *%rdi
15061 addq $32, %rsp
15062 RESTORE_XMM
15063 + pax_force_retaddr 0, 1
15064 ret
15065 ENDPROC(efi_call3)
15066
15067 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
15068 call *%rdi
15069 addq $32, %rsp
15070 RESTORE_XMM
15071 + pax_force_retaddr 0, 1
15072 ret
15073 ENDPROC(efi_call4)
15074
15075 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
15076 call *%rdi
15077 addq $48, %rsp
15078 RESTORE_XMM
15079 + pax_force_retaddr 0, 1
15080 ret
15081 ENDPROC(efi_call5)
15082
15083 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
15084 call *%rdi
15085 addq $48, %rsp
15086 RESTORE_XMM
15087 + pax_force_retaddr 0, 1
15088 ret
15089 ENDPROC(efi_call6)
15090 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15091 index c097e7d..c689cf4 100644
15092 --- a/arch/x86/kernel/entry_32.S
15093 +++ b/arch/x86/kernel/entry_32.S
15094 @@ -185,13 +185,146 @@
15095 /*CFI_REL_OFFSET gs, PT_GS*/
15096 .endm
15097 .macro SET_KERNEL_GS reg
15098 +
15099 +#ifdef CONFIG_CC_STACKPROTECTOR
15100 movl $(__KERNEL_STACK_CANARY), \reg
15101 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15102 + movl $(__USER_DS), \reg
15103 +#else
15104 + xorl \reg, \reg
15105 +#endif
15106 +
15107 movl \reg, %gs
15108 .endm
15109
15110 #endif /* CONFIG_X86_32_LAZY_GS */
15111
15112 -.macro SAVE_ALL
15113 +.macro pax_enter_kernel
15114 +#ifdef CONFIG_PAX_KERNEXEC
15115 + call pax_enter_kernel
15116 +#endif
15117 +.endm
15118 +
15119 +.macro pax_exit_kernel
15120 +#ifdef CONFIG_PAX_KERNEXEC
15121 + call pax_exit_kernel
15122 +#endif
15123 +.endm
15124 +
15125 +#ifdef CONFIG_PAX_KERNEXEC
15126 +ENTRY(pax_enter_kernel)
15127 +#ifdef CONFIG_PARAVIRT
15128 + pushl %eax
15129 + pushl %ecx
15130 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15131 + mov %eax, %esi
15132 +#else
15133 + mov %cr0, %esi
15134 +#endif
15135 + bts $16, %esi
15136 + jnc 1f
15137 + mov %cs, %esi
15138 + cmp $__KERNEL_CS, %esi
15139 + jz 3f
15140 + ljmp $__KERNEL_CS, $3f
15141 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15142 +2:
15143 +#ifdef CONFIG_PARAVIRT
15144 + mov %esi, %eax
15145 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15146 +#else
15147 + mov %esi, %cr0
15148 +#endif
15149 +3:
15150 +#ifdef CONFIG_PARAVIRT
15151 + popl %ecx
15152 + popl %eax
15153 +#endif
15154 + ret
15155 +ENDPROC(pax_enter_kernel)
15156 +
15157 +ENTRY(pax_exit_kernel)
15158 +#ifdef CONFIG_PARAVIRT
15159 + pushl %eax
15160 + pushl %ecx
15161 +#endif
15162 + mov %cs, %esi
15163 + cmp $__KERNEXEC_KERNEL_CS, %esi
15164 + jnz 2f
15165 +#ifdef CONFIG_PARAVIRT
15166 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15167 + mov %eax, %esi
15168 +#else
15169 + mov %cr0, %esi
15170 +#endif
15171 + btr $16, %esi
15172 + ljmp $__KERNEL_CS, $1f
15173 +1:
15174 +#ifdef CONFIG_PARAVIRT
15175 + mov %esi, %eax
15176 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15177 +#else
15178 + mov %esi, %cr0
15179 +#endif
15180 +2:
15181 +#ifdef CONFIG_PARAVIRT
15182 + popl %ecx
15183 + popl %eax
15184 +#endif
15185 + ret
15186 +ENDPROC(pax_exit_kernel)
15187 +#endif
15188 +
15189 +.macro pax_erase_kstack
15190 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15191 + call pax_erase_kstack
15192 +#endif
15193 +.endm
15194 +
15195 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15196 +/*
15197 + * ebp: thread_info
15198 + * ecx, edx: can be clobbered
15199 + */
15200 +ENTRY(pax_erase_kstack)
15201 + pushl %edi
15202 + pushl %eax
15203 +
15204 + mov TI_lowest_stack(%ebp), %edi
15205 + mov $-0xBEEF, %eax
15206 + std
15207 +
15208 +1: mov %edi, %ecx
15209 + and $THREAD_SIZE_asm - 1, %ecx
15210 + shr $2, %ecx
15211 + repne scasl
15212 + jecxz 2f
15213 +
15214 + cmp $2*16, %ecx
15215 + jc 2f
15216 +
15217 + mov $2*16, %ecx
15218 + repe scasl
15219 + jecxz 2f
15220 + jne 1b
15221 +
15222 +2: cld
15223 + mov %esp, %ecx
15224 + sub %edi, %ecx
15225 + shr $2, %ecx
15226 + rep stosl
15227 +
15228 + mov TI_task_thread_sp0(%ebp), %edi
15229 + sub $128, %edi
15230 + mov %edi, TI_lowest_stack(%ebp)
15231 +
15232 + popl %eax
15233 + popl %edi
15234 + ret
15235 +ENDPROC(pax_erase_kstack)
15236 +#endif
15237 +
15238 +.macro __SAVE_ALL _DS
15239 cld
15240 PUSH_GS
15241 pushl %fs
15242 @@ -224,7 +357,7 @@
15243 pushl %ebx
15244 CFI_ADJUST_CFA_OFFSET 4
15245 CFI_REL_OFFSET ebx, 0
15246 - movl $(__USER_DS), %edx
15247 + movl $\_DS, %edx
15248 movl %edx, %ds
15249 movl %edx, %es
15250 movl $(__KERNEL_PERCPU), %edx
15251 @@ -232,6 +365,15 @@
15252 SET_KERNEL_GS %edx
15253 .endm
15254
15255 +.macro SAVE_ALL
15256 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15257 + __SAVE_ALL __KERNEL_DS
15258 + pax_enter_kernel
15259 +#else
15260 + __SAVE_ALL __USER_DS
15261 +#endif
15262 +.endm
15263 +
15264 .macro RESTORE_INT_REGS
15265 popl %ebx
15266 CFI_ADJUST_CFA_OFFSET -4
15267 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15268 CFI_ADJUST_CFA_OFFSET -4
15269 jmp syscall_exit
15270 CFI_ENDPROC
15271 -END(ret_from_fork)
15272 +ENDPROC(ret_from_fork)
15273
15274 /*
15275 * Return to user mode is not as complex as all this looks,
15276 @@ -352,7 +494,15 @@ check_userspace:
15277 movb PT_CS(%esp), %al
15278 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15279 cmpl $USER_RPL, %eax
15280 +
15281 +#ifdef CONFIG_PAX_KERNEXEC
15282 + jae resume_userspace
15283 +
15284 + PAX_EXIT_KERNEL
15285 + jmp resume_kernel
15286 +#else
15287 jb resume_kernel # not returning to v8086 or userspace
15288 +#endif
15289
15290 ENTRY(resume_userspace)
15291 LOCKDEP_SYS_EXIT
15292 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15293 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15294 # int/exception return?
15295 jne work_pending
15296 - jmp restore_all
15297 -END(ret_from_exception)
15298 + jmp restore_all_pax
15299 +ENDPROC(ret_from_exception)
15300
15301 #ifdef CONFIG_PREEMPT
15302 ENTRY(resume_kernel)
15303 @@ -380,7 +530,7 @@ need_resched:
15304 jz restore_all
15305 call preempt_schedule_irq
15306 jmp need_resched
15307 -END(resume_kernel)
15308 +ENDPROC(resume_kernel)
15309 #endif
15310 CFI_ENDPROC
15311
15312 @@ -414,25 +564,36 @@ sysenter_past_esp:
15313 /*CFI_REL_OFFSET cs, 0*/
15314 /*
15315 * Push current_thread_info()->sysenter_return to the stack.
15316 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15317 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15318 */
15319 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15320 + pushl $0
15321 CFI_ADJUST_CFA_OFFSET 4
15322 CFI_REL_OFFSET eip, 0
15323
15324 pushl %eax
15325 CFI_ADJUST_CFA_OFFSET 4
15326 SAVE_ALL
15327 + GET_THREAD_INFO(%ebp)
15328 + movl TI_sysenter_return(%ebp),%ebp
15329 + movl %ebp,PT_EIP(%esp)
15330 ENABLE_INTERRUPTS(CLBR_NONE)
15331
15332 /*
15333 * Load the potential sixth argument from user stack.
15334 * Careful about security.
15335 */
15336 + movl PT_OLDESP(%esp),%ebp
15337 +
15338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15339 + mov PT_OLDSS(%esp),%ds
15340 +1: movl %ds:(%ebp),%ebp
15341 + push %ss
15342 + pop %ds
15343 +#else
15344 cmpl $__PAGE_OFFSET-3,%ebp
15345 jae syscall_fault
15346 1: movl (%ebp),%ebp
15347 +#endif
15348 +
15349 movl %ebp,PT_EBP(%esp)
15350 .section __ex_table,"a"
15351 .align 4
15352 @@ -455,12 +616,24 @@ sysenter_do_call:
15353 testl $_TIF_ALLWORK_MASK, %ecx
15354 jne sysexit_audit
15355 sysenter_exit:
15356 +
15357 +#ifdef CONFIG_PAX_RANDKSTACK
15358 + pushl_cfi %eax
15359 + movl %esp, %eax
15360 + call pax_randomize_kstack
15361 + popl_cfi %eax
15362 +#endif
15363 +
15364 + pax_erase_kstack
15365 +
15366 /* if something modifies registers it must also disable sysexit */
15367 movl PT_EIP(%esp), %edx
15368 movl PT_OLDESP(%esp), %ecx
15369 xorl %ebp,%ebp
15370 TRACE_IRQS_ON
15371 1: mov PT_FS(%esp), %fs
15372 +2: mov PT_DS(%esp), %ds
15373 +3: mov PT_ES(%esp), %es
15374 PTGS_TO_GS
15375 ENABLE_INTERRUPTS_SYSEXIT
15376
15377 @@ -477,6 +650,9 @@ sysenter_audit:
15378 movl %eax,%edx /* 2nd arg: syscall number */
15379 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15380 call audit_syscall_entry
15381 +
15382 + pax_erase_kstack
15383 +
15384 pushl %ebx
15385 CFI_ADJUST_CFA_OFFSET 4
15386 movl PT_EAX(%esp),%eax /* reload syscall number */
15387 @@ -504,11 +680,17 @@ sysexit_audit:
15388
15389 CFI_ENDPROC
15390 .pushsection .fixup,"ax"
15391 -2: movl $0,PT_FS(%esp)
15392 +4: movl $0,PT_FS(%esp)
15393 + jmp 1b
15394 +5: movl $0,PT_DS(%esp)
15395 + jmp 1b
15396 +6: movl $0,PT_ES(%esp)
15397 jmp 1b
15398 .section __ex_table,"a"
15399 .align 4
15400 - .long 1b,2b
15401 + .long 1b,4b
15402 + .long 2b,5b
15403 + .long 3b,6b
15404 .popsection
15405 PTGS_TO_GS_EX
15406 ENDPROC(ia32_sysenter_target)
15407 @@ -538,6 +720,15 @@ syscall_exit:
15408 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15409 jne syscall_exit_work
15410
15411 +restore_all_pax:
15412 +
15413 +#ifdef CONFIG_PAX_RANDKSTACK
15414 + movl %esp, %eax
15415 + call pax_randomize_kstack
15416 +#endif
15417 +
15418 + pax_erase_kstack
15419 +
15420 restore_all:
15421 TRACE_IRQS_IRET
15422 restore_all_notrace:
15423 @@ -602,10 +793,29 @@ ldt_ss:
15424 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15425 mov %dx, %ax /* eax: new kernel esp */
15426 sub %eax, %edx /* offset (low word is 0) */
15427 - PER_CPU(gdt_page, %ebx)
15428 +#ifdef CONFIG_SMP
15429 + movl PER_CPU_VAR(cpu_number), %ebx
15430 + shll $PAGE_SHIFT_asm, %ebx
15431 + addl $cpu_gdt_table, %ebx
15432 +#else
15433 + movl $cpu_gdt_table, %ebx
15434 +#endif
15435 shr $16, %edx
15436 +
15437 +#ifdef CONFIG_PAX_KERNEXEC
15438 + mov %cr0, %esi
15439 + btr $16, %esi
15440 + mov %esi, %cr0
15441 +#endif
15442 +
15443 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15444 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15445 +
15446 +#ifdef CONFIG_PAX_KERNEXEC
15447 + bts $16, %esi
15448 + mov %esi, %cr0
15449 +#endif
15450 +
15451 pushl $__ESPFIX_SS
15452 CFI_ADJUST_CFA_OFFSET 4
15453 push %eax /* new kernel esp */
15454 @@ -636,36 +846,30 @@ work_resched:
15455 movl TI_flags(%ebp), %ecx
15456 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15457 # than syscall tracing?
15458 - jz restore_all
15459 + jz restore_all_pax
15460 testb $_TIF_NEED_RESCHED, %cl
15461 jnz work_resched
15462
15463 work_notifysig: # deal with pending signals and
15464 # notify-resume requests
15465 + movl %esp, %eax
15466 #ifdef CONFIG_VM86
15467 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15468 - movl %esp, %eax
15469 - jne work_notifysig_v86 # returning to kernel-space or
15470 + jz 1f # returning to kernel-space or
15471 # vm86-space
15472 - xorl %edx, %edx
15473 - call do_notify_resume
15474 - jmp resume_userspace_sig
15475
15476 - ALIGN
15477 -work_notifysig_v86:
15478 pushl %ecx # save ti_flags for do_notify_resume
15479 CFI_ADJUST_CFA_OFFSET 4
15480 call save_v86_state # %eax contains pt_regs pointer
15481 popl %ecx
15482 CFI_ADJUST_CFA_OFFSET -4
15483 movl %eax, %esp
15484 -#else
15485 - movl %esp, %eax
15486 +1:
15487 #endif
15488 xorl %edx, %edx
15489 call do_notify_resume
15490 jmp resume_userspace_sig
15491 -END(work_pending)
15492 +ENDPROC(work_pending)
15493
15494 # perform syscall exit tracing
15495 ALIGN
15496 @@ -673,11 +877,14 @@ syscall_trace_entry:
15497 movl $-ENOSYS,PT_EAX(%esp)
15498 movl %esp, %eax
15499 call syscall_trace_enter
15500 +
15501 + pax_erase_kstack
15502 +
15503 /* What it returned is what we'll actually use. */
15504 cmpl $(nr_syscalls), %eax
15505 jnae syscall_call
15506 jmp syscall_exit
15507 -END(syscall_trace_entry)
15508 +ENDPROC(syscall_trace_entry)
15509
15510 # perform syscall exit tracing
15511 ALIGN
15512 @@ -690,20 +897,24 @@ syscall_exit_work:
15513 movl %esp, %eax
15514 call syscall_trace_leave
15515 jmp resume_userspace
15516 -END(syscall_exit_work)
15517 +ENDPROC(syscall_exit_work)
15518 CFI_ENDPROC
15519
15520 RING0_INT_FRAME # can't unwind into user space anyway
15521 syscall_fault:
15522 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15523 + push %ss
15524 + pop %ds
15525 +#endif
15526 GET_THREAD_INFO(%ebp)
15527 movl $-EFAULT,PT_EAX(%esp)
15528 jmp resume_userspace
15529 -END(syscall_fault)
15530 +ENDPROC(syscall_fault)
15531
15532 syscall_badsys:
15533 movl $-ENOSYS,PT_EAX(%esp)
15534 jmp resume_userspace
15535 -END(syscall_badsys)
15536 +ENDPROC(syscall_badsys)
15537 CFI_ENDPROC
15538
15539 /*
15540 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15541 PTREGSCALL(vm86)
15542 PTREGSCALL(vm86old)
15543
15544 + ALIGN;
15545 +ENTRY(kernel_execve)
15546 + push %ebp
15547 + sub $PT_OLDSS+4,%esp
15548 + push %edi
15549 + push %ecx
15550 + push %eax
15551 + lea 3*4(%esp),%edi
15552 + mov $PT_OLDSS/4+1,%ecx
15553 + xorl %eax,%eax
15554 + rep stosl
15555 + pop %eax
15556 + pop %ecx
15557 + pop %edi
15558 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15559 + mov %eax,PT_EBX(%esp)
15560 + mov %edx,PT_ECX(%esp)
15561 + mov %ecx,PT_EDX(%esp)
15562 + mov %esp,%eax
15563 + call sys_execve
15564 + GET_THREAD_INFO(%ebp)
15565 + test %eax,%eax
15566 + jz syscall_exit
15567 + add $PT_OLDSS+4,%esp
15568 + pop %ebp
15569 + ret
15570 +
15571 .macro FIXUP_ESPFIX_STACK
15572 /*
15573 * Switch back for ESPFIX stack to the normal zerobased stack
15574 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15575 * normal stack and adjusts ESP with the matching offset.
15576 */
15577 /* fixup the stack */
15578 - PER_CPU(gdt_page, %ebx)
15579 +#ifdef CONFIG_SMP
15580 + movl PER_CPU_VAR(cpu_number), %ebx
15581 + shll $PAGE_SHIFT_asm, %ebx
15582 + addl $cpu_gdt_table, %ebx
15583 +#else
15584 + movl $cpu_gdt_table, %ebx
15585 +#endif
15586 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15587 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15588 shl $16, %eax
15589 @@ -793,7 +1037,7 @@ vector=vector+1
15590 .endr
15591 2: jmp common_interrupt
15592 .endr
15593 -END(irq_entries_start)
15594 +ENDPROC(irq_entries_start)
15595
15596 .previous
15597 END(interrupt)
15598 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15599 CFI_ADJUST_CFA_OFFSET 4
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(coprocessor_error)
15603 +ENDPROC(coprocessor_error)
15604
15605 ENTRY(simd_coprocessor_error)
15606 RING0_INT_FRAME
15607 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15608 CFI_ADJUST_CFA_OFFSET 4
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(simd_coprocessor_error)
15612 +ENDPROC(simd_coprocessor_error)
15613
15614 ENTRY(device_not_available)
15615 RING0_INT_FRAME
15616 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15617 CFI_ADJUST_CFA_OFFSET 4
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(device_not_available)
15621 +ENDPROC(device_not_available)
15622
15623 #ifdef CONFIG_PARAVIRT
15624 ENTRY(native_iret)
15625 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15626 .align 4
15627 .long native_iret, iret_exc
15628 .previous
15629 -END(native_iret)
15630 +ENDPROC(native_iret)
15631
15632 ENTRY(native_irq_enable_sysexit)
15633 sti
15634 sysexit
15635 -END(native_irq_enable_sysexit)
15636 +ENDPROC(native_irq_enable_sysexit)
15637 #endif
15638
15639 ENTRY(overflow)
15640 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15641 CFI_ADJUST_CFA_OFFSET 4
15642 jmp error_code
15643 CFI_ENDPROC
15644 -END(overflow)
15645 +ENDPROC(overflow)
15646
15647 ENTRY(bounds)
15648 RING0_INT_FRAME
15649 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15650 CFI_ADJUST_CFA_OFFSET 4
15651 jmp error_code
15652 CFI_ENDPROC
15653 -END(bounds)
15654 +ENDPROC(bounds)
15655
15656 ENTRY(invalid_op)
15657 RING0_INT_FRAME
15658 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15659 CFI_ADJUST_CFA_OFFSET 4
15660 jmp error_code
15661 CFI_ENDPROC
15662 -END(invalid_op)
15663 +ENDPROC(invalid_op)
15664
15665 ENTRY(coprocessor_segment_overrun)
15666 RING0_INT_FRAME
15667 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15668 CFI_ADJUST_CFA_OFFSET 4
15669 jmp error_code
15670 CFI_ENDPROC
15671 -END(coprocessor_segment_overrun)
15672 +ENDPROC(coprocessor_segment_overrun)
15673
15674 ENTRY(invalid_TSS)
15675 RING0_EC_FRAME
15676 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15677 CFI_ADJUST_CFA_OFFSET 4
15678 jmp error_code
15679 CFI_ENDPROC
15680 -END(invalid_TSS)
15681 +ENDPROC(invalid_TSS)
15682
15683 ENTRY(segment_not_present)
15684 RING0_EC_FRAME
15685 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15686 CFI_ADJUST_CFA_OFFSET 4
15687 jmp error_code
15688 CFI_ENDPROC
15689 -END(segment_not_present)
15690 +ENDPROC(segment_not_present)
15691
15692 ENTRY(stack_segment)
15693 RING0_EC_FRAME
15694 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15695 CFI_ADJUST_CFA_OFFSET 4
15696 jmp error_code
15697 CFI_ENDPROC
15698 -END(stack_segment)
15699 +ENDPROC(stack_segment)
15700
15701 ENTRY(alignment_check)
15702 RING0_EC_FRAME
15703 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15704 CFI_ADJUST_CFA_OFFSET 4
15705 jmp error_code
15706 CFI_ENDPROC
15707 -END(alignment_check)
15708 +ENDPROC(alignment_check)
15709
15710 ENTRY(divide_error)
15711 RING0_INT_FRAME
15712 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15713 CFI_ADJUST_CFA_OFFSET 4
15714 jmp error_code
15715 CFI_ENDPROC
15716 -END(divide_error)
15717 +ENDPROC(divide_error)
15718
15719 #ifdef CONFIG_X86_MCE
15720 ENTRY(machine_check)
15721 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15722 CFI_ADJUST_CFA_OFFSET 4
15723 jmp error_code
15724 CFI_ENDPROC
15725 -END(machine_check)
15726 +ENDPROC(machine_check)
15727 #endif
15728
15729 ENTRY(spurious_interrupt_bug)
15730 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15731 CFI_ADJUST_CFA_OFFSET 4
15732 jmp error_code
15733 CFI_ENDPROC
15734 -END(spurious_interrupt_bug)
15735 +ENDPROC(spurious_interrupt_bug)
15736
15737 ENTRY(kernel_thread_helper)
15738 pushl $0 # fake return address for unwinder
15739 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15740
15741 ENTRY(mcount)
15742 ret
15743 -END(mcount)
15744 +ENDPROC(mcount)
15745
15746 ENTRY(ftrace_caller)
15747 cmpl $0, function_trace_stop
15748 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15749 .globl ftrace_stub
15750 ftrace_stub:
15751 ret
15752 -END(ftrace_caller)
15753 +ENDPROC(ftrace_caller)
15754
15755 #else /* ! CONFIG_DYNAMIC_FTRACE */
15756
15757 @@ -1160,7 +1404,7 @@ trace:
15758 popl %ecx
15759 popl %eax
15760 jmp ftrace_stub
15761 -END(mcount)
15762 +ENDPROC(mcount)
15763 #endif /* CONFIG_DYNAMIC_FTRACE */
15764 #endif /* CONFIG_FUNCTION_TRACER */
15765
15766 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15767 popl %ecx
15768 popl %eax
15769 ret
15770 -END(ftrace_graph_caller)
15771 +ENDPROC(ftrace_graph_caller)
15772
15773 .globl return_to_handler
15774 return_to_handler:
15775 @@ -1198,7 +1442,6 @@ return_to_handler:
15776 ret
15777 #endif
15778
15779 -.section .rodata,"a"
15780 #include "syscall_table_32.S"
15781
15782 syscall_table_size=(.-sys_call_table)
15783 @@ -1255,15 +1498,18 @@ error_code:
15784 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15785 REG_TO_PTGS %ecx
15786 SET_KERNEL_GS %ecx
15787 - movl $(__USER_DS), %ecx
15788 + movl $(__KERNEL_DS), %ecx
15789 movl %ecx, %ds
15790 movl %ecx, %es
15791 +
15792 + pax_enter_kernel
15793 +
15794 TRACE_IRQS_OFF
15795 movl %esp,%eax # pt_regs pointer
15796 call *%edi
15797 jmp ret_from_exception
15798 CFI_ENDPROC
15799 -END(page_fault)
15800 +ENDPROC(page_fault)
15801
15802 /*
15803 * Debug traps and NMI can happen at the one SYSENTER instruction
15804 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15805 call do_debug
15806 jmp ret_from_exception
15807 CFI_ENDPROC
15808 -END(debug)
15809 +ENDPROC(debug)
15810
15811 /*
15812 * NMI is doubly nasty. It can happen _while_ we're handling
15813 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15814 xorl %edx,%edx # zero error code
15815 movl %esp,%eax # pt_regs pointer
15816 call do_nmi
15817 +
15818 + pax_exit_kernel
15819 +
15820 jmp restore_all_notrace
15821 CFI_ENDPROC
15822
15823 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15824 FIXUP_ESPFIX_STACK # %eax == %esp
15825 xorl %edx,%edx # zero error code
15826 call do_nmi
15827 +
15828 + pax_exit_kernel
15829 +
15830 RESTORE_REGS
15831 lss 12+4(%esp), %esp # back to espfix stack
15832 CFI_ADJUST_CFA_OFFSET -24
15833 jmp irq_return
15834 CFI_ENDPROC
15835 -END(nmi)
15836 +ENDPROC(nmi)
15837
15838 ENTRY(int3)
15839 RING0_INT_FRAME
15840 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15841 call do_int3
15842 jmp ret_from_exception
15843 CFI_ENDPROC
15844 -END(int3)
15845 +ENDPROC(int3)
15846
15847 ENTRY(general_protection)
15848 RING0_EC_FRAME
15849 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15850 CFI_ADJUST_CFA_OFFSET 4
15851 jmp error_code
15852 CFI_ENDPROC
15853 -END(general_protection)
15854 +ENDPROC(general_protection)
15855
15856 /*
15857 * End of kprobes section
15858 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15859 index 34a56a9..87790b4 100644
15860 --- a/arch/x86/kernel/entry_64.S
15861 +++ b/arch/x86/kernel/entry_64.S
15862 @@ -53,6 +53,8 @@
15863 #include <asm/paravirt.h>
15864 #include <asm/ftrace.h>
15865 #include <asm/percpu.h>
15866 +#include <asm/pgtable.h>
15867 +#include <asm/alternative-asm.h>
15868
15869 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15870 #include <linux/elf-em.h>
15871 @@ -64,8 +66,9 @@
15872 #ifdef CONFIG_FUNCTION_TRACER
15873 #ifdef CONFIG_DYNAMIC_FTRACE
15874 ENTRY(mcount)
15875 + pax_force_retaddr
15876 retq
15877 -END(mcount)
15878 +ENDPROC(mcount)
15879
15880 ENTRY(ftrace_caller)
15881 cmpl $0, function_trace_stop
15882 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15883 #endif
15884
15885 GLOBAL(ftrace_stub)
15886 + pax_force_retaddr
15887 retq
15888 -END(ftrace_caller)
15889 +ENDPROC(ftrace_caller)
15890
15891 #else /* ! CONFIG_DYNAMIC_FTRACE */
15892 ENTRY(mcount)
15893 @@ -108,6 +112,7 @@ ENTRY(mcount)
15894 #endif
15895
15896 GLOBAL(ftrace_stub)
15897 + pax_force_retaddr
15898 retq
15899
15900 trace:
15901 @@ -117,12 +122,13 @@ trace:
15902 movq 8(%rbp), %rsi
15903 subq $MCOUNT_INSN_SIZE, %rdi
15904
15905 + pax_force_fptr ftrace_trace_function
15906 call *ftrace_trace_function
15907
15908 MCOUNT_RESTORE_FRAME
15909
15910 jmp ftrace_stub
15911 -END(mcount)
15912 +ENDPROC(mcount)
15913 #endif /* CONFIG_DYNAMIC_FTRACE */
15914 #endif /* CONFIG_FUNCTION_TRACER */
15915
15916 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15917
15918 MCOUNT_RESTORE_FRAME
15919
15920 + pax_force_retaddr
15921 retq
15922 -END(ftrace_graph_caller)
15923 +ENDPROC(ftrace_graph_caller)
15924
15925 GLOBAL(return_to_handler)
15926 subq $24, %rsp
15927 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15928 movq 8(%rsp), %rdx
15929 movq (%rsp), %rax
15930 addq $16, %rsp
15931 + pax_force_retaddr
15932 retq
15933 #endif
15934
15935 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15936 ENDPROC(native_usergs_sysret64)
15937 #endif /* CONFIG_PARAVIRT */
15938
15939 + .macro ljmpq sel, off
15940 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15941 + .byte 0x48; ljmp *1234f(%rip)
15942 + .pushsection .rodata
15943 + .align 16
15944 + 1234: .quad \off; .word \sel
15945 + .popsection
15946 +#else
15947 + pushq $\sel
15948 + pushq $\off
15949 + lretq
15950 +#endif
15951 + .endm
15952 +
15953 + .macro pax_enter_kernel
15954 + pax_set_fptr_mask
15955 +#ifdef CONFIG_PAX_KERNEXEC
15956 + call pax_enter_kernel
15957 +#endif
15958 + .endm
15959 +
15960 + .macro pax_exit_kernel
15961 +#ifdef CONFIG_PAX_KERNEXEC
15962 + call pax_exit_kernel
15963 +#endif
15964 + .endm
15965 +
15966 +#ifdef CONFIG_PAX_KERNEXEC
15967 +ENTRY(pax_enter_kernel)
15968 + pushq %rdi
15969 +
15970 +#ifdef CONFIG_PARAVIRT
15971 + PV_SAVE_REGS(CLBR_RDI)
15972 +#endif
15973 +
15974 + GET_CR0_INTO_RDI
15975 + bts $16,%rdi
15976 + jnc 3f
15977 + mov %cs,%edi
15978 + cmp $__KERNEL_CS,%edi
15979 + jnz 2f
15980 +1:
15981 +
15982 +#ifdef CONFIG_PARAVIRT
15983 + PV_RESTORE_REGS(CLBR_RDI)
15984 +#endif
15985 +
15986 + popq %rdi
15987 + pax_force_retaddr
15988 + retq
15989 +
15990 +2: ljmpq __KERNEL_CS,1f
15991 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15992 +4: SET_RDI_INTO_CR0
15993 + jmp 1b
15994 +ENDPROC(pax_enter_kernel)
15995 +
15996 +ENTRY(pax_exit_kernel)
15997 + pushq %rdi
15998 +
15999 +#ifdef CONFIG_PARAVIRT
16000 + PV_SAVE_REGS(CLBR_RDI)
16001 +#endif
16002 +
16003 + mov %cs,%rdi
16004 + cmp $__KERNEXEC_KERNEL_CS,%edi
16005 + jz 2f
16006 +1:
16007 +
16008 +#ifdef CONFIG_PARAVIRT
16009 + PV_RESTORE_REGS(CLBR_RDI);
16010 +#endif
16011 +
16012 + popq %rdi
16013 + pax_force_retaddr
16014 + retq
16015 +
16016 +2: GET_CR0_INTO_RDI
16017 + btr $16,%rdi
16018 + ljmpq __KERNEL_CS,3f
16019 +3: SET_RDI_INTO_CR0
16020 + jmp 1b
16021 +#ifdef CONFIG_PARAVIRT
16022 + PV_RESTORE_REGS(CLBR_RDI);
16023 +#endif
16024 +
16025 + popq %rdi
16026 + pax_force_retaddr
16027 + retq
16028 +ENDPROC(pax_exit_kernel)
16029 +#endif
16030 +
16031 + .macro pax_enter_kernel_user
16032 + pax_set_fptr_mask
16033 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16034 + call pax_enter_kernel_user
16035 +#endif
16036 + .endm
16037 +
16038 + .macro pax_exit_kernel_user
16039 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16040 + call pax_exit_kernel_user
16041 +#endif
16042 +#ifdef CONFIG_PAX_RANDKSTACK
16043 + pushq %rax
16044 + call pax_randomize_kstack
16045 + popq %rax
16046 +#endif
16047 + .endm
16048 +
16049 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16050 +ENTRY(pax_enter_kernel_user)
16051 + pushq %rdi
16052 + pushq %rbx
16053 +
16054 +#ifdef CONFIG_PARAVIRT
16055 + PV_SAVE_REGS(CLBR_RDI)
16056 +#endif
16057 +
16058 + GET_CR3_INTO_RDI
16059 + mov %rdi,%rbx
16060 + add $__START_KERNEL_map,%rbx
16061 + sub phys_base(%rip),%rbx
16062 +
16063 +#ifdef CONFIG_PARAVIRT
16064 + pushq %rdi
16065 + cmpl $0, pv_info+PARAVIRT_enabled
16066 + jz 1f
16067 + i = 0
16068 + .rept USER_PGD_PTRS
16069 + mov i*8(%rbx),%rsi
16070 + mov $0,%sil
16071 + lea i*8(%rbx),%rdi
16072 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16073 + i = i + 1
16074 + .endr
16075 + jmp 2f
16076 +1:
16077 +#endif
16078 +
16079 + i = 0
16080 + .rept USER_PGD_PTRS
16081 + movb $0,i*8(%rbx)
16082 + i = i + 1
16083 + .endr
16084 +
16085 +#ifdef CONFIG_PARAVIRT
16086 +2: popq %rdi
16087 +#endif
16088 + SET_RDI_INTO_CR3
16089 +
16090 +#ifdef CONFIG_PAX_KERNEXEC
16091 + GET_CR0_INTO_RDI
16092 + bts $16,%rdi
16093 + SET_RDI_INTO_CR0
16094 +#endif
16095 +
16096 +#ifdef CONFIG_PARAVIRT
16097 + PV_RESTORE_REGS(CLBR_RDI)
16098 +#endif
16099 +
16100 + popq %rbx
16101 + popq %rdi
16102 + pax_force_retaddr
16103 + retq
16104 +ENDPROC(pax_enter_kernel_user)
16105 +
16106 +ENTRY(pax_exit_kernel_user)
16107 + push %rdi
16108 +
16109 +#ifdef CONFIG_PARAVIRT
16110 + pushq %rbx
16111 + PV_SAVE_REGS(CLBR_RDI)
16112 +#endif
16113 +
16114 +#ifdef CONFIG_PAX_KERNEXEC
16115 + GET_CR0_INTO_RDI
16116 + btr $16,%rdi
16117 + SET_RDI_INTO_CR0
16118 +#endif
16119 +
16120 + GET_CR3_INTO_RDI
16121 + add $__START_KERNEL_map,%rdi
16122 + sub phys_base(%rip),%rdi
16123 +
16124 +#ifdef CONFIG_PARAVIRT
16125 + cmpl $0, pv_info+PARAVIRT_enabled
16126 + jz 1f
16127 + mov %rdi,%rbx
16128 + i = 0
16129 + .rept USER_PGD_PTRS
16130 + mov i*8(%rbx),%rsi
16131 + mov $0x67,%sil
16132 + lea i*8(%rbx),%rdi
16133 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16134 + i = i + 1
16135 + .endr
16136 + jmp 2f
16137 +1:
16138 +#endif
16139 +
16140 + i = 0
16141 + .rept USER_PGD_PTRS
16142 + movb $0x67,i*8(%rdi)
16143 + i = i + 1
16144 + .endr
16145 +
16146 +#ifdef CONFIG_PARAVIRT
16147 +2: PV_RESTORE_REGS(CLBR_RDI)
16148 + popq %rbx
16149 +#endif
16150 +
16151 + popq %rdi
16152 + pax_force_retaddr
16153 + retq
16154 +ENDPROC(pax_exit_kernel_user)
16155 +#endif
16156 +
16157 +.macro pax_erase_kstack
16158 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16159 + call pax_erase_kstack
16160 +#endif
16161 +.endm
16162 +
16163 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164 +/*
16165 + * r11: thread_info
16166 + * rcx, rdx: can be clobbered
16167 + */
16168 +ENTRY(pax_erase_kstack)
16169 + pushq %rdi
16170 + pushq %rax
16171 + pushq %r11
16172 +
16173 + GET_THREAD_INFO(%r11)
16174 + mov TI_lowest_stack(%r11), %rdi
16175 + mov $-0xBEEF, %rax
16176 + std
16177 +
16178 +1: mov %edi, %ecx
16179 + and $THREAD_SIZE_asm - 1, %ecx
16180 + shr $3, %ecx
16181 + repne scasq
16182 + jecxz 2f
16183 +
16184 + cmp $2*8, %ecx
16185 + jc 2f
16186 +
16187 + mov $2*8, %ecx
16188 + repe scasq
16189 + jecxz 2f
16190 + jne 1b
16191 +
16192 +2: cld
16193 + mov %esp, %ecx
16194 + sub %edi, %ecx
16195 +
16196 + cmp $THREAD_SIZE_asm, %rcx
16197 + jb 3f
16198 + ud2
16199 +3:
16200 +
16201 + shr $3, %ecx
16202 + rep stosq
16203 +
16204 + mov TI_task_thread_sp0(%r11), %rdi
16205 + sub $256, %rdi
16206 + mov %rdi, TI_lowest_stack(%r11)
16207 +
16208 + popq %r11
16209 + popq %rax
16210 + popq %rdi
16211 + pax_force_retaddr
16212 + ret
16213 +ENDPROC(pax_erase_kstack)
16214 +#endif
16215
16216 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16217 #ifdef CONFIG_TRACE_IRQFLAGS
16218 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16219 .endm
16220
16221 .macro UNFAKE_STACK_FRAME
16222 - addq $8*6, %rsp
16223 - CFI_ADJUST_CFA_OFFSET -(6*8)
16224 + addq $8*6 + ARG_SKIP, %rsp
16225 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16226 .endm
16227
16228 /*
16229 @@ -317,7 +601,7 @@ ENTRY(save_args)
16230 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16231 movq_cfi rbp, 8 /* push %rbp */
16232 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16233 - testl $3, CS(%rdi)
16234 + testb $3, CS(%rdi)
16235 je 1f
16236 SWAPGS
16237 /*
16238 @@ -337,9 +621,10 @@ ENTRY(save_args)
16239 * We entered an interrupt context - irqs are off:
16240 */
16241 2: TRACE_IRQS_OFF
16242 + pax_force_retaddr
16243 ret
16244 CFI_ENDPROC
16245 -END(save_args)
16246 +ENDPROC(save_args)
16247
16248 ENTRY(save_rest)
16249 PARTIAL_FRAME 1 REST_SKIP+8
16250 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16251 movq_cfi r15, R15+16
16252 movq %r11, 8(%rsp) /* return address */
16253 FIXUP_TOP_OF_STACK %r11, 16
16254 + pax_force_retaddr
16255 ret
16256 CFI_ENDPROC
16257 -END(save_rest)
16258 +ENDPROC(save_rest)
16259
16260 /* save complete stack frame */
16261 .pushsection .kprobes.text, "ax"
16262 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16263 js 1f /* negative -> in kernel */
16264 SWAPGS
16265 xorl %ebx,%ebx
16266 -1: ret
16267 +1: pax_force_retaddr_bts
16268 + ret
16269 CFI_ENDPROC
16270 -END(save_paranoid)
16271 +ENDPROC(save_paranoid)
16272 .popsection
16273
16274 /*
16275 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16276
16277 RESTORE_REST
16278
16279 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16280 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16281 je int_ret_from_sys_call
16282
16283 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16284 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16285 jmp ret_from_sys_call # go to the SYSRET fastpath
16286
16287 CFI_ENDPROC
16288 -END(ret_from_fork)
16289 +ENDPROC(ret_from_fork)
16290
16291 /*
16292 * System call entry. Upto 6 arguments in registers are supported.
16293 @@ -455,7 +742,7 @@ END(ret_from_fork)
16294 ENTRY(system_call)
16295 CFI_STARTPROC simple
16296 CFI_SIGNAL_FRAME
16297 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16298 + CFI_DEF_CFA rsp,0
16299 CFI_REGISTER rip,rcx
16300 /*CFI_REGISTER rflags,r11*/
16301 SWAPGS_UNSAFE_STACK
16302 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16303
16304 movq %rsp,PER_CPU_VAR(old_rsp)
16305 movq PER_CPU_VAR(kernel_stack),%rsp
16306 + SAVE_ARGS 8*6,1
16307 + pax_enter_kernel_user
16308 /*
16309 * No need to follow this irqs off/on section - it's straight
16310 * and short:
16311 */
16312 ENABLE_INTERRUPTS(CLBR_NONE)
16313 - SAVE_ARGS 8,1
16314 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16315 movq %rcx,RIP-ARGOFFSET(%rsp)
16316 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16317 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16318 system_call_fastpath:
16319 cmpq $__NR_syscall_max,%rax
16320 ja badsys
16321 - movq %r10,%rcx
16322 + movq R10-ARGOFFSET(%rsp),%rcx
16323 call *sys_call_table(,%rax,8) # XXX: rip relative
16324 movq %rax,RAX-ARGOFFSET(%rsp)
16325 /*
16326 @@ -502,6 +790,8 @@ sysret_check:
16327 andl %edi,%edx
16328 jnz sysret_careful
16329 CFI_REMEMBER_STATE
16330 + pax_exit_kernel_user
16331 + pax_erase_kstack
16332 /*
16333 * sysretq will re-enable interrupts:
16334 */
16335 @@ -555,14 +845,18 @@ badsys:
16336 * jump back to the normal fast path.
16337 */
16338 auditsys:
16339 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16340 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16341 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16342 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16343 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16344 movq %rax,%rsi /* 2nd arg: syscall number */
16345 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16346 call audit_syscall_entry
16347 +
16348 + pax_erase_kstack
16349 +
16350 LOAD_ARGS 0 /* reload call-clobbered registers */
16351 + pax_set_fptr_mask
16352 jmp system_call_fastpath
16353
16354 /*
16355 @@ -592,16 +886,20 @@ tracesys:
16356 FIXUP_TOP_OF_STACK %rdi
16357 movq %rsp,%rdi
16358 call syscall_trace_enter
16359 +
16360 + pax_erase_kstack
16361 +
16362 /*
16363 * Reload arg registers from stack in case ptrace changed them.
16364 * We don't reload %rax because syscall_trace_enter() returned
16365 * the value it wants us to use in the table lookup.
16366 */
16367 LOAD_ARGS ARGOFFSET, 1
16368 + pax_set_fptr_mask
16369 RESTORE_REST
16370 cmpq $__NR_syscall_max,%rax
16371 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16372 - movq %r10,%rcx /* fixup for C */
16373 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16374 call *sys_call_table(,%rax,8)
16375 movq %rax,RAX-ARGOFFSET(%rsp)
16376 /* Use IRET because user could have changed frame */
16377 @@ -613,7 +911,7 @@ tracesys:
16378 GLOBAL(int_ret_from_sys_call)
16379 DISABLE_INTERRUPTS(CLBR_NONE)
16380 TRACE_IRQS_OFF
16381 - testl $3,CS-ARGOFFSET(%rsp)
16382 + testb $3,CS-ARGOFFSET(%rsp)
16383 je retint_restore_args
16384 movl $_TIF_ALLWORK_MASK,%edi
16385 /* edi: mask to check */
16386 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16387 andl %edi,%edx
16388 jnz int_careful
16389 andl $~TS_COMPAT,TI_status(%rcx)
16390 + pax_erase_kstack
16391 jmp retint_swapgs
16392
16393 /* Either reschedule or signal or syscall exit tracking needed. */
16394 @@ -674,7 +973,7 @@ int_restore_rest:
16395 TRACE_IRQS_OFF
16396 jmp int_with_check
16397 CFI_ENDPROC
16398 -END(system_call)
16399 +ENDPROC(system_call)
16400
16401 /*
16402 * Certain special system calls that need to save a complete full stack frame.
16403 @@ -690,7 +989,7 @@ ENTRY(\label)
16404 call \func
16405 jmp ptregscall_common
16406 CFI_ENDPROC
16407 -END(\label)
16408 +ENDPROC(\label)
16409 .endm
16410
16411 PTREGSCALL stub_clone, sys_clone, %r8
16412 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16413 movq_cfi_restore R12+8, r12
16414 movq_cfi_restore RBP+8, rbp
16415 movq_cfi_restore RBX+8, rbx
16416 + pax_force_retaddr
16417 ret $REST_SKIP /* pop extended registers */
16418 CFI_ENDPROC
16419 -END(ptregscall_common)
16420 +ENDPROC(ptregscall_common)
16421
16422 ENTRY(stub_execve)
16423 CFI_STARTPROC
16424 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16425 RESTORE_REST
16426 jmp int_ret_from_sys_call
16427 CFI_ENDPROC
16428 -END(stub_execve)
16429 +ENDPROC(stub_execve)
16430
16431 /*
16432 * sigreturn is special because it needs to restore all registers on return.
16433 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16434 RESTORE_REST
16435 jmp int_ret_from_sys_call
16436 CFI_ENDPROC
16437 -END(stub_rt_sigreturn)
16438 +ENDPROC(stub_rt_sigreturn)
16439
16440 /*
16441 * Build the entry stubs and pointer table with some assembler magic.
16442 @@ -780,7 +1080,7 @@ vector=vector+1
16443 2: jmp common_interrupt
16444 .endr
16445 CFI_ENDPROC
16446 -END(irq_entries_start)
16447 +ENDPROC(irq_entries_start)
16448
16449 .previous
16450 END(interrupt)
16451 @@ -800,6 +1100,16 @@ END(interrupt)
16452 CFI_ADJUST_CFA_OFFSET 10*8
16453 call save_args
16454 PARTIAL_FRAME 0
16455 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16456 + testb $3, CS(%rdi)
16457 + jnz 1f
16458 + pax_enter_kernel
16459 + jmp 2f
16460 +1: pax_enter_kernel_user
16461 +2:
16462 +#else
16463 + pax_enter_kernel
16464 +#endif
16465 call \func
16466 .endm
16467
16468 @@ -822,7 +1132,7 @@ ret_from_intr:
16469 CFI_ADJUST_CFA_OFFSET -8
16470 exit_intr:
16471 GET_THREAD_INFO(%rcx)
16472 - testl $3,CS-ARGOFFSET(%rsp)
16473 + testb $3,CS-ARGOFFSET(%rsp)
16474 je retint_kernel
16475
16476 /* Interrupt came from user space */
16477 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16478 * The iretq could re-enable interrupts:
16479 */
16480 DISABLE_INTERRUPTS(CLBR_ANY)
16481 + pax_exit_kernel_user
16482 TRACE_IRQS_IRETQ
16483 SWAPGS
16484 jmp restore_args
16485
16486 retint_restore_args: /* return to kernel space */
16487 DISABLE_INTERRUPTS(CLBR_ANY)
16488 + pax_exit_kernel
16489 + pax_force_retaddr RIP-ARGOFFSET
16490 /*
16491 * The iretq could re-enable interrupts:
16492 */
16493 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16494 #endif
16495
16496 CFI_ENDPROC
16497 -END(common_interrupt)
16498 +ENDPROC(common_interrupt)
16499
16500 /*
16501 * APIC interrupts.
16502 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16503 interrupt \do_sym
16504 jmp ret_from_intr
16505 CFI_ENDPROC
16506 -END(\sym)
16507 +ENDPROC(\sym)
16508 .endm
16509
16510 #ifdef CONFIG_SMP
16511 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16512 CFI_ADJUST_CFA_OFFSET 15*8
16513 call error_entry
16514 DEFAULT_FRAME 0
16515 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16516 + testb $3, CS(%rsp)
16517 + jnz 1f
16518 + pax_enter_kernel
16519 + jmp 2f
16520 +1: pax_enter_kernel_user
16521 +2:
16522 +#else
16523 + pax_enter_kernel
16524 +#endif
16525 movq %rsp,%rdi /* pt_regs pointer */
16526 xorl %esi,%esi /* no error code */
16527 call \do_sym
16528 jmp error_exit /* %ebx: no swapgs flag */
16529 CFI_ENDPROC
16530 -END(\sym)
16531 +ENDPROC(\sym)
16532 .endm
16533
16534 .macro paranoidzeroentry sym do_sym
16535 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16536 subq $15*8, %rsp
16537 call save_paranoid
16538 TRACE_IRQS_OFF
16539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16540 + testb $3, CS(%rsp)
16541 + jnz 1f
16542 + pax_enter_kernel
16543 + jmp 2f
16544 +1: pax_enter_kernel_user
16545 +2:
16546 +#else
16547 + pax_enter_kernel
16548 +#endif
16549 movq %rsp,%rdi /* pt_regs pointer */
16550 xorl %esi,%esi /* no error code */
16551 call \do_sym
16552 jmp paranoid_exit /* %ebx: no swapgs flag */
16553 CFI_ENDPROC
16554 -END(\sym)
16555 +ENDPROC(\sym)
16556 .endm
16557
16558 .macro paranoidzeroentry_ist sym do_sym ist
16559 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16560 subq $15*8, %rsp
16561 call save_paranoid
16562 TRACE_IRQS_OFF
16563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16564 + testb $3, CS(%rsp)
16565 + jnz 1f
16566 + pax_enter_kernel
16567 + jmp 2f
16568 +1: pax_enter_kernel_user
16569 +2:
16570 +#else
16571 + pax_enter_kernel
16572 +#endif
16573 movq %rsp,%rdi /* pt_regs pointer */
16574 xorl %esi,%esi /* no error code */
16575 - PER_CPU(init_tss, %rbp)
16576 +#ifdef CONFIG_SMP
16577 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16578 + lea init_tss(%rbp), %rbp
16579 +#else
16580 + lea init_tss(%rip), %rbp
16581 +#endif
16582 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16583 call \do_sym
16584 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16585 jmp paranoid_exit /* %ebx: no swapgs flag */
16586 CFI_ENDPROC
16587 -END(\sym)
16588 +ENDPROC(\sym)
16589 .endm
16590
16591 .macro errorentry sym do_sym
16592 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16593 CFI_ADJUST_CFA_OFFSET 15*8
16594 call error_entry
16595 DEFAULT_FRAME 0
16596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16597 + testb $3, CS(%rsp)
16598 + jnz 1f
16599 + pax_enter_kernel
16600 + jmp 2f
16601 +1: pax_enter_kernel_user
16602 +2:
16603 +#else
16604 + pax_enter_kernel
16605 +#endif
16606 movq %rsp,%rdi /* pt_regs pointer */
16607 movq ORIG_RAX(%rsp),%rsi /* get error code */
16608 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16609 call \do_sym
16610 jmp error_exit /* %ebx: no swapgs flag */
16611 CFI_ENDPROC
16612 -END(\sym)
16613 +ENDPROC(\sym)
16614 .endm
16615
16616 /* error code is on the stack already */
16617 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16618 call save_paranoid
16619 DEFAULT_FRAME 0
16620 TRACE_IRQS_OFF
16621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16622 + testb $3, CS(%rsp)
16623 + jnz 1f
16624 + pax_enter_kernel
16625 + jmp 2f
16626 +1: pax_enter_kernel_user
16627 +2:
16628 +#else
16629 + pax_enter_kernel
16630 +#endif
16631 movq %rsp,%rdi /* pt_regs pointer */
16632 movq ORIG_RAX(%rsp),%rsi /* get error code */
16633 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16634 call \do_sym
16635 jmp paranoid_exit /* %ebx: no swapgs flag */
16636 CFI_ENDPROC
16637 -END(\sym)
16638 +ENDPROC(\sym)
16639 .endm
16640
16641 zeroentry divide_error do_divide_error
16642 @@ -1141,9 +1509,10 @@ gs_change:
16643 SWAPGS
16644 popf
16645 CFI_ADJUST_CFA_OFFSET -8
16646 + pax_force_retaddr
16647 ret
16648 CFI_ENDPROC
16649 -END(native_load_gs_index)
16650 +ENDPROC(native_load_gs_index)
16651
16652 .section __ex_table,"a"
16653 .align 8
16654 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16655 * of hacks for example to fork off the per-CPU idle tasks.
16656 * [Hopefully no generic code relies on the reschedule -AK]
16657 */
16658 - RESTORE_ALL
16659 + RESTORE_REST
16660 UNFAKE_STACK_FRAME
16661 + pax_force_retaddr
16662 ret
16663 CFI_ENDPROC
16664 -END(kernel_thread)
16665 +ENDPROC(kernel_thread)
16666
16667 ENTRY(child_rip)
16668 pushq $0 # fake return address
16669 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16670 */
16671 movq %rdi, %rax
16672 movq %rsi, %rdi
16673 + pax_force_fptr %rax
16674 call *%rax
16675 # exit
16676 mov %eax, %edi
16677 call do_exit
16678 ud2 # padding for call trace
16679 CFI_ENDPROC
16680 -END(child_rip)
16681 +ENDPROC(child_rip)
16682
16683 /*
16684 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16685 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16686 RESTORE_REST
16687 testq %rax,%rax
16688 je int_ret_from_sys_call
16689 - RESTORE_ARGS
16690 UNFAKE_STACK_FRAME
16691 + pax_force_retaddr
16692 ret
16693 CFI_ENDPROC
16694 -END(kernel_execve)
16695 +ENDPROC(kernel_execve)
16696
16697 /* Call softirq on interrupt stack. Interrupts are off. */
16698 ENTRY(call_softirq)
16699 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16700 CFI_DEF_CFA_REGISTER rsp
16701 CFI_ADJUST_CFA_OFFSET -8
16702 decl PER_CPU_VAR(irq_count)
16703 + pax_force_retaddr
16704 ret
16705 CFI_ENDPROC
16706 -END(call_softirq)
16707 +ENDPROC(call_softirq)
16708
16709 #ifdef CONFIG_XEN
16710 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16711 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16712 decl PER_CPU_VAR(irq_count)
16713 jmp error_exit
16714 CFI_ENDPROC
16715 -END(xen_do_hypervisor_callback)
16716 +ENDPROC(xen_do_hypervisor_callback)
16717
16718 /*
16719 * Hypervisor uses this for application faults while it executes.
16720 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16721 SAVE_ALL
16722 jmp error_exit
16723 CFI_ENDPROC
16724 -END(xen_failsafe_callback)
16725 +ENDPROC(xen_failsafe_callback)
16726
16727 #endif /* CONFIG_XEN */
16728
16729 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16730 TRACE_IRQS_OFF
16731 testl %ebx,%ebx /* swapgs needed? */
16732 jnz paranoid_restore
16733 - testl $3,CS(%rsp)
16734 + testb $3,CS(%rsp)
16735 jnz paranoid_userspace
16736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16737 + pax_exit_kernel
16738 + TRACE_IRQS_IRETQ 0
16739 + SWAPGS_UNSAFE_STACK
16740 + RESTORE_ALL 8
16741 + pax_force_retaddr_bts
16742 + jmp irq_return
16743 +#endif
16744 paranoid_swapgs:
16745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16746 + pax_exit_kernel_user
16747 +#else
16748 + pax_exit_kernel
16749 +#endif
16750 TRACE_IRQS_IRETQ 0
16751 SWAPGS_UNSAFE_STACK
16752 RESTORE_ALL 8
16753 jmp irq_return
16754 paranoid_restore:
16755 + pax_exit_kernel
16756 TRACE_IRQS_IRETQ 0
16757 RESTORE_ALL 8
16758 + pax_force_retaddr_bts
16759 jmp irq_return
16760 paranoid_userspace:
16761 GET_THREAD_INFO(%rcx)
16762 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16763 TRACE_IRQS_OFF
16764 jmp paranoid_userspace
16765 CFI_ENDPROC
16766 -END(paranoid_exit)
16767 +ENDPROC(paranoid_exit)
16768
16769 /*
16770 * Exception entry point. This expects an error code/orig_rax on the stack.
16771 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16772 movq_cfi r14, R14+8
16773 movq_cfi r15, R15+8
16774 xorl %ebx,%ebx
16775 - testl $3,CS+8(%rsp)
16776 + testb $3,CS+8(%rsp)
16777 je error_kernelspace
16778 error_swapgs:
16779 SWAPGS
16780 error_sti:
16781 TRACE_IRQS_OFF
16782 + pax_force_retaddr_bts
16783 ret
16784 CFI_ENDPROC
16785
16786 @@ -1497,7 +1885,7 @@ error_kernelspace:
16787 cmpq $gs_change,RIP+8(%rsp)
16788 je error_swapgs
16789 jmp error_sti
16790 -END(error_entry)
16791 +ENDPROC(error_entry)
16792
16793
16794 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16795 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16796 jnz retint_careful
16797 jmp retint_swapgs
16798 CFI_ENDPROC
16799 -END(error_exit)
16800 +ENDPROC(error_exit)
16801
16802
16803 /* runs on exception stack */
16804 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16805 CFI_ADJUST_CFA_OFFSET 15*8
16806 call save_paranoid
16807 DEFAULT_FRAME 0
16808 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16809 + testb $3, CS(%rsp)
16810 + jnz 1f
16811 + pax_enter_kernel
16812 + jmp 2f
16813 +1: pax_enter_kernel_user
16814 +2:
16815 +#else
16816 + pax_enter_kernel
16817 +#endif
16818 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16819 movq %rsp,%rdi
16820 movq $-1,%rsi
16821 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16822 DISABLE_INTERRUPTS(CLBR_NONE)
16823 testl %ebx,%ebx /* swapgs needed? */
16824 jnz nmi_restore
16825 - testl $3,CS(%rsp)
16826 + testb $3,CS(%rsp)
16827 jnz nmi_userspace
16828 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16829 + pax_exit_kernel
16830 + SWAPGS_UNSAFE_STACK
16831 + RESTORE_ALL 8
16832 + pax_force_retaddr_bts
16833 + jmp irq_return
16834 +#endif
16835 nmi_swapgs:
16836 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16837 + pax_exit_kernel_user
16838 +#else
16839 + pax_exit_kernel
16840 +#endif
16841 SWAPGS_UNSAFE_STACK
16842 + RESTORE_ALL 8
16843 + jmp irq_return
16844 nmi_restore:
16845 + pax_exit_kernel
16846 RESTORE_ALL 8
16847 + pax_force_retaddr_bts
16848 jmp irq_return
16849 nmi_userspace:
16850 GET_THREAD_INFO(%rcx)
16851 @@ -1573,14 +1987,14 @@ nmi_schedule:
16852 jmp paranoid_exit
16853 CFI_ENDPROC
16854 #endif
16855 -END(nmi)
16856 +ENDPROC(nmi)
16857
16858 ENTRY(ignore_sysret)
16859 CFI_STARTPROC
16860 mov $-ENOSYS,%eax
16861 sysret
16862 CFI_ENDPROC
16863 -END(ignore_sysret)
16864 +ENDPROC(ignore_sysret)
16865
16866 /*
16867 * End of kprobes section
16868 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16869 index 9dbb527..7b3615a 100644
16870 --- a/arch/x86/kernel/ftrace.c
16871 +++ b/arch/x86/kernel/ftrace.c
16872 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16873 static void *mod_code_newcode; /* holds the text to write to the IP */
16874
16875 static unsigned nmi_wait_count;
16876 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16877 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16878
16879 int ftrace_arch_read_dyn_info(char *buf, int size)
16880 {
16881 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16882
16883 r = snprintf(buf, size, "%u %u",
16884 nmi_wait_count,
16885 - atomic_read(&nmi_update_count));
16886 + atomic_read_unchecked(&nmi_update_count));
16887 return r;
16888 }
16889
16890 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16891 {
16892 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16893 smp_rmb();
16894 + pax_open_kernel();
16895 ftrace_mod_code();
16896 - atomic_inc(&nmi_update_count);
16897 + pax_close_kernel();
16898 + atomic_inc_unchecked(&nmi_update_count);
16899 }
16900 /* Must have previous changes seen before executions */
16901 smp_mb();
16902 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16903
16904
16905
16906 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16907 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16908
16909 static unsigned char *ftrace_nop_replace(void)
16910 {
16911 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16912 {
16913 unsigned char replaced[MCOUNT_INSN_SIZE];
16914
16915 + ip = ktla_ktva(ip);
16916 +
16917 /*
16918 * Note: Due to modules and __init, code can
16919 * disappear and change, we need to protect against faulting
16920 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16921 unsigned char old[MCOUNT_INSN_SIZE], *new;
16922 int ret;
16923
16924 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16925 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16926 new = ftrace_call_replace(ip, (unsigned long)func);
16927 ret = ftrace_modify_code(ip, old, new);
16928
16929 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16930 switch (faulted) {
16931 case 0:
16932 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16933 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16934 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16935 break;
16936 case 1:
16937 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16938 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16939 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16940 break;
16941 case 2:
16942 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16943 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16944 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16945 break;
16946 }
16947
16948 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16949 {
16950 unsigned char code[MCOUNT_INSN_SIZE];
16951
16952 + ip = ktla_ktva(ip);
16953 +
16954 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16955 return -EFAULT;
16956
16957 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16958 index 4f8e250..df24706 100644
16959 --- a/arch/x86/kernel/head32.c
16960 +++ b/arch/x86/kernel/head32.c
16961 @@ -16,6 +16,7 @@
16962 #include <asm/apic.h>
16963 #include <asm/io_apic.h>
16964 #include <asm/bios_ebda.h>
16965 +#include <asm/boot.h>
16966
16967 static void __init i386_default_early_setup(void)
16968 {
16969 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16970 {
16971 reserve_trampoline_memory();
16972
16973 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16974 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16975
16976 #ifdef CONFIG_BLK_DEV_INITRD
16977 /* Reserve INITRD */
16978 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16979 index 34c3308..6fc4e76 100644
16980 --- a/arch/x86/kernel/head_32.S
16981 +++ b/arch/x86/kernel/head_32.S
16982 @@ -19,10 +19,17 @@
16983 #include <asm/setup.h>
16984 #include <asm/processor-flags.h>
16985 #include <asm/percpu.h>
16986 +#include <asm/msr-index.h>
16987
16988 /* Physical address */
16989 #define pa(X) ((X) - __PAGE_OFFSET)
16990
16991 +#ifdef CONFIG_PAX_KERNEXEC
16992 +#define ta(X) (X)
16993 +#else
16994 +#define ta(X) ((X) - __PAGE_OFFSET)
16995 +#endif
16996 +
16997 /*
16998 * References to members of the new_cpu_data structure.
16999 */
17000 @@ -52,11 +59,7 @@
17001 * and small than max_low_pfn, otherwise will waste some page table entries
17002 */
17003
17004 -#if PTRS_PER_PMD > 1
17005 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17006 -#else
17007 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17008 -#endif
17009 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17010
17011 /* Enough space to fit pagetables for the low memory linear map */
17012 MAPPING_BEYOND_END = \
17013 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17014 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17015
17016 /*
17017 + * Real beginning of normal "text" segment
17018 + */
17019 +ENTRY(stext)
17020 +ENTRY(_stext)
17021 +
17022 +/*
17023 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17024 * %esi points to the real-mode code as a 32-bit pointer.
17025 * CS and DS must be 4 GB flat segments, but we don't depend on
17026 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17027 * can.
17028 */
17029 __HEAD
17030 +
17031 +#ifdef CONFIG_PAX_KERNEXEC
17032 + jmp startup_32
17033 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17034 +.fill PAGE_SIZE-5,1,0xcc
17035 +#endif
17036 +
17037 ENTRY(startup_32)
17038 + movl pa(stack_start),%ecx
17039 +
17040 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17041 us to not reload segments */
17042 testb $(1<<6), BP_loadflags(%esi)
17043 @@ -95,7 +113,60 @@ ENTRY(startup_32)
17044 movl %eax,%es
17045 movl %eax,%fs
17046 movl %eax,%gs
17047 + movl %eax,%ss
17048 2:
17049 + leal -__PAGE_OFFSET(%ecx),%esp
17050 +
17051 +#ifdef CONFIG_SMP
17052 + movl $pa(cpu_gdt_table),%edi
17053 + movl $__per_cpu_load,%eax
17054 + movw %ax,__KERNEL_PERCPU + 2(%edi)
17055 + rorl $16,%eax
17056 + movb %al,__KERNEL_PERCPU + 4(%edi)
17057 + movb %ah,__KERNEL_PERCPU + 7(%edi)
17058 + movl $__per_cpu_end - 1,%eax
17059 + subl $__per_cpu_start,%eax
17060 + movw %ax,__KERNEL_PERCPU + 0(%edi)
17061 +#endif
17062 +
17063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17064 + movl $NR_CPUS,%ecx
17065 + movl $pa(cpu_gdt_table),%edi
17066 +1:
17067 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17068 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17069 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17070 + addl $PAGE_SIZE_asm,%edi
17071 + loop 1b
17072 +#endif
17073 +
17074 +#ifdef CONFIG_PAX_KERNEXEC
17075 + movl $pa(boot_gdt),%edi
17076 + movl $__LOAD_PHYSICAL_ADDR,%eax
17077 + movw %ax,__BOOT_CS + 2(%edi)
17078 + rorl $16,%eax
17079 + movb %al,__BOOT_CS + 4(%edi)
17080 + movb %ah,__BOOT_CS + 7(%edi)
17081 + rorl $16,%eax
17082 +
17083 + ljmp $(__BOOT_CS),$1f
17084 +1:
17085 +
17086 + movl $NR_CPUS,%ecx
17087 + movl $pa(cpu_gdt_table),%edi
17088 + addl $__PAGE_OFFSET,%eax
17089 +1:
17090 + movw %ax,__KERNEL_CS + 2(%edi)
17091 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17092 + rorl $16,%eax
17093 + movb %al,__KERNEL_CS + 4(%edi)
17094 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17095 + movb %ah,__KERNEL_CS + 7(%edi)
17096 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17097 + rorl $16,%eax
17098 + addl $PAGE_SIZE_asm,%edi
17099 + loop 1b
17100 +#endif
17101
17102 /*
17103 * Clear BSS first so that there are no surprises...
17104 @@ -140,9 +211,7 @@ ENTRY(startup_32)
17105 cmpl $num_subarch_entries, %eax
17106 jae bad_subarch
17107
17108 - movl pa(subarch_entries)(,%eax,4), %eax
17109 - subl $__PAGE_OFFSET, %eax
17110 - jmp *%eax
17111 + jmp *pa(subarch_entries)(,%eax,4)
17112
17113 bad_subarch:
17114 WEAK(lguest_entry)
17115 @@ -154,10 +223,10 @@ WEAK(xen_entry)
17116 __INITDATA
17117
17118 subarch_entries:
17119 - .long default_entry /* normal x86/PC */
17120 - .long lguest_entry /* lguest hypervisor */
17121 - .long xen_entry /* Xen hypervisor */
17122 - .long default_entry /* Moorestown MID */
17123 + .long ta(default_entry) /* normal x86/PC */
17124 + .long ta(lguest_entry) /* lguest hypervisor */
17125 + .long ta(xen_entry) /* Xen hypervisor */
17126 + .long ta(default_entry) /* Moorestown MID */
17127 num_subarch_entries = (. - subarch_entries) / 4
17128 .previous
17129 #endif /* CONFIG_PARAVIRT */
17130 @@ -218,8 +287,11 @@ default_entry:
17131 movl %eax, pa(max_pfn_mapped)
17132
17133 /* Do early initialization of the fixmap area */
17134 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17135 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17136 +#ifdef CONFIG_COMPAT_VDSO
17137 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17138 +#else
17139 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17140 +#endif
17141 #else /* Not PAE */
17142
17143 page_pde_offset = (__PAGE_OFFSET >> 20);
17144 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17145 movl %eax, pa(max_pfn_mapped)
17146
17147 /* Do early initialization of the fixmap area */
17148 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17149 - movl %eax,pa(swapper_pg_dir+0xffc)
17150 +#ifdef CONFIG_COMPAT_VDSO
17151 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17152 +#else
17153 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17154 +#endif
17155 #endif
17156 jmp 3f
17157 /*
17158 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17159 movl %eax,%es
17160 movl %eax,%fs
17161 movl %eax,%gs
17162 + movl pa(stack_start),%ecx
17163 + movl %eax,%ss
17164 + leal -__PAGE_OFFSET(%ecx),%esp
17165 #endif /* CONFIG_SMP */
17166 3:
17167
17168 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17169 orl %edx,%eax
17170 movl %eax,%cr4
17171
17172 +#ifdef CONFIG_X86_PAE
17173 btl $5, %eax # check if PAE is enabled
17174 jnc 6f
17175
17176 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17177 cpuid
17178 cmpl $0x80000000, %eax
17179 jbe 6f
17180 +
17181 + /* Clear bogus XD_DISABLE bits */
17182 + call verify_cpu
17183 +
17184 mov $0x80000001, %eax
17185 cpuid
17186 /* Execute Disable bit supported? */
17187 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17188 jnc 6f
17189
17190 /* Setup EFER (Extended Feature Enable Register) */
17191 - movl $0xc0000080, %ecx
17192 + movl $MSR_EFER, %ecx
17193 rdmsr
17194
17195 btsl $11, %eax
17196 /* Make changes effective */
17197 wrmsr
17198
17199 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17200 + movl $1,pa(nx_enabled)
17201 +#endif
17202 +
17203 6:
17204
17205 /*
17206 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17207 movl %eax,%cr0 /* ..and set paging (PG) bit */
17208 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17209 1:
17210 - /* Set up the stack pointer */
17211 - lss stack_start,%esp
17212 + /* Shift the stack pointer to a virtual address */
17213 + addl $__PAGE_OFFSET, %esp
17214
17215 /*
17216 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17217 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17218
17219 #ifdef CONFIG_SMP
17220 cmpb $0, ready
17221 - jz 1f /* Initial CPU cleans BSS */
17222 - jmp checkCPUtype
17223 -1:
17224 + jnz checkCPUtype
17225 #endif /* CONFIG_SMP */
17226
17227 /*
17228 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17229 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17230 movl %eax,%ss # after changing gdt.
17231
17232 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17233 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17234 movl %eax,%ds
17235 movl %eax,%es
17236
17237 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17238 */
17239 cmpb $0,ready
17240 jne 1f
17241 - movl $per_cpu__gdt_page,%eax
17242 + movl $cpu_gdt_table,%eax
17243 movl $per_cpu__stack_canary,%ecx
17244 +#ifdef CONFIG_SMP
17245 + addl $__per_cpu_load,%ecx
17246 +#endif
17247 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17248 shrl $16, %ecx
17249 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17250 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17251 1:
17252 -#endif
17253 movl $(__KERNEL_STACK_CANARY),%eax
17254 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17255 + movl $(__USER_DS),%eax
17256 +#else
17257 + xorl %eax,%eax
17258 +#endif
17259 movl %eax,%gs
17260
17261 xorl %eax,%eax # Clear LDT
17262 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17263
17264 cld # gcc2 wants the direction flag cleared at all times
17265 pushl $0 # fake return address for unwinder
17266 -#ifdef CONFIG_SMP
17267 - movb ready, %cl
17268 movb $1, ready
17269 - cmpb $0,%cl # the first CPU calls start_kernel
17270 - je 1f
17271 - movl (stack_start), %esp
17272 -1:
17273 -#endif /* CONFIG_SMP */
17274 jmp *(initial_code)
17275
17276 /*
17277 @@ -546,22 +631,22 @@ early_page_fault:
17278 jmp early_fault
17279
17280 early_fault:
17281 - cld
17282 #ifdef CONFIG_PRINTK
17283 + cmpl $1,%ss:early_recursion_flag
17284 + je hlt_loop
17285 + incl %ss:early_recursion_flag
17286 + cld
17287 pusha
17288 movl $(__KERNEL_DS),%eax
17289 movl %eax,%ds
17290 movl %eax,%es
17291 - cmpl $2,early_recursion_flag
17292 - je hlt_loop
17293 - incl early_recursion_flag
17294 movl %cr2,%eax
17295 pushl %eax
17296 pushl %edx /* trapno */
17297 pushl $fault_msg
17298 call printk
17299 +; call dump_stack
17300 #endif
17301 - call dump_stack
17302 hlt_loop:
17303 hlt
17304 jmp hlt_loop
17305 @@ -569,8 +654,11 @@ hlt_loop:
17306 /* This is the default interrupt "handler" :-) */
17307 ALIGN
17308 ignore_int:
17309 - cld
17310 #ifdef CONFIG_PRINTK
17311 + cmpl $2,%ss:early_recursion_flag
17312 + je hlt_loop
17313 + incl %ss:early_recursion_flag
17314 + cld
17315 pushl %eax
17316 pushl %ecx
17317 pushl %edx
17318 @@ -579,9 +667,6 @@ ignore_int:
17319 movl $(__KERNEL_DS),%eax
17320 movl %eax,%ds
17321 movl %eax,%es
17322 - cmpl $2,early_recursion_flag
17323 - je hlt_loop
17324 - incl early_recursion_flag
17325 pushl 16(%esp)
17326 pushl 24(%esp)
17327 pushl 32(%esp)
17328 @@ -600,6 +685,8 @@ ignore_int:
17329 #endif
17330 iret
17331
17332 +#include "verify_cpu.S"
17333 +
17334 __REFDATA
17335 .align 4
17336 ENTRY(initial_code)
17337 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17338 /*
17339 * BSS section
17340 */
17341 -__PAGE_ALIGNED_BSS
17342 - .align PAGE_SIZE_asm
17343 #ifdef CONFIG_X86_PAE
17344 +.section .swapper_pg_pmd,"a",@progbits
17345 swapper_pg_pmd:
17346 .fill 1024*KPMDS,4,0
17347 #else
17348 +.section .swapper_pg_dir,"a",@progbits
17349 ENTRY(swapper_pg_dir)
17350 .fill 1024,4,0
17351 #endif
17352 +.section .swapper_pg_fixmap,"a",@progbits
17353 swapper_pg_fixmap:
17354 .fill 1024,4,0
17355 #ifdef CONFIG_X86_TRAMPOLINE
17356 +.section .trampoline_pg_dir,"a",@progbits
17357 ENTRY(trampoline_pg_dir)
17358 +#ifdef CONFIG_X86_PAE
17359 + .fill 4,8,0
17360 +#else
17361 .fill 1024,4,0
17362 #endif
17363 +#endif
17364 +
17365 +.section .empty_zero_page,"a",@progbits
17366 ENTRY(empty_zero_page)
17367 .fill 4096,1,0
17368
17369 /*
17370 + * The IDT has to be page-aligned to simplify the Pentium
17371 + * F0 0F bug workaround.. We have a special link segment
17372 + * for this.
17373 + */
17374 +.section .idt,"a",@progbits
17375 +ENTRY(idt_table)
17376 + .fill 256,8,0
17377 +
17378 +/*
17379 * This starts the data section.
17380 */
17381 #ifdef CONFIG_X86_PAE
17382 -__PAGE_ALIGNED_DATA
17383 - /* Page-aligned for the benefit of paravirt? */
17384 - .align PAGE_SIZE_asm
17385 +.section .swapper_pg_dir,"a",@progbits
17386 +
17387 ENTRY(swapper_pg_dir)
17388 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17389 # if KPMDS == 3
17390 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17391 # error "Kernel PMDs should be 1, 2 or 3"
17392 # endif
17393 .align PAGE_SIZE_asm /* needs to be page-sized too */
17394 +
17395 +#ifdef CONFIG_PAX_PER_CPU_PGD
17396 +ENTRY(cpu_pgd)
17397 + .rept NR_CPUS
17398 + .fill 4,8,0
17399 + .endr
17400 +#endif
17401 +
17402 #endif
17403
17404 .data
17405 +.balign 4
17406 ENTRY(stack_start)
17407 - .long init_thread_union+THREAD_SIZE
17408 - .long __BOOT_DS
17409 + .long init_thread_union+THREAD_SIZE-8
17410
17411 ready: .byte 0
17412
17413 +.section .rodata,"a",@progbits
17414 early_recursion_flag:
17415 .long 0
17416
17417 @@ -697,7 +809,7 @@ fault_msg:
17418 .word 0 # 32 bit align gdt_desc.address
17419 boot_gdt_descr:
17420 .word __BOOT_DS+7
17421 - .long boot_gdt - __PAGE_OFFSET
17422 + .long pa(boot_gdt)
17423
17424 .word 0 # 32-bit align idt_desc.address
17425 idt_descr:
17426 @@ -708,7 +820,7 @@ idt_descr:
17427 .word 0 # 32 bit align gdt_desc.address
17428 ENTRY(early_gdt_descr)
17429 .word GDT_ENTRIES*8-1
17430 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17431 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17432
17433 /*
17434 * The boot_gdt must mirror the equivalent in setup.S and is
17435 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17436 .align L1_CACHE_BYTES
17437 ENTRY(boot_gdt)
17438 .fill GDT_ENTRY_BOOT_CS,8,0
17439 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17440 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17441 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17442 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17443 +
17444 + .align PAGE_SIZE_asm
17445 +ENTRY(cpu_gdt_table)
17446 + .rept NR_CPUS
17447 + .quad 0x0000000000000000 /* NULL descriptor */
17448 + .quad 0x0000000000000000 /* 0x0b reserved */
17449 + .quad 0x0000000000000000 /* 0x13 reserved */
17450 + .quad 0x0000000000000000 /* 0x1b reserved */
17451 +
17452 +#ifdef CONFIG_PAX_KERNEXEC
17453 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17454 +#else
17455 + .quad 0x0000000000000000 /* 0x20 unused */
17456 +#endif
17457 +
17458 + .quad 0x0000000000000000 /* 0x28 unused */
17459 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17460 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17461 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17462 + .quad 0x0000000000000000 /* 0x4b reserved */
17463 + .quad 0x0000000000000000 /* 0x53 reserved */
17464 + .quad 0x0000000000000000 /* 0x5b reserved */
17465 +
17466 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17467 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17468 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17469 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17470 +
17471 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17472 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17473 +
17474 + /*
17475 + * Segments used for calling PnP BIOS have byte granularity.
17476 + * The code segments and data segments have fixed 64k limits,
17477 + * the transfer segment sizes are set at run time.
17478 + */
17479 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17480 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17481 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17482 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17483 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17484 +
17485 + /*
17486 + * The APM segments have byte granularity and their bases
17487 + * are set at run time. All have 64k limits.
17488 + */
17489 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17490 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17491 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17492 +
17493 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17494 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17495 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17496 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17497 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17498 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17499 +
17500 + /* Be sure this is zeroed to avoid false validations in Xen */
17501 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17502 + .endr
17503 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17504 index 780cd92..758b2a6 100644
17505 --- a/arch/x86/kernel/head_64.S
17506 +++ b/arch/x86/kernel/head_64.S
17507 @@ -19,6 +19,8 @@
17508 #include <asm/cache.h>
17509 #include <asm/processor-flags.h>
17510 #include <asm/percpu.h>
17511 +#include <asm/cpufeature.h>
17512 +#include <asm/alternative-asm.h>
17513
17514 #ifdef CONFIG_PARAVIRT
17515 #include <asm/asm-offsets.h>
17516 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17517 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17518 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17519 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17520 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17521 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17522 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17523 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17524 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17525 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17526
17527 .text
17528 __HEAD
17529 @@ -85,35 +93,23 @@ startup_64:
17530 */
17531 addq %rbp, init_level4_pgt + 0(%rip)
17532 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17533 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17534 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17535 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17536 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17537
17538 addq %rbp, level3_ident_pgt + 0(%rip)
17539 +#ifndef CONFIG_XEN
17540 + addq %rbp, level3_ident_pgt + 8(%rip)
17541 +#endif
17542
17543 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17544 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17545 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17546 +
17547 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17548 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17549
17550 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17551 -
17552 - /* Add an Identity mapping if I am above 1G */
17553 - leaq _text(%rip), %rdi
17554 - andq $PMD_PAGE_MASK, %rdi
17555 -
17556 - movq %rdi, %rax
17557 - shrq $PUD_SHIFT, %rax
17558 - andq $(PTRS_PER_PUD - 1), %rax
17559 - jz ident_complete
17560 -
17561 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17562 - leaq level3_ident_pgt(%rip), %rbx
17563 - movq %rdx, 0(%rbx, %rax, 8)
17564 -
17565 - movq %rdi, %rax
17566 - shrq $PMD_SHIFT, %rax
17567 - andq $(PTRS_PER_PMD - 1), %rax
17568 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17569 - leaq level2_spare_pgt(%rip), %rbx
17570 - movq %rdx, 0(%rbx, %rax, 8)
17571 -ident_complete:
17572 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17573
17574 /*
17575 * Fixup the kernel text+data virtual addresses. Note that
17576 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17577 * after the boot processor executes this code.
17578 */
17579
17580 - /* Enable PAE mode and PGE */
17581 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17582 + /* Enable PAE mode and PSE/PGE */
17583 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17584 movq %rax, %cr4
17585
17586 /* Setup early boot stage 4 level pagetables. */
17587 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17588 movl $MSR_EFER, %ecx
17589 rdmsr
17590 btsl $_EFER_SCE, %eax /* Enable System Call */
17591 - btl $20,%edi /* No Execute supported? */
17592 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17593 jnc 1f
17594 btsl $_EFER_NX, %eax
17595 + leaq init_level4_pgt(%rip), %rdi
17596 +#ifndef CONFIG_EFI
17597 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17598 +#endif
17599 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17600 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17601 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17602 1: wrmsr /* Make changes effective */
17603
17604 /* Setup cr0 */
17605 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17606 * jump. In addition we need to ensure %cs is set so we make this
17607 * a far return.
17608 */
17609 + pax_set_fptr_mask
17610 movq initial_code(%rip),%rax
17611 pushq $0 # fake return address to stop unwinder
17612 pushq $__KERNEL_CS # set correct cs
17613 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17614 .quad x86_64_start_kernel
17615 ENTRY(initial_gs)
17616 .quad INIT_PER_CPU_VAR(irq_stack_union)
17617 - __FINITDATA
17618
17619 ENTRY(stack_start)
17620 .quad init_thread_union+THREAD_SIZE-8
17621 .word 0
17622 + __FINITDATA
17623
17624 bad_address:
17625 jmp bad_address
17626
17627 - .section ".init.text","ax"
17628 + __INIT
17629 #ifdef CONFIG_EARLY_PRINTK
17630 .globl early_idt_handlers
17631 early_idt_handlers:
17632 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17633 #endif /* EARLY_PRINTK */
17634 1: hlt
17635 jmp 1b
17636 + .previous
17637
17638 #ifdef CONFIG_EARLY_PRINTK
17639 + __INITDATA
17640 early_recursion_flag:
17641 .long 0
17642 + .previous
17643
17644 + .section .rodata,"a",@progbits
17645 early_idt_msg:
17646 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17647 early_idt_ripmsg:
17648 .asciz "RIP %s\n"
17649 + .previous
17650 #endif /* CONFIG_EARLY_PRINTK */
17651 - .previous
17652
17653 + .section .rodata,"a",@progbits
17654 #define NEXT_PAGE(name) \
17655 .balign PAGE_SIZE; \
17656 ENTRY(name)
17657 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17658 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17659 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17660 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17661 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17662 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17663 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17664 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17665 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17666 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17667 .org init_level4_pgt + L4_START_KERNEL*8, 0
17668 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17669 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17670
17671 +#ifdef CONFIG_PAX_PER_CPU_PGD
17672 +NEXT_PAGE(cpu_pgd)
17673 + .rept NR_CPUS
17674 + .fill 512,8,0
17675 + .endr
17676 +#endif
17677 +
17678 NEXT_PAGE(level3_ident_pgt)
17679 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17680 +#ifdef CONFIG_XEN
17681 .fill 511,8,0
17682 +#else
17683 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17684 + .fill 510,8,0
17685 +#endif
17686 +
17687 +NEXT_PAGE(level3_vmalloc_start_pgt)
17688 + .fill 512,8,0
17689 +
17690 +NEXT_PAGE(level3_vmalloc_end_pgt)
17691 + .fill 512,8,0
17692 +
17693 +NEXT_PAGE(level3_vmemmap_pgt)
17694 + .fill L3_VMEMMAP_START,8,0
17695 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17696
17697 NEXT_PAGE(level3_kernel_pgt)
17698 .fill L3_START_KERNEL,8,0
17699 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17700 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17701 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17702
17703 +NEXT_PAGE(level2_vmemmap_pgt)
17704 + .fill 512,8,0
17705 +
17706 NEXT_PAGE(level2_fixmap_pgt)
17707 - .fill 506,8,0
17708 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17709 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17710 - .fill 5,8,0
17711 + .fill 507,8,0
17712 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17713 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17714 + .fill 4,8,0
17715
17716 -NEXT_PAGE(level1_fixmap_pgt)
17717 +NEXT_PAGE(level1_vsyscall_pgt)
17718 .fill 512,8,0
17719
17720 -NEXT_PAGE(level2_ident_pgt)
17721 - /* Since I easily can, map the first 1G.
17722 + /* Since I easily can, map the first 2G.
17723 * Don't set NX because code runs from these pages.
17724 */
17725 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17726 +NEXT_PAGE(level2_ident_pgt)
17727 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17728
17729 NEXT_PAGE(level2_kernel_pgt)
17730 /*
17731 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17732 * If you want to increase this then increase MODULES_VADDR
17733 * too.)
17734 */
17735 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17736 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17737 -
17738 -NEXT_PAGE(level2_spare_pgt)
17739 - .fill 512, 8, 0
17740 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17741
17742 #undef PMDS
17743 #undef NEXT_PAGE
17744
17745 - .data
17746 + .align PAGE_SIZE
17747 +ENTRY(cpu_gdt_table)
17748 + .rept NR_CPUS
17749 + .quad 0x0000000000000000 /* NULL descriptor */
17750 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17751 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17752 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17753 + .quad 0x00cffb000000ffff /* __USER32_CS */
17754 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17755 + .quad 0x00affb000000ffff /* __USER_CS */
17756 +
17757 +#ifdef CONFIG_PAX_KERNEXEC
17758 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17759 +#else
17760 + .quad 0x0 /* unused */
17761 +#endif
17762 +
17763 + .quad 0,0 /* TSS */
17764 + .quad 0,0 /* LDT */
17765 + .quad 0,0,0 /* three TLS descriptors */
17766 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17767 + /* asm/segment.h:GDT_ENTRIES must match this */
17768 +
17769 + /* zero the remaining page */
17770 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17771 + .endr
17772 +
17773 .align 16
17774 .globl early_gdt_descr
17775 early_gdt_descr:
17776 .word GDT_ENTRIES*8-1
17777 early_gdt_descr_base:
17778 - .quad INIT_PER_CPU_VAR(gdt_page)
17779 + .quad cpu_gdt_table
17780
17781 ENTRY(phys_base)
17782 /* This must match the first entry in level2_kernel_pgt */
17783 .quad 0x0000000000000000
17784
17785 #include "../../x86/xen/xen-head.S"
17786 -
17787 - .section .bss, "aw", @nobits
17788 +
17789 + .section .rodata,"a",@progbits
17790 .align L1_CACHE_BYTES
17791 ENTRY(idt_table)
17792 - .skip IDT_ENTRIES * 16
17793 + .fill 512,8,0
17794
17795 __PAGE_ALIGNED_BSS
17796 .align PAGE_SIZE
17797 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17798 index 9c3bd4a..e1d9b35 100644
17799 --- a/arch/x86/kernel/i386_ksyms_32.c
17800 +++ b/arch/x86/kernel/i386_ksyms_32.c
17801 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17802 EXPORT_SYMBOL(cmpxchg8b_emu);
17803 #endif
17804
17805 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17806 +
17807 /* Networking helper routines. */
17808 EXPORT_SYMBOL(csum_partial_copy_generic);
17809 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17810 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17811
17812 EXPORT_SYMBOL(__get_user_1);
17813 EXPORT_SYMBOL(__get_user_2);
17814 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17815
17816 EXPORT_SYMBOL(csum_partial);
17817 EXPORT_SYMBOL(empty_zero_page);
17818 +
17819 +#ifdef CONFIG_PAX_KERNEXEC
17820 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17821 +#endif
17822 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17823 index df89102..a244320 100644
17824 --- a/arch/x86/kernel/i8259.c
17825 +++ b/arch/x86/kernel/i8259.c
17826 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17827 "spurious 8259A interrupt: IRQ%d.\n", irq);
17828 spurious_irq_mask |= irqmask;
17829 }
17830 - atomic_inc(&irq_err_count);
17831 + atomic_inc_unchecked(&irq_err_count);
17832 /*
17833 * Theoretically we do not have to handle this IRQ,
17834 * but in Linux this does not cause problems and is
17835 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17836 index 3a54dcb..1c22348 100644
17837 --- a/arch/x86/kernel/init_task.c
17838 +++ b/arch/x86/kernel/init_task.c
17839 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17840 * way process stacks are handled. This is done by having a special
17841 * "init_task" linker map entry..
17842 */
17843 -union thread_union init_thread_union __init_task_data =
17844 - { INIT_THREAD_INFO(init_task) };
17845 +union thread_union init_thread_union __init_task_data;
17846
17847 /*
17848 * Initial task structure.
17849 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17850 * section. Since TSS's are completely CPU-local, we want them
17851 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17852 */
17853 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17854 -
17855 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17856 +EXPORT_SYMBOL(init_tss);
17857 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17858 index 99c4d30..74c84e9 100644
17859 --- a/arch/x86/kernel/ioport.c
17860 +++ b/arch/x86/kernel/ioport.c
17861 @@ -6,6 +6,7 @@
17862 #include <linux/sched.h>
17863 #include <linux/kernel.h>
17864 #include <linux/capability.h>
17865 +#include <linux/security.h>
17866 #include <linux/errno.h>
17867 #include <linux/types.h>
17868 #include <linux/ioport.h>
17869 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17870
17871 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17872 return -EINVAL;
17873 +#ifdef CONFIG_GRKERNSEC_IO
17874 + if (turn_on && grsec_disable_privio) {
17875 + gr_handle_ioperm();
17876 + return -EPERM;
17877 + }
17878 +#endif
17879 if (turn_on && !capable(CAP_SYS_RAWIO))
17880 return -EPERM;
17881
17882 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17883 * because the ->io_bitmap_max value must match the bitmap
17884 * contents:
17885 */
17886 - tss = &per_cpu(init_tss, get_cpu());
17887 + tss = init_tss + get_cpu();
17888
17889 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17890
17891 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17892 return -EINVAL;
17893 /* Trying to gain more privileges? */
17894 if (level > old) {
17895 +#ifdef CONFIG_GRKERNSEC_IO
17896 + if (grsec_disable_privio) {
17897 + gr_handle_iopl();
17898 + return -EPERM;
17899 + }
17900 +#endif
17901 if (!capable(CAP_SYS_RAWIO))
17902 return -EPERM;
17903 }
17904 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17905 index 04bbd52..83a07d9 100644
17906 --- a/arch/x86/kernel/irq.c
17907 +++ b/arch/x86/kernel/irq.c
17908 @@ -15,7 +15,7 @@
17909 #include <asm/mce.h>
17910 #include <asm/hw_irq.h>
17911
17912 -atomic_t irq_err_count;
17913 +atomic_unchecked_t irq_err_count;
17914
17915 /* Function pointer for generic interrupt vector handling */
17916 void (*generic_interrupt_extension)(void) = NULL;
17917 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17918 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17919 seq_printf(p, " Machine check polls\n");
17920 #endif
17921 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17922 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17923 #if defined(CONFIG_X86_IO_APIC)
17924 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17925 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17926 #endif
17927 return 0;
17928 }
17929 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17930
17931 u64 arch_irq_stat(void)
17932 {
17933 - u64 sum = atomic_read(&irq_err_count);
17934 + u64 sum = atomic_read_unchecked(&irq_err_count);
17935
17936 #ifdef CONFIG_X86_IO_APIC
17937 - sum += atomic_read(&irq_mis_count);
17938 + sum += atomic_read_unchecked(&irq_mis_count);
17939 #endif
17940 return sum;
17941 }
17942 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17943 index 7d35d0f..03f1d52 100644
17944 --- a/arch/x86/kernel/irq_32.c
17945 +++ b/arch/x86/kernel/irq_32.c
17946 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17947 __asm__ __volatile__("andl %%esp,%0" :
17948 "=r" (sp) : "0" (THREAD_SIZE - 1));
17949
17950 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17951 + return sp < STACK_WARN;
17952 }
17953
17954 static void print_stack_overflow(void)
17955 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17956 * per-CPU IRQ handling contexts (thread information and stack)
17957 */
17958 union irq_ctx {
17959 - struct thread_info tinfo;
17960 - u32 stack[THREAD_SIZE/sizeof(u32)];
17961 -} __attribute__((aligned(PAGE_SIZE)));
17962 + unsigned long previous_esp;
17963 + u32 stack[THREAD_SIZE/sizeof(u32)];
17964 +} __attribute__((aligned(THREAD_SIZE)));
17965
17966 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17967 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17968 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17969 static inline int
17970 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17971 {
17972 - union irq_ctx *curctx, *irqctx;
17973 + union irq_ctx *irqctx;
17974 u32 *isp, arg1, arg2;
17975
17976 - curctx = (union irq_ctx *) current_thread_info();
17977 irqctx = __get_cpu_var(hardirq_ctx);
17978
17979 /*
17980 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17981 * handler) we can't do that and just have to keep using the
17982 * current stack (which is the irq stack already after all)
17983 */
17984 - if (unlikely(curctx == irqctx))
17985 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17986 return 0;
17987
17988 /* build the stack frame on the IRQ stack */
17989 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17990 - irqctx->tinfo.task = curctx->tinfo.task;
17991 - irqctx->tinfo.previous_esp = current_stack_pointer;
17992 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17993 + irqctx->previous_esp = current_stack_pointer;
17994
17995 - /*
17996 - * Copy the softirq bits in preempt_count so that the
17997 - * softirq checks work in the hardirq context.
17998 - */
17999 - irqctx->tinfo.preempt_count =
18000 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18001 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18002 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18003 + __set_fs(MAKE_MM_SEG(0));
18004 +#endif
18005
18006 if (unlikely(overflow))
18007 call_on_stack(print_stack_overflow, isp);
18008 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18009 : "0" (irq), "1" (desc), "2" (isp),
18010 "D" (desc->handle_irq)
18011 : "memory", "cc", "ecx");
18012 +
18013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18014 + __set_fs(current_thread_info()->addr_limit);
18015 +#endif
18016 +
18017 return 1;
18018 }
18019
18020 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18021 */
18022 void __cpuinit irq_ctx_init(int cpu)
18023 {
18024 - union irq_ctx *irqctx;
18025 -
18026 if (per_cpu(hardirq_ctx, cpu))
18027 return;
18028
18029 - irqctx = &per_cpu(hardirq_stack, cpu);
18030 - irqctx->tinfo.task = NULL;
18031 - irqctx->tinfo.exec_domain = NULL;
18032 - irqctx->tinfo.cpu = cpu;
18033 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18034 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18035 -
18036 - per_cpu(hardirq_ctx, cpu) = irqctx;
18037 -
18038 - irqctx = &per_cpu(softirq_stack, cpu);
18039 - irqctx->tinfo.task = NULL;
18040 - irqctx->tinfo.exec_domain = NULL;
18041 - irqctx->tinfo.cpu = cpu;
18042 - irqctx->tinfo.preempt_count = 0;
18043 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18044 -
18045 - per_cpu(softirq_ctx, cpu) = irqctx;
18046 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18047 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18048
18049 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18050 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18051 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18052 asmlinkage void do_softirq(void)
18053 {
18054 unsigned long flags;
18055 - struct thread_info *curctx;
18056 union irq_ctx *irqctx;
18057 u32 *isp;
18058
18059 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18060 local_irq_save(flags);
18061
18062 if (local_softirq_pending()) {
18063 - curctx = current_thread_info();
18064 irqctx = __get_cpu_var(softirq_ctx);
18065 - irqctx->tinfo.task = curctx->task;
18066 - irqctx->tinfo.previous_esp = current_stack_pointer;
18067 + irqctx->previous_esp = current_stack_pointer;
18068
18069 /* build the stack frame on the softirq stack */
18070 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18071 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18072 +
18073 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18074 + __set_fs(MAKE_MM_SEG(0));
18075 +#endif
18076
18077 call_on_stack(__do_softirq, isp);
18078 +
18079 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18080 + __set_fs(current_thread_info()->addr_limit);
18081 +#endif
18082 +
18083 /*
18084 * Shouldnt happen, we returned above if in_interrupt():
18085 */
18086 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18087 index 8d82a77..0baf312 100644
18088 --- a/arch/x86/kernel/kgdb.c
18089 +++ b/arch/x86/kernel/kgdb.c
18090 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18091
18092 /* clear the trace bit */
18093 linux_regs->flags &= ~X86_EFLAGS_TF;
18094 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18095 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18096
18097 /* set the trace bit if we're stepping */
18098 if (remcomInBuffer[0] == 's') {
18099 linux_regs->flags |= X86_EFLAGS_TF;
18100 kgdb_single_step = 1;
18101 - atomic_set(&kgdb_cpu_doing_single_step,
18102 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18103 raw_smp_processor_id());
18104 }
18105
18106 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18107 break;
18108
18109 case DIE_DEBUG:
18110 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
18111 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18112 raw_smp_processor_id()) {
18113 if (user_mode(regs))
18114 return single_step_cont(regs, args);
18115 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18116 return instruction_pointer(regs);
18117 }
18118
18119 -struct kgdb_arch arch_kgdb_ops = {
18120 +const struct kgdb_arch arch_kgdb_ops = {
18121 /* Breakpoint instruction: */
18122 .gdb_bpt_instr = { 0xcc },
18123 .flags = KGDB_HW_BREAKPOINT,
18124 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18125 index 7a67820..70ea187 100644
18126 --- a/arch/x86/kernel/kprobes.c
18127 +++ b/arch/x86/kernel/kprobes.c
18128 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18129 char op;
18130 s32 raddr;
18131 } __attribute__((packed)) * jop;
18132 - jop = (struct __arch_jmp_op *)from;
18133 +
18134 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18135 +
18136 + pax_open_kernel();
18137 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18138 jop->op = RELATIVEJUMP_INSTRUCTION;
18139 + pax_close_kernel();
18140 }
18141
18142 /*
18143 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18144 kprobe_opcode_t opcode;
18145 kprobe_opcode_t *orig_opcodes = opcodes;
18146
18147 - if (search_exception_tables((unsigned long)opcodes))
18148 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18149 return 0; /* Page fault may occur on this address. */
18150
18151 retry:
18152 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18153 disp = (u8 *) p->addr + *((s32 *) insn) -
18154 (u8 *) p->ainsn.insn;
18155 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18156 + pax_open_kernel();
18157 *(s32 *)insn = (s32) disp;
18158 + pax_close_kernel();
18159 }
18160 }
18161 #endif
18162 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18163
18164 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18165 {
18166 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18167 + pax_open_kernel();
18168 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18169 + pax_close_kernel();
18170
18171 fix_riprel(p);
18172
18173 - if (can_boost(p->addr))
18174 + if (can_boost(ktla_ktva(p->addr)))
18175 p->ainsn.boostable = 0;
18176 else
18177 p->ainsn.boostable = -1;
18178
18179 - p->opcode = *p->addr;
18180 + p->opcode = *(ktla_ktva(p->addr));
18181 }
18182
18183 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18184 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18185 if (p->opcode == BREAKPOINT_INSTRUCTION)
18186 regs->ip = (unsigned long)p->addr;
18187 else
18188 - regs->ip = (unsigned long)p->ainsn.insn;
18189 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18190 }
18191
18192 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18193 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18194 if (p->ainsn.boostable == 1 && !p->post_handler) {
18195 /* Boost up -- we can execute copied instructions directly */
18196 reset_current_kprobe();
18197 - regs->ip = (unsigned long)p->ainsn.insn;
18198 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18199 preempt_enable_no_resched();
18200 return;
18201 }
18202 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18203 struct kprobe_ctlblk *kcb;
18204
18205 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18206 - if (*addr != BREAKPOINT_INSTRUCTION) {
18207 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18208 /*
18209 * The breakpoint instruction was removed right
18210 * after we hit it. Another cpu has removed
18211 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18212 /* Skip orig_ax, ip, cs */
18213 " addq $24, %rsp\n"
18214 " popfq\n"
18215 +#ifdef KERNEXEC_PLUGIN
18216 + " btsq $63,(%rsp)\n"
18217 +#endif
18218 #else
18219 " pushf\n"
18220 /*
18221 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18222 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18223 {
18224 unsigned long *tos = stack_addr(regs);
18225 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18226 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18227 unsigned long orig_ip = (unsigned long)p->addr;
18228 kprobe_opcode_t *insn = p->ainsn.insn;
18229
18230 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18231 struct die_args *args = data;
18232 int ret = NOTIFY_DONE;
18233
18234 - if (args->regs && user_mode_vm(args->regs))
18235 + if (args->regs && user_mode(args->regs))
18236 return ret;
18237
18238 switch (val) {
18239 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18240 index 63b0ec8..6d92227 100644
18241 --- a/arch/x86/kernel/kvm.c
18242 +++ b/arch/x86/kernel/kvm.c
18243 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18244 pv_mmu_ops.set_pud = kvm_set_pud;
18245 #if PAGETABLE_LEVELS == 4
18246 pv_mmu_ops.set_pgd = kvm_set_pgd;
18247 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18248 #endif
18249 #endif
18250 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18251 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18252 index ec6ef60..ab2c824 100644
18253 --- a/arch/x86/kernel/ldt.c
18254 +++ b/arch/x86/kernel/ldt.c
18255 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18256 if (reload) {
18257 #ifdef CONFIG_SMP
18258 preempt_disable();
18259 - load_LDT(pc);
18260 + load_LDT_nolock(pc);
18261 if (!cpumask_equal(mm_cpumask(current->mm),
18262 cpumask_of(smp_processor_id())))
18263 smp_call_function(flush_ldt, current->mm, 1);
18264 preempt_enable();
18265 #else
18266 - load_LDT(pc);
18267 + load_LDT_nolock(pc);
18268 #endif
18269 }
18270 if (oldsize) {
18271 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18272 return err;
18273
18274 for (i = 0; i < old->size; i++)
18275 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18276 + write_ldt_entry(new->ldt, i, old->ldt + i);
18277 return 0;
18278 }
18279
18280 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18281 retval = copy_ldt(&mm->context, &old_mm->context);
18282 mutex_unlock(&old_mm->context.lock);
18283 }
18284 +
18285 + if (tsk == current) {
18286 + mm->context.vdso = 0;
18287 +
18288 +#ifdef CONFIG_X86_32
18289 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18290 + mm->context.user_cs_base = 0UL;
18291 + mm->context.user_cs_limit = ~0UL;
18292 +
18293 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18294 + cpus_clear(mm->context.cpu_user_cs_mask);
18295 +#endif
18296 +
18297 +#endif
18298 +#endif
18299 +
18300 + }
18301 +
18302 return retval;
18303 }
18304
18305 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18306 }
18307 }
18308
18309 +#ifdef CONFIG_PAX_SEGMEXEC
18310 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18311 + error = -EINVAL;
18312 + goto out_unlock;
18313 + }
18314 +#endif
18315 +
18316 fill_ldt(&ldt, &ldt_info);
18317 if (oldmode)
18318 ldt.avl = 0;
18319 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18320 index c1c429d..f02eaf9 100644
18321 --- a/arch/x86/kernel/machine_kexec_32.c
18322 +++ b/arch/x86/kernel/machine_kexec_32.c
18323 @@ -26,7 +26,7 @@
18324 #include <asm/system.h>
18325 #include <asm/cacheflush.h>
18326
18327 -static void set_idt(void *newidt, __u16 limit)
18328 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18329 {
18330 struct desc_ptr curidt;
18331
18332 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18333 }
18334
18335
18336 -static void set_gdt(void *newgdt, __u16 limit)
18337 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18338 {
18339 struct desc_ptr curgdt;
18340
18341 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18342 }
18343
18344 control_page = page_address(image->control_code_page);
18345 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18346 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18347
18348 relocate_kernel_ptr = control_page;
18349 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18350 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18351 index 1e47679..e73449d 100644
18352 --- a/arch/x86/kernel/microcode_amd.c
18353 +++ b/arch/x86/kernel/microcode_amd.c
18354 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18355 uci->mc = NULL;
18356 }
18357
18358 -static struct microcode_ops microcode_amd_ops = {
18359 +static const struct microcode_ops microcode_amd_ops = {
18360 .request_microcode_user = request_microcode_user,
18361 .request_microcode_fw = request_microcode_fw,
18362 .collect_cpu_info = collect_cpu_info_amd,
18363 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18364 .microcode_fini_cpu = microcode_fini_cpu_amd,
18365 };
18366
18367 -struct microcode_ops * __init init_amd_microcode(void)
18368 +const struct microcode_ops * __init init_amd_microcode(void)
18369 {
18370 return &microcode_amd_ops;
18371 }
18372 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18373 index 378e9a8..b5a6ea9 100644
18374 --- a/arch/x86/kernel/microcode_core.c
18375 +++ b/arch/x86/kernel/microcode_core.c
18376 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18377
18378 #define MICROCODE_VERSION "2.00"
18379
18380 -static struct microcode_ops *microcode_ops;
18381 +static const struct microcode_ops *microcode_ops;
18382
18383 /*
18384 * Synchronization.
18385 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18386 index 0d334dd..14cedaf 100644
18387 --- a/arch/x86/kernel/microcode_intel.c
18388 +++ b/arch/x86/kernel/microcode_intel.c
18389 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18390
18391 static int get_ucode_user(void *to, const void *from, size_t n)
18392 {
18393 - return copy_from_user(to, from, n);
18394 + return copy_from_user(to, (const void __force_user *)from, n);
18395 }
18396
18397 static enum ucode_state
18398 request_microcode_user(int cpu, const void __user *buf, size_t size)
18399 {
18400 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18401 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18402 }
18403
18404 static void microcode_fini_cpu(int cpu)
18405 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18406 uci->mc = NULL;
18407 }
18408
18409 -static struct microcode_ops microcode_intel_ops = {
18410 +static const struct microcode_ops microcode_intel_ops = {
18411 .request_microcode_user = request_microcode_user,
18412 .request_microcode_fw = request_microcode_fw,
18413 .collect_cpu_info = collect_cpu_info,
18414 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18415 .microcode_fini_cpu = microcode_fini_cpu,
18416 };
18417
18418 -struct microcode_ops * __init init_intel_microcode(void)
18419 +const struct microcode_ops * __init init_intel_microcode(void)
18420 {
18421 return &microcode_intel_ops;
18422 }
18423 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18424 index 89f386f..9028f51 100644
18425 --- a/arch/x86/kernel/module.c
18426 +++ b/arch/x86/kernel/module.c
18427 @@ -34,7 +34,7 @@
18428 #define DEBUGP(fmt...)
18429 #endif
18430
18431 -void *module_alloc(unsigned long size)
18432 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18433 {
18434 struct vm_struct *area;
18435
18436 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18437 if (!area)
18438 return NULL;
18439
18440 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18441 - PAGE_KERNEL_EXEC);
18442 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18443 +}
18444 +
18445 +void *module_alloc(unsigned long size)
18446 +{
18447 +
18448 +#ifdef CONFIG_PAX_KERNEXEC
18449 + return __module_alloc(size, PAGE_KERNEL);
18450 +#else
18451 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18452 +#endif
18453 +
18454 }
18455
18456 /* Free memory returned from module_alloc */
18457 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18458 vfree(module_region);
18459 }
18460
18461 +#ifdef CONFIG_PAX_KERNEXEC
18462 +#ifdef CONFIG_X86_32
18463 +void *module_alloc_exec(unsigned long size)
18464 +{
18465 + struct vm_struct *area;
18466 +
18467 + if (size == 0)
18468 + return NULL;
18469 +
18470 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18471 + return area ? area->addr : NULL;
18472 +}
18473 +EXPORT_SYMBOL(module_alloc_exec);
18474 +
18475 +void module_free_exec(struct module *mod, void *module_region)
18476 +{
18477 + vunmap(module_region);
18478 +}
18479 +EXPORT_SYMBOL(module_free_exec);
18480 +#else
18481 +void module_free_exec(struct module *mod, void *module_region)
18482 +{
18483 + module_free(mod, module_region);
18484 +}
18485 +EXPORT_SYMBOL(module_free_exec);
18486 +
18487 +void *module_alloc_exec(unsigned long size)
18488 +{
18489 + return __module_alloc(size, PAGE_KERNEL_RX);
18490 +}
18491 +EXPORT_SYMBOL(module_alloc_exec);
18492 +#endif
18493 +#endif
18494 +
18495 /* We don't need anything special. */
18496 int module_frob_arch_sections(Elf_Ehdr *hdr,
18497 Elf_Shdr *sechdrs,
18498 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18499 unsigned int i;
18500 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18501 Elf32_Sym *sym;
18502 - uint32_t *location;
18503 + uint32_t *plocation, location;
18504
18505 DEBUGP("Applying relocate section %u to %u\n", relsec,
18506 sechdrs[relsec].sh_info);
18507 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18508 /* This is where to make the change */
18509 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18510 - + rel[i].r_offset;
18511 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18512 + location = (uint32_t)plocation;
18513 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18514 + plocation = ktla_ktva((void *)plocation);
18515 /* This is the symbol it is referring to. Note that all
18516 undefined symbols have been resolved. */
18517 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18518 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18519 switch (ELF32_R_TYPE(rel[i].r_info)) {
18520 case R_386_32:
18521 /* We add the value into the location given */
18522 - *location += sym->st_value;
18523 + pax_open_kernel();
18524 + *plocation += sym->st_value;
18525 + pax_close_kernel();
18526 break;
18527 case R_386_PC32:
18528 /* Add the value, subtract its postition */
18529 - *location += sym->st_value - (uint32_t)location;
18530 + pax_open_kernel();
18531 + *plocation += sym->st_value - location;
18532 + pax_close_kernel();
18533 break;
18534 default:
18535 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18536 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18537 case R_X86_64_NONE:
18538 break;
18539 case R_X86_64_64:
18540 + pax_open_kernel();
18541 *(u64 *)loc = val;
18542 + pax_close_kernel();
18543 break;
18544 case R_X86_64_32:
18545 + pax_open_kernel();
18546 *(u32 *)loc = val;
18547 + pax_close_kernel();
18548 if (val != *(u32 *)loc)
18549 goto overflow;
18550 break;
18551 case R_X86_64_32S:
18552 + pax_open_kernel();
18553 *(s32 *)loc = val;
18554 + pax_close_kernel();
18555 if ((s64)val != *(s32 *)loc)
18556 goto overflow;
18557 break;
18558 case R_X86_64_PC32:
18559 val -= (u64)loc;
18560 + pax_open_kernel();
18561 *(u32 *)loc = val;
18562 + pax_close_kernel();
18563 +
18564 #if 0
18565 if ((s64)val != *(s32 *)loc)
18566 goto overflow;
18567 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18568 index 3a7c5a4..9191528 100644
18569 --- a/arch/x86/kernel/paravirt-spinlocks.c
18570 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18571 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18572 __raw_spin_lock(lock);
18573 }
18574
18575 -struct pv_lock_ops pv_lock_ops = {
18576 +struct pv_lock_ops pv_lock_ops __read_only = {
18577 #ifdef CONFIG_SMP
18578 .spin_is_locked = __ticket_spin_is_locked,
18579 .spin_is_contended = __ticket_spin_is_contended,
18580 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18581 index 1b1739d..dea6077 100644
18582 --- a/arch/x86/kernel/paravirt.c
18583 +++ b/arch/x86/kernel/paravirt.c
18584 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18585 {
18586 return x;
18587 }
18588 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18589 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18590 +#endif
18591
18592 void __init default_banner(void)
18593 {
18594 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18595 * corresponding structure. */
18596 static void *get_call_destination(u8 type)
18597 {
18598 - struct paravirt_patch_template tmpl = {
18599 + const struct paravirt_patch_template tmpl = {
18600 .pv_init_ops = pv_init_ops,
18601 .pv_time_ops = pv_time_ops,
18602 .pv_cpu_ops = pv_cpu_ops,
18603 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18604 .pv_lock_ops = pv_lock_ops,
18605 #endif
18606 };
18607 +
18608 + pax_track_stack();
18609 return *((void **)&tmpl + type);
18610 }
18611
18612 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18613 if (opfunc == NULL)
18614 /* If there's no function, patch it with a ud2a (BUG) */
18615 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18616 - else if (opfunc == _paravirt_nop)
18617 + else if (opfunc == (void *)_paravirt_nop)
18618 /* If the operation is a nop, then nop the callsite */
18619 ret = paravirt_patch_nop();
18620
18621 /* identity functions just return their single argument */
18622 - else if (opfunc == _paravirt_ident_32)
18623 + else if (opfunc == (void *)_paravirt_ident_32)
18624 ret = paravirt_patch_ident_32(insnbuf, len);
18625 - else if (opfunc == _paravirt_ident_64)
18626 + else if (opfunc == (void *)_paravirt_ident_64)
18627 ret = paravirt_patch_ident_64(insnbuf, len);
18628 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18629 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18630 + ret = paravirt_patch_ident_64(insnbuf, len);
18631 +#endif
18632
18633 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18634 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18635 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18636 if (insn_len > len || start == NULL)
18637 insn_len = len;
18638 else
18639 - memcpy(insnbuf, start, insn_len);
18640 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18641
18642 return insn_len;
18643 }
18644 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18645 preempt_enable();
18646 }
18647
18648 -struct pv_info pv_info = {
18649 +struct pv_info pv_info __read_only = {
18650 .name = "bare hardware",
18651 .paravirt_enabled = 0,
18652 .kernel_rpl = 0,
18653 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18654 };
18655
18656 -struct pv_init_ops pv_init_ops = {
18657 +struct pv_init_ops pv_init_ops __read_only = {
18658 .patch = native_patch,
18659 };
18660
18661 -struct pv_time_ops pv_time_ops = {
18662 +struct pv_time_ops pv_time_ops __read_only = {
18663 .sched_clock = native_sched_clock,
18664 };
18665
18666 -struct pv_irq_ops pv_irq_ops = {
18667 +struct pv_irq_ops pv_irq_ops __read_only = {
18668 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18669 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18670 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18671 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18672 #endif
18673 };
18674
18675 -struct pv_cpu_ops pv_cpu_ops = {
18676 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18677 .cpuid = native_cpuid,
18678 .get_debugreg = native_get_debugreg,
18679 .set_debugreg = native_set_debugreg,
18680 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18681 .end_context_switch = paravirt_nop,
18682 };
18683
18684 -struct pv_apic_ops pv_apic_ops = {
18685 +struct pv_apic_ops pv_apic_ops __read_only = {
18686 #ifdef CONFIG_X86_LOCAL_APIC
18687 .startup_ipi_hook = paravirt_nop,
18688 #endif
18689 };
18690
18691 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18692 +#ifdef CONFIG_X86_32
18693 +#ifdef CONFIG_X86_PAE
18694 +/* 64-bit pagetable entries */
18695 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18696 +#else
18697 /* 32-bit pagetable entries */
18698 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18699 +#endif
18700 #else
18701 /* 64-bit pagetable entries */
18702 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18703 #endif
18704
18705 -struct pv_mmu_ops pv_mmu_ops = {
18706 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18707
18708 .read_cr2 = native_read_cr2,
18709 .write_cr2 = native_write_cr2,
18710 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18711 .make_pud = PTE_IDENT,
18712
18713 .set_pgd = native_set_pgd,
18714 + .set_pgd_batched = native_set_pgd_batched,
18715 #endif
18716 #endif /* PAGETABLE_LEVELS >= 3 */
18717
18718 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18719 },
18720
18721 .set_fixmap = native_set_fixmap,
18722 +
18723 +#ifdef CONFIG_PAX_KERNEXEC
18724 + .pax_open_kernel = native_pax_open_kernel,
18725 + .pax_close_kernel = native_pax_close_kernel,
18726 +#endif
18727 +
18728 };
18729
18730 EXPORT_SYMBOL_GPL(pv_time_ops);
18731 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18732 index 1a2d4b1..6a0dd55 100644
18733 --- a/arch/x86/kernel/pci-calgary_64.c
18734 +++ b/arch/x86/kernel/pci-calgary_64.c
18735 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18736 free_pages((unsigned long)vaddr, get_order(size));
18737 }
18738
18739 -static struct dma_map_ops calgary_dma_ops = {
18740 +static const struct dma_map_ops calgary_dma_ops = {
18741 .alloc_coherent = calgary_alloc_coherent,
18742 .free_coherent = calgary_free_coherent,
18743 .map_sg = calgary_map_sg,
18744 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18745 index 6ac3931..42b4414 100644
18746 --- a/arch/x86/kernel/pci-dma.c
18747 +++ b/arch/x86/kernel/pci-dma.c
18748 @@ -14,7 +14,7 @@
18749
18750 static int forbid_dac __read_mostly;
18751
18752 -struct dma_map_ops *dma_ops;
18753 +const struct dma_map_ops *dma_ops;
18754 EXPORT_SYMBOL(dma_ops);
18755
18756 static int iommu_sac_force __read_mostly;
18757 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18758
18759 int dma_supported(struct device *dev, u64 mask)
18760 {
18761 - struct dma_map_ops *ops = get_dma_ops(dev);
18762 + const struct dma_map_ops *ops = get_dma_ops(dev);
18763
18764 #ifdef CONFIG_PCI
18765 if (mask > 0xffffffff && forbid_dac > 0) {
18766 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18767 index 1c76691..e3632db 100644
18768 --- a/arch/x86/kernel/pci-gart_64.c
18769 +++ b/arch/x86/kernel/pci-gart_64.c
18770 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18771 return -1;
18772 }
18773
18774 -static struct dma_map_ops gart_dma_ops = {
18775 +static const struct dma_map_ops gart_dma_ops = {
18776 .map_sg = gart_map_sg,
18777 .unmap_sg = gart_unmap_sg,
18778 .map_page = gart_map_page,
18779 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18780 index a3933d4..c898869 100644
18781 --- a/arch/x86/kernel/pci-nommu.c
18782 +++ b/arch/x86/kernel/pci-nommu.c
18783 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18784 flush_write_buffers();
18785 }
18786
18787 -struct dma_map_ops nommu_dma_ops = {
18788 +const struct dma_map_ops nommu_dma_ops = {
18789 .alloc_coherent = dma_generic_alloc_coherent,
18790 .free_coherent = nommu_free_coherent,
18791 .map_sg = nommu_map_sg,
18792 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18793 index aaa6b78..4de1881 100644
18794 --- a/arch/x86/kernel/pci-swiotlb.c
18795 +++ b/arch/x86/kernel/pci-swiotlb.c
18796 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18797 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18798 }
18799
18800 -static struct dma_map_ops swiotlb_dma_ops = {
18801 +static const struct dma_map_ops swiotlb_dma_ops = {
18802 .mapping_error = swiotlb_dma_mapping_error,
18803 .alloc_coherent = x86_swiotlb_alloc_coherent,
18804 .free_coherent = swiotlb_free_coherent,
18805 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18806 index fc6c84d..0312ca2 100644
18807 --- a/arch/x86/kernel/process.c
18808 +++ b/arch/x86/kernel/process.c
18809 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18810
18811 void free_thread_info(struct thread_info *ti)
18812 {
18813 - free_thread_xstate(ti->task);
18814 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18815 }
18816
18817 +static struct kmem_cache *task_struct_cachep;
18818 +
18819 void arch_task_cache_init(void)
18820 {
18821 - task_xstate_cachep =
18822 - kmem_cache_create("task_xstate", xstate_size,
18823 + /* create a slab on which task_structs can be allocated */
18824 + task_struct_cachep =
18825 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18826 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18827 +
18828 + task_xstate_cachep =
18829 + kmem_cache_create("task_xstate", xstate_size,
18830 __alignof__(union thread_xstate),
18831 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18832 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18833 +}
18834 +
18835 +struct task_struct *alloc_task_struct(void)
18836 +{
18837 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18838 +}
18839 +
18840 +void free_task_struct(struct task_struct *task)
18841 +{
18842 + free_thread_xstate(task);
18843 + kmem_cache_free(task_struct_cachep, task);
18844 }
18845
18846 /*
18847 @@ -73,7 +90,7 @@ void exit_thread(void)
18848 unsigned long *bp = t->io_bitmap_ptr;
18849
18850 if (bp) {
18851 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18852 + struct tss_struct *tss = init_tss + get_cpu();
18853
18854 t->io_bitmap_ptr = NULL;
18855 clear_thread_flag(TIF_IO_BITMAP);
18856 @@ -93,6 +110,9 @@ void flush_thread(void)
18857
18858 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18859
18860 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18861 + loadsegment(gs, 0);
18862 +#endif
18863 tsk->thread.debugreg0 = 0;
18864 tsk->thread.debugreg1 = 0;
18865 tsk->thread.debugreg2 = 0;
18866 @@ -307,7 +327,7 @@ void default_idle(void)
18867 EXPORT_SYMBOL(default_idle);
18868 #endif
18869
18870 -void stop_this_cpu(void *dummy)
18871 +__noreturn void stop_this_cpu(void *dummy)
18872 {
18873 local_irq_disable();
18874 /*
18875 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18876 }
18877 early_param("idle", idle_setup);
18878
18879 -unsigned long arch_align_stack(unsigned long sp)
18880 +#ifdef CONFIG_PAX_RANDKSTACK
18881 +void pax_randomize_kstack(struct pt_regs *regs)
18882 {
18883 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18884 - sp -= get_random_int() % 8192;
18885 - return sp & ~0xf;
18886 -}
18887 + struct thread_struct *thread = &current->thread;
18888 + unsigned long time;
18889
18890 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18891 -{
18892 - unsigned long range_end = mm->brk + 0x02000000;
18893 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18894 + if (!randomize_va_space)
18895 + return;
18896 +
18897 + if (v8086_mode(regs))
18898 + return;
18899 +
18900 + rdtscl(time);
18901 +
18902 + /* P4 seems to return a 0 LSB, ignore it */
18903 +#ifdef CONFIG_MPENTIUM4
18904 + time &= 0x3EUL;
18905 + time <<= 2;
18906 +#elif defined(CONFIG_X86_64)
18907 + time &= 0xFUL;
18908 + time <<= 4;
18909 +#else
18910 + time &= 0x1FUL;
18911 + time <<= 3;
18912 +#endif
18913 +
18914 + thread->sp0 ^= time;
18915 + load_sp0(init_tss + smp_processor_id(), thread);
18916 +
18917 +#ifdef CONFIG_X86_64
18918 + percpu_write(kernel_stack, thread->sp0);
18919 +#endif
18920 }
18921 +#endif
18922
18923 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18924 index c40c432..6e1df72 100644
18925 --- a/arch/x86/kernel/process_32.c
18926 +++ b/arch/x86/kernel/process_32.c
18927 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18928 unsigned long thread_saved_pc(struct task_struct *tsk)
18929 {
18930 return ((unsigned long *)tsk->thread.sp)[3];
18931 +//XXX return tsk->thread.eip;
18932 }
18933
18934 #ifndef CONFIG_SMP
18935 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18936 unsigned short ss, gs;
18937 const char *board;
18938
18939 - if (user_mode_vm(regs)) {
18940 + if (user_mode(regs)) {
18941 sp = regs->sp;
18942 ss = regs->ss & 0xffff;
18943 - gs = get_user_gs(regs);
18944 } else {
18945 sp = (unsigned long) (&regs->sp);
18946 savesegment(ss, ss);
18947 - savesegment(gs, gs);
18948 }
18949 + gs = get_user_gs(regs);
18950
18951 printk("\n");
18952
18953 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18954 regs.bx = (unsigned long) fn;
18955 regs.dx = (unsigned long) arg;
18956
18957 - regs.ds = __USER_DS;
18958 - regs.es = __USER_DS;
18959 + regs.ds = __KERNEL_DS;
18960 + regs.es = __KERNEL_DS;
18961 regs.fs = __KERNEL_PERCPU;
18962 - regs.gs = __KERNEL_STACK_CANARY;
18963 + savesegment(gs, regs.gs);
18964 regs.orig_ax = -1;
18965 regs.ip = (unsigned long) kernel_thread_helper;
18966 regs.cs = __KERNEL_CS | get_kernel_rpl();
18967 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18968 struct task_struct *tsk;
18969 int err;
18970
18971 - childregs = task_pt_regs(p);
18972 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18973 *childregs = *regs;
18974 childregs->ax = 0;
18975 childregs->sp = sp;
18976
18977 p->thread.sp = (unsigned long) childregs;
18978 p->thread.sp0 = (unsigned long) (childregs+1);
18979 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18980
18981 p->thread.ip = (unsigned long) ret_from_fork;
18982
18983 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18984 struct thread_struct *prev = &prev_p->thread,
18985 *next = &next_p->thread;
18986 int cpu = smp_processor_id();
18987 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18988 + struct tss_struct *tss = init_tss + cpu;
18989 bool preload_fpu;
18990
18991 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18992 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18993 */
18994 lazy_save_gs(prev->gs);
18995
18996 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18997 + __set_fs(task_thread_info(next_p)->addr_limit);
18998 +#endif
18999 +
19000 /*
19001 * Load the per-thread Thread-Local Storage descriptor.
19002 */
19003 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19004 */
19005 arch_end_context_switch(next_p);
19006
19007 + percpu_write(current_task, next_p);
19008 + percpu_write(current_tinfo, &next_p->tinfo);
19009 +
19010 if (preload_fpu)
19011 __math_state_restore();
19012
19013 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19014 if (prev->gs | next->gs)
19015 lazy_load_gs(next->gs);
19016
19017 - percpu_write(current_task, next_p);
19018 -
19019 return prev_p;
19020 }
19021
19022 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19023 } while (count++ < 16);
19024 return 0;
19025 }
19026 -
19027 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19028 index 39493bc..196816d 100644
19029 --- a/arch/x86/kernel/process_64.c
19030 +++ b/arch/x86/kernel/process_64.c
19031 @@ -91,7 +91,7 @@ static void __exit_idle(void)
19032 void exit_idle(void)
19033 {
19034 /* idle loop has pid 0 */
19035 - if (current->pid)
19036 + if (task_pid_nr(current))
19037 return;
19038 __exit_idle();
19039 }
19040 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19041 if (!board)
19042 board = "";
19043 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19044 - current->pid, current->comm, print_tainted(),
19045 + task_pid_nr(current), current->comm, print_tainted(),
19046 init_utsname()->release,
19047 (int)strcspn(init_utsname()->version, " "),
19048 init_utsname()->version, board);
19049 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19050 struct pt_regs *childregs;
19051 struct task_struct *me = current;
19052
19053 - childregs = ((struct pt_regs *)
19054 - (THREAD_SIZE + task_stack_page(p))) - 1;
19055 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19056 *childregs = *regs;
19057
19058 childregs->ax = 0;
19059 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19060 p->thread.sp = (unsigned long) childregs;
19061 p->thread.sp0 = (unsigned long) (childregs+1);
19062 p->thread.usersp = me->thread.usersp;
19063 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19064
19065 set_tsk_thread_flag(p, TIF_FORK);
19066
19067 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19068 struct thread_struct *prev = &prev_p->thread;
19069 struct thread_struct *next = &next_p->thread;
19070 int cpu = smp_processor_id();
19071 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19072 + struct tss_struct *tss = init_tss + cpu;
19073 unsigned fsindex, gsindex;
19074 bool preload_fpu;
19075
19076 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19077 prev->usersp = percpu_read(old_rsp);
19078 percpu_write(old_rsp, next->usersp);
19079 percpu_write(current_task, next_p);
19080 + percpu_write(current_tinfo, &next_p->tinfo);
19081
19082 - percpu_write(kernel_stack,
19083 - (unsigned long)task_stack_page(next_p) +
19084 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19085 + percpu_write(kernel_stack, next->sp0);
19086
19087 /*
19088 * Now maybe reload the debug registers and handle I/O bitmaps
19089 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19090 if (!p || p == current || p->state == TASK_RUNNING)
19091 return 0;
19092 stack = (unsigned long)task_stack_page(p);
19093 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19094 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19095 return 0;
19096 fp = *(u64 *)(p->thread.sp);
19097 do {
19098 - if (fp < (unsigned long)stack ||
19099 - fp >= (unsigned long)stack+THREAD_SIZE)
19100 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19101 return 0;
19102 ip = *(u64 *)(fp+8);
19103 if (!in_sched_functions(ip))
19104 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19105 index c06acdd..3f5fff5 100644
19106 --- a/arch/x86/kernel/ptrace.c
19107 +++ b/arch/x86/kernel/ptrace.c
19108 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19109 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19110 {
19111 int ret;
19112 - unsigned long __user *datap = (unsigned long __user *)data;
19113 + unsigned long __user *datap = (__force unsigned long __user *)data;
19114
19115 switch (request) {
19116 /* read the word at location addr in the USER area. */
19117 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19118 if (addr < 0)
19119 return -EIO;
19120 ret = do_get_thread_area(child, addr,
19121 - (struct user_desc __user *) data);
19122 + (__force struct user_desc __user *) data);
19123 break;
19124
19125 case PTRACE_SET_THREAD_AREA:
19126 if (addr < 0)
19127 return -EIO;
19128 ret = do_set_thread_area(child, addr,
19129 - (struct user_desc __user *) data, 0);
19130 + (__force struct user_desc __user *) data, 0);
19131 break;
19132 #endif
19133
19134 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19135 #ifdef CONFIG_X86_PTRACE_BTS
19136 case PTRACE_BTS_CONFIG:
19137 ret = ptrace_bts_config
19138 - (child, data, (struct ptrace_bts_config __user *)addr);
19139 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19140 break;
19141
19142 case PTRACE_BTS_STATUS:
19143 ret = ptrace_bts_status
19144 - (child, data, (struct ptrace_bts_config __user *)addr);
19145 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19146 break;
19147
19148 case PTRACE_BTS_SIZE:
19149 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19150
19151 case PTRACE_BTS_GET:
19152 ret = ptrace_bts_read_record
19153 - (child, data, (struct bts_struct __user *) addr);
19154 + (child, data, (__force struct bts_struct __user *) addr);
19155 break;
19156
19157 case PTRACE_BTS_CLEAR:
19158 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19159
19160 case PTRACE_BTS_DRAIN:
19161 ret = ptrace_bts_drain
19162 - (child, data, (struct bts_struct __user *) addr);
19163 + (child, data, (__force struct bts_struct __user *) addr);
19164 break;
19165 #endif /* CONFIG_X86_PTRACE_BTS */
19166
19167 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19168 info.si_code = si_code;
19169
19170 /* User-mode ip? */
19171 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19172 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19173
19174 /* Send us the fake SIGTRAP */
19175 force_sig_info(SIGTRAP, &info, tsk);
19176 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19177 * We must return the syscall number to actually look up in the table.
19178 * This can be -1L to skip running any syscall at all.
19179 */
19180 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
19181 +long syscall_trace_enter(struct pt_regs *regs)
19182 {
19183 long ret = 0;
19184
19185 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19186 return ret ?: regs->orig_ax;
19187 }
19188
19189 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
19190 +void syscall_trace_leave(struct pt_regs *regs)
19191 {
19192 if (unlikely(current->audit_context))
19193 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19194 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19195 index cf98100..e76e03d 100644
19196 --- a/arch/x86/kernel/reboot.c
19197 +++ b/arch/x86/kernel/reboot.c
19198 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19199 EXPORT_SYMBOL(pm_power_off);
19200
19201 static const struct desc_ptr no_idt = {};
19202 -static int reboot_mode;
19203 +static unsigned short reboot_mode;
19204 enum reboot_type reboot_type = BOOT_KBD;
19205 int reboot_force;
19206
19207 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
19208 controller to pulse the CPU reset line, which is more thorough, but
19209 doesn't work with at least one type of 486 motherboard. It is easy
19210 to stop this code working; hence the copious comments. */
19211 -static const unsigned long long
19212 -real_mode_gdt_entries [3] =
19213 +static struct desc_struct
19214 +real_mode_gdt_entries [3] __read_only =
19215 {
19216 - 0x0000000000000000ULL, /* Null descriptor */
19217 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19218 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19219 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19220 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19221 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19222 };
19223
19224 static const struct desc_ptr
19225 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19226 * specified by the code and length parameters.
19227 * We assume that length will aways be less that 100!
19228 */
19229 -void machine_real_restart(const unsigned char *code, int length)
19230 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19231 {
19232 local_irq_disable();
19233
19234 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19235 /* Remap the kernel at virtual address zero, as well as offset zero
19236 from the kernel segment. This assumes the kernel segment starts at
19237 virtual address PAGE_OFFSET. */
19238 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19239 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19240 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19241 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19242
19243 /*
19244 * Use `swapper_pg_dir' as our page directory.
19245 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19246 boot)". This seems like a fairly standard thing that gets set by
19247 REBOOT.COM programs, and the previous reset routine did this
19248 too. */
19249 - *((unsigned short *)0x472) = reboot_mode;
19250 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19251
19252 /* For the switch to real mode, copy some code to low memory. It has
19253 to be in the first 64k because it is running in 16-bit mode, and it
19254 has to have the same physical and virtual address, because it turns
19255 off paging. Copy it near the end of the first page, out of the way
19256 of BIOS variables. */
19257 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19258 - real_mode_switch, sizeof (real_mode_switch));
19259 - memcpy((void *)(0x1000 - 100), code, length);
19260 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19261 + memcpy(__va(0x1000 - 100), code, length);
19262
19263 /* Set up the IDT for real mode. */
19264 load_idt(&real_mode_idt);
19265 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19266 __asm__ __volatile__ ("ljmp $0x0008,%0"
19267 :
19268 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19269 + do { } while (1);
19270 }
19271 #ifdef CONFIG_APM_MODULE
19272 EXPORT_SYMBOL(machine_real_restart);
19273 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19274 {
19275 }
19276
19277 -static void native_machine_emergency_restart(void)
19278 +__noreturn static void native_machine_emergency_restart(void)
19279 {
19280 int i;
19281
19282 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19283 #endif
19284 }
19285
19286 -static void __machine_emergency_restart(int emergency)
19287 +static __noreturn void __machine_emergency_restart(int emergency)
19288 {
19289 reboot_emergency = emergency;
19290 machine_ops.emergency_restart();
19291 }
19292
19293 -static void native_machine_restart(char *__unused)
19294 +static __noreturn void native_machine_restart(char *__unused)
19295 {
19296 printk("machine restart\n");
19297
19298 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19299 __machine_emergency_restart(0);
19300 }
19301
19302 -static void native_machine_halt(void)
19303 +static __noreturn void native_machine_halt(void)
19304 {
19305 /* stop other cpus and apics */
19306 machine_shutdown();
19307 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19308 stop_this_cpu(NULL);
19309 }
19310
19311 -static void native_machine_power_off(void)
19312 +__noreturn static void native_machine_power_off(void)
19313 {
19314 if (pm_power_off) {
19315 if (!reboot_force)
19316 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19317 }
19318 /* a fallback in case there is no PM info available */
19319 tboot_shutdown(TB_SHUTDOWN_HALT);
19320 + do { } while (1);
19321 }
19322
19323 struct machine_ops machine_ops = {
19324 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19325 index 7a6f3b3..976a959 100644
19326 --- a/arch/x86/kernel/relocate_kernel_64.S
19327 +++ b/arch/x86/kernel/relocate_kernel_64.S
19328 @@ -11,6 +11,7 @@
19329 #include <asm/kexec.h>
19330 #include <asm/processor-flags.h>
19331 #include <asm/pgtable_types.h>
19332 +#include <asm/alternative-asm.h>
19333
19334 /*
19335 * Must be relocatable PIC code callable as a C function
19336 @@ -167,6 +168,7 @@ identity_mapped:
19337 xorq %r14, %r14
19338 xorq %r15, %r15
19339
19340 + pax_force_retaddr 0, 1
19341 ret
19342
19343 1:
19344 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19345 index 5449a26..0b6c759 100644
19346 --- a/arch/x86/kernel/setup.c
19347 +++ b/arch/x86/kernel/setup.c
19348 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19349
19350 if (!boot_params.hdr.root_flags)
19351 root_mountflags &= ~MS_RDONLY;
19352 - init_mm.start_code = (unsigned long) _text;
19353 - init_mm.end_code = (unsigned long) _etext;
19354 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19355 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19356 init_mm.end_data = (unsigned long) _edata;
19357 init_mm.brk = _brk_end;
19358
19359 - code_resource.start = virt_to_phys(_text);
19360 - code_resource.end = virt_to_phys(_etext)-1;
19361 - data_resource.start = virt_to_phys(_etext);
19362 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19363 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19364 + data_resource.start = virt_to_phys(_sdata);
19365 data_resource.end = virt_to_phys(_edata)-1;
19366 bss_resource.start = virt_to_phys(&__bss_start);
19367 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19368 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19369 index d559af9..524c6ad 100644
19370 --- a/arch/x86/kernel/setup_percpu.c
19371 +++ b/arch/x86/kernel/setup_percpu.c
19372 @@ -25,19 +25,17 @@
19373 # define DBG(x...)
19374 #endif
19375
19376 -DEFINE_PER_CPU(int, cpu_number);
19377 +#ifdef CONFIG_SMP
19378 +DEFINE_PER_CPU(unsigned int, cpu_number);
19379 EXPORT_PER_CPU_SYMBOL(cpu_number);
19380 +#endif
19381
19382 -#ifdef CONFIG_X86_64
19383 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19384 -#else
19385 -#define BOOT_PERCPU_OFFSET 0
19386 -#endif
19387
19388 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19389 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19390
19391 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19392 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19393 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19394 };
19395 EXPORT_SYMBOL(__per_cpu_offset);
19396 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19397 {
19398 #ifdef CONFIG_X86_32
19399 struct desc_struct gdt;
19400 + unsigned long base = per_cpu_offset(cpu);
19401
19402 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19403 - 0x2 | DESCTYPE_S, 0x8);
19404 - gdt.s = 1;
19405 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19406 + 0x83 | DESCTYPE_S, 0xC);
19407 write_gdt_entry(get_cpu_gdt_table(cpu),
19408 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19409 #endif
19410 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19411 /* alrighty, percpu areas up and running */
19412 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19413 for_each_possible_cpu(cpu) {
19414 +#ifdef CONFIG_CC_STACKPROTECTOR
19415 +#ifdef CONFIG_X86_32
19416 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19417 +#endif
19418 +#endif
19419 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19420 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19421 per_cpu(cpu_number, cpu) = cpu;
19422 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19423 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19424 #endif
19425 #endif
19426 +#ifdef CONFIG_CC_STACKPROTECTOR
19427 +#ifdef CONFIG_X86_32
19428 + if (!cpu)
19429 + per_cpu(stack_canary.canary, cpu) = canary;
19430 +#endif
19431 +#endif
19432 /*
19433 * Up to this point, the boot CPU has been using .data.init
19434 * area. Reload any changed state for the boot CPU.
19435 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19436 index 6a44a76..a9287a1 100644
19437 --- a/arch/x86/kernel/signal.c
19438 +++ b/arch/x86/kernel/signal.c
19439 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19440 * Align the stack pointer according to the i386 ABI,
19441 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19442 */
19443 - sp = ((sp + 4) & -16ul) - 4;
19444 + sp = ((sp - 12) & -16ul) - 4;
19445 #else /* !CONFIG_X86_32 */
19446 sp = round_down(sp, 16) - 8;
19447 #endif
19448 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19449 * Return an always-bogus address instead so we will die with SIGSEGV.
19450 */
19451 if (onsigstack && !likely(on_sig_stack(sp)))
19452 - return (void __user *)-1L;
19453 + return (__force void __user *)-1L;
19454
19455 /* save i387 state */
19456 if (used_math() && save_i387_xstate(*fpstate) < 0)
19457 - return (void __user *)-1L;
19458 + return (__force void __user *)-1L;
19459
19460 return (void __user *)sp;
19461 }
19462 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19463 }
19464
19465 if (current->mm->context.vdso)
19466 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19467 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19468 else
19469 - restorer = &frame->retcode;
19470 + restorer = (void __user *)&frame->retcode;
19471 if (ka->sa.sa_flags & SA_RESTORER)
19472 restorer = ka->sa.sa_restorer;
19473
19474 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19475 * reasons and because gdb uses it as a signature to notice
19476 * signal handler stack frames.
19477 */
19478 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19479 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19480
19481 if (err)
19482 return -EFAULT;
19483 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19484 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19485
19486 /* Set up to return from userspace. */
19487 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19488 + if (current->mm->context.vdso)
19489 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19490 + else
19491 + restorer = (void __user *)&frame->retcode;
19492 if (ka->sa.sa_flags & SA_RESTORER)
19493 restorer = ka->sa.sa_restorer;
19494 put_user_ex(restorer, &frame->pretcode);
19495 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19496 * reasons and because gdb uses it as a signature to notice
19497 * signal handler stack frames.
19498 */
19499 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19500 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19501 } put_user_catch(err);
19502
19503 if (err)
19504 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19505 int signr;
19506 sigset_t *oldset;
19507
19508 + pax_track_stack();
19509 +
19510 /*
19511 * We want the common case to go fast, which is why we may in certain
19512 * cases get here from kernel mode. Just return without doing anything
19513 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19514 * X86_32: vm86 regs switched out by assembly code before reaching
19515 * here, so testing against kernel CS suffices.
19516 */
19517 - if (!user_mode(regs))
19518 + if (!user_mode_novm(regs))
19519 return;
19520
19521 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19522 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19523 index 7e8e905..64d5c32 100644
19524 --- a/arch/x86/kernel/smpboot.c
19525 +++ b/arch/x86/kernel/smpboot.c
19526 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19527 */
19528 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19529
19530 -void cpu_hotplug_driver_lock()
19531 +void cpu_hotplug_driver_lock(void)
19532 {
19533 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19534 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19535 }
19536
19537 -void cpu_hotplug_driver_unlock()
19538 +void cpu_hotplug_driver_unlock(void)
19539 {
19540 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19541 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19542 }
19543
19544 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19545 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19546 * target processor state.
19547 */
19548 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19549 - (unsigned long)stack_start.sp);
19550 + stack_start);
19551
19552 /*
19553 * Run STARTUP IPI loop.
19554 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19555 set_idle_for_cpu(cpu, c_idle.idle);
19556 do_rest:
19557 per_cpu(current_task, cpu) = c_idle.idle;
19558 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19559 #ifdef CONFIG_X86_32
19560 /* Stack for startup_32 can be just as for start_secondary onwards */
19561 irq_ctx_init(cpu);
19562 @@ -750,13 +751,15 @@ do_rest:
19563 #else
19564 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19565 initial_gs = per_cpu_offset(cpu);
19566 - per_cpu(kernel_stack, cpu) =
19567 - (unsigned long)task_stack_page(c_idle.idle) -
19568 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19569 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19570 #endif
19571 +
19572 + pax_open_kernel();
19573 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19574 + pax_close_kernel();
19575 +
19576 initial_code = (unsigned long)start_secondary;
19577 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19578 + stack_start = c_idle.idle->thread.sp;
19579
19580 /* start_ip had better be page-aligned! */
19581 start_ip = setup_trampoline();
19582 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19583
19584 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19585
19586 +#ifdef CONFIG_PAX_PER_CPU_PGD
19587 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19588 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19589 + KERNEL_PGD_PTRS);
19590 +#endif
19591 +
19592 err = do_boot_cpu(apicid, cpu);
19593
19594 if (err) {
19595 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19596 index 3149032..14f1053 100644
19597 --- a/arch/x86/kernel/step.c
19598 +++ b/arch/x86/kernel/step.c
19599 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19600 struct desc_struct *desc;
19601 unsigned long base;
19602
19603 - seg &= ~7UL;
19604 + seg >>= 3;
19605
19606 mutex_lock(&child->mm->context.lock);
19607 - if (unlikely((seg >> 3) >= child->mm->context.size))
19608 + if (unlikely(seg >= child->mm->context.size))
19609 addr = -1L; /* bogus selector, access would fault */
19610 else {
19611 desc = child->mm->context.ldt + seg;
19612 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19613 addr += base;
19614 }
19615 mutex_unlock(&child->mm->context.lock);
19616 - }
19617 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19618 + addr = ktla_ktva(addr);
19619
19620 return addr;
19621 }
19622 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19623 unsigned char opcode[15];
19624 unsigned long addr = convert_ip_to_linear(child, regs);
19625
19626 + if (addr == -EINVAL)
19627 + return 0;
19628 +
19629 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19630 for (i = 0; i < copied; i++) {
19631 switch (opcode[i]) {
19632 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19633
19634 #ifdef CONFIG_X86_64
19635 case 0x40 ... 0x4f:
19636 - if (regs->cs != __USER_CS)
19637 + if ((regs->cs & 0xffff) != __USER_CS)
19638 /* 32-bit mode: register increment */
19639 return 0;
19640 /* 64-bit mode: REX prefix */
19641 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19642 index dee1ff7..a397f7f 100644
19643 --- a/arch/x86/kernel/sys_i386_32.c
19644 +++ b/arch/x86/kernel/sys_i386_32.c
19645 @@ -24,6 +24,21 @@
19646
19647 #include <asm/syscalls.h>
19648
19649 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19650 +{
19651 + unsigned long pax_task_size = TASK_SIZE;
19652 +
19653 +#ifdef CONFIG_PAX_SEGMEXEC
19654 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19655 + pax_task_size = SEGMEXEC_TASK_SIZE;
19656 +#endif
19657 +
19658 + if (len > pax_task_size || addr > pax_task_size - len)
19659 + return -EINVAL;
19660 +
19661 + return 0;
19662 +}
19663 +
19664 /*
19665 * Perform the select(nd, in, out, ex, tv) and mmap() system
19666 * calls. Linux/i386 didn't use to be able to handle more than
19667 @@ -58,6 +73,212 @@ out:
19668 return err;
19669 }
19670
19671 +unsigned long
19672 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19673 + unsigned long len, unsigned long pgoff, unsigned long flags)
19674 +{
19675 + struct mm_struct *mm = current->mm;
19676 + struct vm_area_struct *vma;
19677 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19678 +
19679 +#ifdef CONFIG_PAX_SEGMEXEC
19680 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19681 + pax_task_size = SEGMEXEC_TASK_SIZE;
19682 +#endif
19683 +
19684 + pax_task_size -= PAGE_SIZE;
19685 +
19686 + if (len > pax_task_size)
19687 + return -ENOMEM;
19688 +
19689 + if (flags & MAP_FIXED)
19690 + return addr;
19691 +
19692 +#ifdef CONFIG_PAX_RANDMMAP
19693 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19694 +#endif
19695 +
19696 + if (addr) {
19697 + addr = PAGE_ALIGN(addr);
19698 + if (pax_task_size - len >= addr) {
19699 + vma = find_vma(mm, addr);
19700 + if (check_heap_stack_gap(vma, addr, len))
19701 + return addr;
19702 + }
19703 + }
19704 + if (len > mm->cached_hole_size) {
19705 + start_addr = addr = mm->free_area_cache;
19706 + } else {
19707 + start_addr = addr = mm->mmap_base;
19708 + mm->cached_hole_size = 0;
19709 + }
19710 +
19711 +#ifdef CONFIG_PAX_PAGEEXEC
19712 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19713 + start_addr = 0x00110000UL;
19714 +
19715 +#ifdef CONFIG_PAX_RANDMMAP
19716 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19717 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19718 +#endif
19719 +
19720 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19721 + start_addr = addr = mm->mmap_base;
19722 + else
19723 + addr = start_addr;
19724 + }
19725 +#endif
19726 +
19727 +full_search:
19728 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19729 + /* At this point: (!vma || addr < vma->vm_end). */
19730 + if (pax_task_size - len < addr) {
19731 + /*
19732 + * Start a new search - just in case we missed
19733 + * some holes.
19734 + */
19735 + if (start_addr != mm->mmap_base) {
19736 + start_addr = addr = mm->mmap_base;
19737 + mm->cached_hole_size = 0;
19738 + goto full_search;
19739 + }
19740 + return -ENOMEM;
19741 + }
19742 + if (check_heap_stack_gap(vma, addr, len))
19743 + break;
19744 + if (addr + mm->cached_hole_size < vma->vm_start)
19745 + mm->cached_hole_size = vma->vm_start - addr;
19746 + addr = vma->vm_end;
19747 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19748 + start_addr = addr = mm->mmap_base;
19749 + mm->cached_hole_size = 0;
19750 + goto full_search;
19751 + }
19752 + }
19753 +
19754 + /*
19755 + * Remember the place where we stopped the search:
19756 + */
19757 + mm->free_area_cache = addr + len;
19758 + return addr;
19759 +}
19760 +
19761 +unsigned long
19762 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19763 + const unsigned long len, const unsigned long pgoff,
19764 + const unsigned long flags)
19765 +{
19766 + struct vm_area_struct *vma;
19767 + struct mm_struct *mm = current->mm;
19768 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19769 +
19770 +#ifdef CONFIG_PAX_SEGMEXEC
19771 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19772 + pax_task_size = SEGMEXEC_TASK_SIZE;
19773 +#endif
19774 +
19775 + pax_task_size -= PAGE_SIZE;
19776 +
19777 + /* requested length too big for entire address space */
19778 + if (len > pax_task_size)
19779 + return -ENOMEM;
19780 +
19781 + if (flags & MAP_FIXED)
19782 + return addr;
19783 +
19784 +#ifdef CONFIG_PAX_PAGEEXEC
19785 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19786 + goto bottomup;
19787 +#endif
19788 +
19789 +#ifdef CONFIG_PAX_RANDMMAP
19790 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19791 +#endif
19792 +
19793 + /* requesting a specific address */
19794 + if (addr) {
19795 + addr = PAGE_ALIGN(addr);
19796 + if (pax_task_size - len >= addr) {
19797 + vma = find_vma(mm, addr);
19798 + if (check_heap_stack_gap(vma, addr, len))
19799 + return addr;
19800 + }
19801 + }
19802 +
19803 + /* check if free_area_cache is useful for us */
19804 + if (len <= mm->cached_hole_size) {
19805 + mm->cached_hole_size = 0;
19806 + mm->free_area_cache = mm->mmap_base;
19807 + }
19808 +
19809 + /* either no address requested or can't fit in requested address hole */
19810 + addr = mm->free_area_cache;
19811 +
19812 + /* make sure it can fit in the remaining address space */
19813 + if (addr > len) {
19814 + vma = find_vma(mm, addr-len);
19815 + if (check_heap_stack_gap(vma, addr - len, len))
19816 + /* remember the address as a hint for next time */
19817 + return (mm->free_area_cache = addr-len);
19818 + }
19819 +
19820 + if (mm->mmap_base < len)
19821 + goto bottomup;
19822 +
19823 + addr = mm->mmap_base-len;
19824 +
19825 + do {
19826 + /*
19827 + * Lookup failure means no vma is above this address,
19828 + * else if new region fits below vma->vm_start,
19829 + * return with success:
19830 + */
19831 + vma = find_vma(mm, addr);
19832 + if (check_heap_stack_gap(vma, addr, len))
19833 + /* remember the address as a hint for next time */
19834 + return (mm->free_area_cache = addr);
19835 +
19836 + /* remember the largest hole we saw so far */
19837 + if (addr + mm->cached_hole_size < vma->vm_start)
19838 + mm->cached_hole_size = vma->vm_start - addr;
19839 +
19840 + /* try just below the current vma->vm_start */
19841 + addr = skip_heap_stack_gap(vma, len);
19842 + } while (!IS_ERR_VALUE(addr));
19843 +
19844 +bottomup:
19845 + /*
19846 + * A failed mmap() very likely causes application failure,
19847 + * so fall back to the bottom-up function here. This scenario
19848 + * can happen with large stack limits and large mmap()
19849 + * allocations.
19850 + */
19851 +
19852 +#ifdef CONFIG_PAX_SEGMEXEC
19853 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19854 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19855 + else
19856 +#endif
19857 +
19858 + mm->mmap_base = TASK_UNMAPPED_BASE;
19859 +
19860 +#ifdef CONFIG_PAX_RANDMMAP
19861 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19862 + mm->mmap_base += mm->delta_mmap;
19863 +#endif
19864 +
19865 + mm->free_area_cache = mm->mmap_base;
19866 + mm->cached_hole_size = ~0UL;
19867 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868 + /*
19869 + * Restore the topdown base:
19870 + */
19871 + mm->mmap_base = base;
19872 + mm->free_area_cache = base;
19873 + mm->cached_hole_size = ~0UL;
19874 +
19875 + return addr;
19876 +}
19877
19878 struct sel_arg_struct {
19879 unsigned long n;
19880 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19881 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19882 case SEMTIMEDOP:
19883 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19884 - (const struct timespec __user *)fifth);
19885 + (__force const struct timespec __user *)fifth);
19886
19887 case SEMGET:
19888 return sys_semget(first, second, third);
19889 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19890 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19891 if (ret)
19892 return ret;
19893 - return put_user(raddr, (ulong __user *) third);
19894 + return put_user(raddr, (__force ulong __user *) third);
19895 }
19896 case 1: /* iBCS2 emulator entry point */
19897 if (!segment_eq(get_fs(), get_ds()))
19898 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19899
19900 return error;
19901 }
19902 -
19903 -
19904 -/*
19905 - * Do a system call from kernel instead of calling sys_execve so we
19906 - * end up with proper pt_regs.
19907 - */
19908 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19909 -{
19910 - long __res;
19911 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19912 - : "=a" (__res)
19913 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19914 - return __res;
19915 -}
19916 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19917 index 8aa2057..b604bc1 100644
19918 --- a/arch/x86/kernel/sys_x86_64.c
19919 +++ b/arch/x86/kernel/sys_x86_64.c
19920 @@ -32,8 +32,8 @@ out:
19921 return error;
19922 }
19923
19924 -static void find_start_end(unsigned long flags, unsigned long *begin,
19925 - unsigned long *end)
19926 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19927 + unsigned long *begin, unsigned long *end)
19928 {
19929 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19930 unsigned long new_begin;
19931 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19932 *begin = new_begin;
19933 }
19934 } else {
19935 - *begin = TASK_UNMAPPED_BASE;
19936 + *begin = mm->mmap_base;
19937 *end = TASK_SIZE;
19938 }
19939 }
19940 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19941 if (flags & MAP_FIXED)
19942 return addr;
19943
19944 - find_start_end(flags, &begin, &end);
19945 + find_start_end(mm, flags, &begin, &end);
19946
19947 if (len > end)
19948 return -ENOMEM;
19949
19950 +#ifdef CONFIG_PAX_RANDMMAP
19951 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19952 +#endif
19953 +
19954 if (addr) {
19955 addr = PAGE_ALIGN(addr);
19956 vma = find_vma(mm, addr);
19957 - if (end - len >= addr &&
19958 - (!vma || addr + len <= vma->vm_start))
19959 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19960 return addr;
19961 }
19962 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19963 @@ -106,7 +109,7 @@ full_search:
19964 }
19965 return -ENOMEM;
19966 }
19967 - if (!vma || addr + len <= vma->vm_start) {
19968 + if (check_heap_stack_gap(vma, addr, len)) {
19969 /*
19970 * Remember the place where we stopped the search:
19971 */
19972 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19973 {
19974 struct vm_area_struct *vma;
19975 struct mm_struct *mm = current->mm;
19976 - unsigned long addr = addr0;
19977 + unsigned long base = mm->mmap_base, addr = addr0;
19978
19979 /* requested length too big for entire address space */
19980 if (len > TASK_SIZE)
19981 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19982 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19983 goto bottomup;
19984
19985 +#ifdef CONFIG_PAX_RANDMMAP
19986 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19987 +#endif
19988 +
19989 /* requesting a specific address */
19990 if (addr) {
19991 addr = PAGE_ALIGN(addr);
19992 - vma = find_vma(mm, addr);
19993 - if (TASK_SIZE - len >= addr &&
19994 - (!vma || addr + len <= vma->vm_start))
19995 - return addr;
19996 + if (TASK_SIZE - len >= addr) {
19997 + vma = find_vma(mm, addr);
19998 + if (check_heap_stack_gap(vma, addr, len))
19999 + return addr;
20000 + }
20001 }
20002
20003 /* check if free_area_cache is useful for us */
20004 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20005 /* make sure it can fit in the remaining address space */
20006 if (addr > len) {
20007 vma = find_vma(mm, addr-len);
20008 - if (!vma || addr <= vma->vm_start)
20009 + if (check_heap_stack_gap(vma, addr - len, len))
20010 /* remember the address as a hint for next time */
20011 return mm->free_area_cache = addr-len;
20012 }
20013 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20014 * return with success:
20015 */
20016 vma = find_vma(mm, addr);
20017 - if (!vma || addr+len <= vma->vm_start)
20018 + if (check_heap_stack_gap(vma, addr, len))
20019 /* remember the address as a hint for next time */
20020 return mm->free_area_cache = addr;
20021
20022 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20023 mm->cached_hole_size = vma->vm_start - addr;
20024
20025 /* try just below the current vma->vm_start */
20026 - addr = vma->vm_start-len;
20027 - } while (len < vma->vm_start);
20028 + addr = skip_heap_stack_gap(vma, len);
20029 + } while (!IS_ERR_VALUE(addr));
20030
20031 bottomup:
20032 /*
20033 @@ -198,13 +206,21 @@ bottomup:
20034 * can happen with large stack limits and large mmap()
20035 * allocations.
20036 */
20037 + mm->mmap_base = TASK_UNMAPPED_BASE;
20038 +
20039 +#ifdef CONFIG_PAX_RANDMMAP
20040 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20041 + mm->mmap_base += mm->delta_mmap;
20042 +#endif
20043 +
20044 + mm->free_area_cache = mm->mmap_base;
20045 mm->cached_hole_size = ~0UL;
20046 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20047 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20048 /*
20049 * Restore the topdown base:
20050 */
20051 - mm->free_area_cache = mm->mmap_base;
20052 + mm->mmap_base = base;
20053 + mm->free_area_cache = base;
20054 mm->cached_hole_size = ~0UL;
20055
20056 return addr;
20057 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20058 index 76d70a4..4c94a44 100644
20059 --- a/arch/x86/kernel/syscall_table_32.S
20060 +++ b/arch/x86/kernel/syscall_table_32.S
20061 @@ -1,3 +1,4 @@
20062 +.section .rodata,"a",@progbits
20063 ENTRY(sys_call_table)
20064 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20065 .long sys_exit
20066 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20067 index 46b8277..3349d55 100644
20068 --- a/arch/x86/kernel/tboot.c
20069 +++ b/arch/x86/kernel/tboot.c
20070 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20071
20072 void tboot_shutdown(u32 shutdown_type)
20073 {
20074 - void (*shutdown)(void);
20075 + void (* __noreturn shutdown)(void);
20076
20077 if (!tboot_enabled())
20078 return;
20079 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20080
20081 switch_to_tboot_pt();
20082
20083 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20084 + shutdown = (void *)tboot->shutdown_entry;
20085 shutdown();
20086
20087 /* should not reach here */
20088 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20089 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20090 }
20091
20092 -static atomic_t ap_wfs_count;
20093 +static atomic_unchecked_t ap_wfs_count;
20094
20095 static int tboot_wait_for_aps(int num_aps)
20096 {
20097 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20098 {
20099 switch (action) {
20100 case CPU_DYING:
20101 - atomic_inc(&ap_wfs_count);
20102 + atomic_inc_unchecked(&ap_wfs_count);
20103 if (num_online_cpus() == 1)
20104 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20105 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20106 return NOTIFY_BAD;
20107 break;
20108 }
20109 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20110
20111 tboot_create_trampoline();
20112
20113 - atomic_set(&ap_wfs_count, 0);
20114 + atomic_set_unchecked(&ap_wfs_count, 0);
20115 register_hotcpu_notifier(&tboot_cpu_notifier);
20116 return 0;
20117 }
20118 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20119 index be25734..87fe232 100644
20120 --- a/arch/x86/kernel/time.c
20121 +++ b/arch/x86/kernel/time.c
20122 @@ -26,17 +26,13 @@
20123 int timer_ack;
20124 #endif
20125
20126 -#ifdef CONFIG_X86_64
20127 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20128 -#endif
20129 -
20130 unsigned long profile_pc(struct pt_regs *regs)
20131 {
20132 unsigned long pc = instruction_pointer(regs);
20133
20134 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20135 + if (!user_mode(regs) && in_lock_functions(pc)) {
20136 #ifdef CONFIG_FRAME_POINTER
20137 - return *(unsigned long *)(regs->bp + sizeof(long));
20138 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20139 #else
20140 unsigned long *sp =
20141 (unsigned long *)kernel_stack_pointer(regs);
20142 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20143 * or above a saved flags. Eflags has bits 22-31 zero,
20144 * kernel addresses don't.
20145 */
20146 +
20147 +#ifdef CONFIG_PAX_KERNEXEC
20148 + return ktla_ktva(sp[0]);
20149 +#else
20150 if (sp[0] >> 22)
20151 return sp[0];
20152 if (sp[1] >> 22)
20153 return sp[1];
20154 #endif
20155 +
20156 +#endif
20157 }
20158 return pc;
20159 }
20160 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20161 index 6bb7b85..dd853e1 100644
20162 --- a/arch/x86/kernel/tls.c
20163 +++ b/arch/x86/kernel/tls.c
20164 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20165 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20166 return -EINVAL;
20167
20168 +#ifdef CONFIG_PAX_SEGMEXEC
20169 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20170 + return -EINVAL;
20171 +#endif
20172 +
20173 set_tls_desc(p, idx, &info, 1);
20174
20175 return 0;
20176 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20177 index 8508237..229b664 100644
20178 --- a/arch/x86/kernel/trampoline_32.S
20179 +++ b/arch/x86/kernel/trampoline_32.S
20180 @@ -32,6 +32,12 @@
20181 #include <asm/segment.h>
20182 #include <asm/page_types.h>
20183
20184 +#ifdef CONFIG_PAX_KERNEXEC
20185 +#define ta(X) (X)
20186 +#else
20187 +#define ta(X) ((X) - __PAGE_OFFSET)
20188 +#endif
20189 +
20190 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20191 __CPUINITRODATA
20192 .code16
20193 @@ -60,7 +66,7 @@ r_base = .
20194 inc %ax # protected mode (PE) bit
20195 lmsw %ax # into protected mode
20196 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20197 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20198 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
20199
20200 # These need to be in the same 64K segment as the above;
20201 # hence we don't use the boot_gdt_descr defined in head.S
20202 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20203 index 3af2dff..ba8aa49 100644
20204 --- a/arch/x86/kernel/trampoline_64.S
20205 +++ b/arch/x86/kernel/trampoline_64.S
20206 @@ -91,7 +91,7 @@ startup_32:
20207 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20208 movl %eax, %ds
20209
20210 - movl $X86_CR4_PAE, %eax
20211 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20212 movl %eax, %cr4 # Enable PAE mode
20213
20214 # Setup trampoline 4 level pagetables
20215 @@ -127,7 +127,7 @@ startup_64:
20216 no_longmode:
20217 hlt
20218 jmp no_longmode
20219 -#include "verify_cpu_64.S"
20220 +#include "verify_cpu.S"
20221
20222 # Careful these need to be in the same 64K segment as the above;
20223 tidt:
20224 @@ -138,7 +138,7 @@ tidt:
20225 # so the kernel can live anywhere
20226 .balign 4
20227 tgdt:
20228 - .short tgdt_end - tgdt # gdt limit
20229 + .short tgdt_end - tgdt - 1 # gdt limit
20230 .long tgdt - r_base
20231 .short 0
20232 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20233 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20234 index 7e37dce..ec3f8e5 100644
20235 --- a/arch/x86/kernel/traps.c
20236 +++ b/arch/x86/kernel/traps.c
20237 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20238
20239 /* Do we ignore FPU interrupts ? */
20240 char ignore_fpu_irq;
20241 -
20242 -/*
20243 - * The IDT has to be page-aligned to simplify the Pentium
20244 - * F0 0F bug workaround.
20245 - */
20246 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20247 #endif
20248
20249 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20250 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20251 static inline void
20252 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20253 {
20254 - if (!user_mode_vm(regs))
20255 + if (!user_mode(regs))
20256 die(str, regs, err);
20257 }
20258 #endif
20259
20260 static void __kprobes
20261 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20262 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20263 long error_code, siginfo_t *info)
20264 {
20265 struct task_struct *tsk = current;
20266
20267 #ifdef CONFIG_X86_32
20268 - if (regs->flags & X86_VM_MASK) {
20269 + if (v8086_mode(regs)) {
20270 /*
20271 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20272 * On nmi (interrupt 2), do_trap should not be called.
20273 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20274 }
20275 #endif
20276
20277 - if (!user_mode(regs))
20278 + if (!user_mode_novm(regs))
20279 goto kernel_trap;
20280
20281 #ifdef CONFIG_X86_32
20282 @@ -158,7 +152,7 @@ trap_signal:
20283 printk_ratelimit()) {
20284 printk(KERN_INFO
20285 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20286 - tsk->comm, tsk->pid, str,
20287 + tsk->comm, task_pid_nr(tsk), str,
20288 regs->ip, regs->sp, error_code);
20289 print_vma_addr(" in ", regs->ip);
20290 printk("\n");
20291 @@ -175,8 +169,20 @@ kernel_trap:
20292 if (!fixup_exception(regs)) {
20293 tsk->thread.error_code = error_code;
20294 tsk->thread.trap_no = trapnr;
20295 +
20296 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20297 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20298 + str = "PAX: suspicious stack segment fault";
20299 +#endif
20300 +
20301 die(str, regs, error_code);
20302 }
20303 +
20304 +#ifdef CONFIG_PAX_REFCOUNT
20305 + if (trapnr == 4)
20306 + pax_report_refcount_overflow(regs);
20307 +#endif
20308 +
20309 return;
20310
20311 #ifdef CONFIG_X86_32
20312 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20313 conditional_sti(regs);
20314
20315 #ifdef CONFIG_X86_32
20316 - if (regs->flags & X86_VM_MASK)
20317 + if (v8086_mode(regs))
20318 goto gp_in_vm86;
20319 #endif
20320
20321 tsk = current;
20322 - if (!user_mode(regs))
20323 + if (!user_mode_novm(regs))
20324 goto gp_in_kernel;
20325
20326 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20327 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20328 + struct mm_struct *mm = tsk->mm;
20329 + unsigned long limit;
20330 +
20331 + down_write(&mm->mmap_sem);
20332 + limit = mm->context.user_cs_limit;
20333 + if (limit < TASK_SIZE) {
20334 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20335 + up_write(&mm->mmap_sem);
20336 + return;
20337 + }
20338 + up_write(&mm->mmap_sem);
20339 + }
20340 +#endif
20341 +
20342 tsk->thread.error_code = error_code;
20343 tsk->thread.trap_no = 13;
20344
20345 @@ -305,6 +327,13 @@ gp_in_kernel:
20346 if (notify_die(DIE_GPF, "general protection fault", regs,
20347 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20348 return;
20349 +
20350 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20351 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20352 + die("PAX: suspicious general protection fault", regs, error_code);
20353 + else
20354 +#endif
20355 +
20356 die("general protection fault", regs, error_code);
20357 }
20358
20359 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20360 dotraplinkage notrace __kprobes void
20361 do_nmi(struct pt_regs *regs, long error_code)
20362 {
20363 +
20364 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20365 + if (!user_mode(regs)) {
20366 + unsigned long cs = regs->cs & 0xFFFF;
20367 + unsigned long ip = ktva_ktla(regs->ip);
20368 +
20369 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20370 + regs->ip = ip;
20371 + }
20372 +#endif
20373 +
20374 nmi_enter();
20375
20376 inc_irq_stat(__nmi_count);
20377 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20378 }
20379
20380 #ifdef CONFIG_X86_32
20381 - if (regs->flags & X86_VM_MASK)
20382 + if (v8086_mode(regs))
20383 goto debug_vm86;
20384 #endif
20385
20386 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20387 * kernel space (but re-enable TF when returning to user mode).
20388 */
20389 if (condition & DR_STEP) {
20390 - if (!user_mode(regs))
20391 + if (!user_mode_novm(regs))
20392 goto clear_TF_reenable;
20393 }
20394
20395 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20396 * Handle strange cache flush from user space exception
20397 * in all other cases. This is undocumented behaviour.
20398 */
20399 - if (regs->flags & X86_VM_MASK) {
20400 + if (v8086_mode(regs)) {
20401 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20402 return;
20403 }
20404 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20405 void __math_state_restore(void)
20406 {
20407 struct thread_info *thread = current_thread_info();
20408 - struct task_struct *tsk = thread->task;
20409 + struct task_struct *tsk = current;
20410
20411 /*
20412 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20413 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20414 */
20415 asmlinkage void math_state_restore(void)
20416 {
20417 - struct thread_info *thread = current_thread_info();
20418 - struct task_struct *tsk = thread->task;
20419 + struct task_struct *tsk = current;
20420
20421 if (!tsk_used_math(tsk)) {
20422 local_irq_enable();
20423 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20424 new file mode 100644
20425 index 0000000..50c5edd
20426 --- /dev/null
20427 +++ b/arch/x86/kernel/verify_cpu.S
20428 @@ -0,0 +1,140 @@
20429 +/*
20430 + *
20431 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20432 + * code has been borrowed from boot/setup.S and was introduced by
20433 + * Andi Kleen.
20434 + *
20435 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20436 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20437 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20438 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20439 + *
20440 + * This source code is licensed under the GNU General Public License,
20441 + * Version 2. See the file COPYING for more details.
20442 + *
20443 + * This is a common code for verification whether CPU supports
20444 + * long mode and SSE or not. It is not called directly instead this
20445 + * file is included at various places and compiled in that context.
20446 + * This file is expected to run in 32bit code. Currently:
20447 + *
20448 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20449 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20450 + * arch/x86/kernel/head_32.S: processor startup
20451 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20452 + *
20453 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20454 + * 0: Success 1: Failure
20455 + *
20456 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20457 + *
20458 + * The caller needs to check for the error code and take the action
20459 + * appropriately. Either display a message or halt.
20460 + */
20461 +
20462 +#include <asm/cpufeature.h>
20463 +#include <asm/msr-index.h>
20464 +
20465 +verify_cpu:
20466 + pushfl # Save caller passed flags
20467 + pushl $0 # Kill any dangerous flags
20468 + popfl
20469 +
20470 + pushfl # standard way to check for cpuid
20471 + popl %eax
20472 + movl %eax,%ebx
20473 + xorl $0x200000,%eax
20474 + pushl %eax
20475 + popfl
20476 + pushfl
20477 + popl %eax
20478 + cmpl %eax,%ebx
20479 + jz verify_cpu_no_longmode # cpu has no cpuid
20480 +
20481 + movl $0x0,%eax # See if cpuid 1 is implemented
20482 + cpuid
20483 + cmpl $0x1,%eax
20484 + jb verify_cpu_no_longmode # no cpuid 1
20485 +
20486 + xor %di,%di
20487 + cmpl $0x68747541,%ebx # AuthenticAMD
20488 + jnz verify_cpu_noamd
20489 + cmpl $0x69746e65,%edx
20490 + jnz verify_cpu_noamd
20491 + cmpl $0x444d4163,%ecx
20492 + jnz verify_cpu_noamd
20493 + mov $1,%di # cpu is from AMD
20494 + jmp verify_cpu_check
20495 +
20496 +verify_cpu_noamd:
20497 + cmpl $0x756e6547,%ebx # GenuineIntel?
20498 + jnz verify_cpu_check
20499 + cmpl $0x49656e69,%edx
20500 + jnz verify_cpu_check
20501 + cmpl $0x6c65746e,%ecx
20502 + jnz verify_cpu_check
20503 +
20504 + # only call IA32_MISC_ENABLE when:
20505 + # family > 6 || (family == 6 && model >= 0xd)
20506 + movl $0x1, %eax # check CPU family and model
20507 + cpuid
20508 + movl %eax, %ecx
20509 +
20510 + andl $0x0ff00f00, %eax # mask family and extended family
20511 + shrl $8, %eax
20512 + cmpl $6, %eax
20513 + ja verify_cpu_clear_xd # family > 6, ok
20514 + jb verify_cpu_check # family < 6, skip
20515 +
20516 + andl $0x000f00f0, %ecx # mask model and extended model
20517 + shrl $4, %ecx
20518 + cmpl $0xd, %ecx
20519 + jb verify_cpu_check # family == 6, model < 0xd, skip
20520 +
20521 +verify_cpu_clear_xd:
20522 + movl $MSR_IA32_MISC_ENABLE, %ecx
20523 + rdmsr
20524 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20525 + jnc verify_cpu_check # only write MSR if bit was changed
20526 + wrmsr
20527 +
20528 +verify_cpu_check:
20529 + movl $0x1,%eax # Does the cpu have what it takes
20530 + cpuid
20531 + andl $REQUIRED_MASK0,%edx
20532 + xorl $REQUIRED_MASK0,%edx
20533 + jnz verify_cpu_no_longmode
20534 +
20535 + movl $0x80000000,%eax # See if extended cpuid is implemented
20536 + cpuid
20537 + cmpl $0x80000001,%eax
20538 + jb verify_cpu_no_longmode # no extended cpuid
20539 +
20540 + movl $0x80000001,%eax # Does the cpu have what it takes
20541 + cpuid
20542 + andl $REQUIRED_MASK1,%edx
20543 + xorl $REQUIRED_MASK1,%edx
20544 + jnz verify_cpu_no_longmode
20545 +
20546 +verify_cpu_sse_test:
20547 + movl $1,%eax
20548 + cpuid
20549 + andl $SSE_MASK,%edx
20550 + cmpl $SSE_MASK,%edx
20551 + je verify_cpu_sse_ok
20552 + test %di,%di
20553 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20554 + movl $MSR_K7_HWCR,%ecx
20555 + rdmsr
20556 + btr $15,%eax # enable SSE
20557 + wrmsr
20558 + xor %di,%di # don't loop
20559 + jmp verify_cpu_sse_test # try again
20560 +
20561 +verify_cpu_no_longmode:
20562 + popfl # Restore caller passed flags
20563 + movl $1,%eax
20564 + ret
20565 +verify_cpu_sse_ok:
20566 + popfl # Restore caller passed flags
20567 + xorl %eax, %eax
20568 + ret
20569 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20570 deleted file mode 100644
20571 index 45b6f8a..0000000
20572 --- a/arch/x86/kernel/verify_cpu_64.S
20573 +++ /dev/null
20574 @@ -1,105 +0,0 @@
20575 -/*
20576 - *
20577 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20578 - * code has been borrowed from boot/setup.S and was introduced by
20579 - * Andi Kleen.
20580 - *
20581 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20582 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20583 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20584 - *
20585 - * This source code is licensed under the GNU General Public License,
20586 - * Version 2. See the file COPYING for more details.
20587 - *
20588 - * This is a common code for verification whether CPU supports
20589 - * long mode and SSE or not. It is not called directly instead this
20590 - * file is included at various places and compiled in that context.
20591 - * Following are the current usage.
20592 - *
20593 - * This file is included by both 16bit and 32bit code.
20594 - *
20595 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20596 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20597 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20598 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20599 - *
20600 - * verify_cpu, returns the status of cpu check in register %eax.
20601 - * 0: Success 1: Failure
20602 - *
20603 - * The caller needs to check for the error code and take the action
20604 - * appropriately. Either display a message or halt.
20605 - */
20606 -
20607 -#include <asm/cpufeature.h>
20608 -
20609 -verify_cpu:
20610 - pushfl # Save caller passed flags
20611 - pushl $0 # Kill any dangerous flags
20612 - popfl
20613 -
20614 - pushfl # standard way to check for cpuid
20615 - popl %eax
20616 - movl %eax,%ebx
20617 - xorl $0x200000,%eax
20618 - pushl %eax
20619 - popfl
20620 - pushfl
20621 - popl %eax
20622 - cmpl %eax,%ebx
20623 - jz verify_cpu_no_longmode # cpu has no cpuid
20624 -
20625 - movl $0x0,%eax # See if cpuid 1 is implemented
20626 - cpuid
20627 - cmpl $0x1,%eax
20628 - jb verify_cpu_no_longmode # no cpuid 1
20629 -
20630 - xor %di,%di
20631 - cmpl $0x68747541,%ebx # AuthenticAMD
20632 - jnz verify_cpu_noamd
20633 - cmpl $0x69746e65,%edx
20634 - jnz verify_cpu_noamd
20635 - cmpl $0x444d4163,%ecx
20636 - jnz verify_cpu_noamd
20637 - mov $1,%di # cpu is from AMD
20638 -
20639 -verify_cpu_noamd:
20640 - movl $0x1,%eax # Does the cpu have what it takes
20641 - cpuid
20642 - andl $REQUIRED_MASK0,%edx
20643 - xorl $REQUIRED_MASK0,%edx
20644 - jnz verify_cpu_no_longmode
20645 -
20646 - movl $0x80000000,%eax # See if extended cpuid is implemented
20647 - cpuid
20648 - cmpl $0x80000001,%eax
20649 - jb verify_cpu_no_longmode # no extended cpuid
20650 -
20651 - movl $0x80000001,%eax # Does the cpu have what it takes
20652 - cpuid
20653 - andl $REQUIRED_MASK1,%edx
20654 - xorl $REQUIRED_MASK1,%edx
20655 - jnz verify_cpu_no_longmode
20656 -
20657 -verify_cpu_sse_test:
20658 - movl $1,%eax
20659 - cpuid
20660 - andl $SSE_MASK,%edx
20661 - cmpl $SSE_MASK,%edx
20662 - je verify_cpu_sse_ok
20663 - test %di,%di
20664 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20665 - movl $0xc0010015,%ecx # HWCR
20666 - rdmsr
20667 - btr $15,%eax # enable SSE
20668 - wrmsr
20669 - xor %di,%di # don't loop
20670 - jmp verify_cpu_sse_test # try again
20671 -
20672 -verify_cpu_no_longmode:
20673 - popfl # Restore caller passed flags
20674 - movl $1,%eax
20675 - ret
20676 -verify_cpu_sse_ok:
20677 - popfl # Restore caller passed flags
20678 - xorl %eax, %eax
20679 - ret
20680 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20681 index 9c4e625..c992817 100644
20682 --- a/arch/x86/kernel/vm86_32.c
20683 +++ b/arch/x86/kernel/vm86_32.c
20684 @@ -41,6 +41,7 @@
20685 #include <linux/ptrace.h>
20686 #include <linux/audit.h>
20687 #include <linux/stddef.h>
20688 +#include <linux/grsecurity.h>
20689
20690 #include <asm/uaccess.h>
20691 #include <asm/io.h>
20692 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20693 do_exit(SIGSEGV);
20694 }
20695
20696 - tss = &per_cpu(init_tss, get_cpu());
20697 + tss = init_tss + get_cpu();
20698 current->thread.sp0 = current->thread.saved_sp0;
20699 current->thread.sysenter_cs = __KERNEL_CS;
20700 load_sp0(tss, &current->thread);
20701 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20702 struct task_struct *tsk;
20703 int tmp, ret = -EPERM;
20704
20705 +#ifdef CONFIG_GRKERNSEC_VM86
20706 + if (!capable(CAP_SYS_RAWIO)) {
20707 + gr_handle_vm86();
20708 + goto out;
20709 + }
20710 +#endif
20711 +
20712 tsk = current;
20713 if (tsk->thread.saved_sp0)
20714 goto out;
20715 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20716 int tmp, ret;
20717 struct vm86plus_struct __user *v86;
20718
20719 +#ifdef CONFIG_GRKERNSEC_VM86
20720 + if (!capable(CAP_SYS_RAWIO)) {
20721 + gr_handle_vm86();
20722 + ret = -EPERM;
20723 + goto out;
20724 + }
20725 +#endif
20726 +
20727 tsk = current;
20728 switch (regs->bx) {
20729 case VM86_REQUEST_IRQ:
20730 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20731 tsk->thread.saved_fs = info->regs32->fs;
20732 tsk->thread.saved_gs = get_user_gs(info->regs32);
20733
20734 - tss = &per_cpu(init_tss, get_cpu());
20735 + tss = init_tss + get_cpu();
20736 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20737 if (cpu_has_sep)
20738 tsk->thread.sysenter_cs = 0;
20739 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20740 goto cannot_handle;
20741 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20742 goto cannot_handle;
20743 - intr_ptr = (unsigned long __user *) (i << 2);
20744 + intr_ptr = (__force unsigned long __user *) (i << 2);
20745 if (get_user(segoffs, intr_ptr))
20746 goto cannot_handle;
20747 if ((segoffs >> 16) == BIOSSEG)
20748 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20749 index d430e4c..831f817 100644
20750 --- a/arch/x86/kernel/vmi_32.c
20751 +++ b/arch/x86/kernel/vmi_32.c
20752 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20753 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20754
20755 #define call_vrom_func(rom,func) \
20756 - (((VROMFUNC *)(rom->func))())
20757 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20758
20759 #define call_vrom_long_func(rom,func,arg) \
20760 - (((VROMLONGFUNC *)(rom->func)) (arg))
20761 +({\
20762 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20763 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20764 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20765 + __reloc;\
20766 +})
20767
20768 -static struct vrom_header *vmi_rom;
20769 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20770 static int disable_pge;
20771 static int disable_pse;
20772 static int disable_sep;
20773 @@ -76,10 +81,10 @@ static struct {
20774 void (*set_initial_ap_state)(int, int);
20775 void (*halt)(void);
20776 void (*set_lazy_mode)(int mode);
20777 -} vmi_ops;
20778 +} __no_const vmi_ops __read_only;
20779
20780 /* Cached VMI operations */
20781 -struct vmi_timer_ops vmi_timer_ops;
20782 +struct vmi_timer_ops vmi_timer_ops __read_only;
20783
20784 /*
20785 * VMI patching routines.
20786 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20787 static inline void patch_offset(void *insnbuf,
20788 unsigned long ip, unsigned long dest)
20789 {
20790 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20791 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20792 }
20793
20794 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20795 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20796 {
20797 u64 reloc;
20798 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20799 +
20800 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20801 switch(rel->type) {
20802 case VMI_RELOCATION_CALL_REL:
20803 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20804
20805 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20806 {
20807 - const pte_t pte = { .pte = 0 };
20808 + const pte_t pte = __pte(0ULL);
20809 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20810 }
20811
20812 static void vmi_pmd_clear(pmd_t *pmd)
20813 {
20814 - const pte_t pte = { .pte = 0 };
20815 + const pte_t pte = __pte(0ULL);
20816 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20817 }
20818 #endif
20819 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20820 ap.ss = __KERNEL_DS;
20821 ap.esp = (unsigned long) start_esp;
20822
20823 - ap.ds = __USER_DS;
20824 - ap.es = __USER_DS;
20825 + ap.ds = __KERNEL_DS;
20826 + ap.es = __KERNEL_DS;
20827 ap.fs = __KERNEL_PERCPU;
20828 - ap.gs = __KERNEL_STACK_CANARY;
20829 + savesegment(gs, ap.gs);
20830
20831 ap.eflags = 0;
20832
20833 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20834 paravirt_leave_lazy_mmu();
20835 }
20836
20837 +#ifdef CONFIG_PAX_KERNEXEC
20838 +static unsigned long vmi_pax_open_kernel(void)
20839 +{
20840 + return 0;
20841 +}
20842 +
20843 +static unsigned long vmi_pax_close_kernel(void)
20844 +{
20845 + return 0;
20846 +}
20847 +#endif
20848 +
20849 static inline int __init check_vmi_rom(struct vrom_header *rom)
20850 {
20851 struct pci_header *pci;
20852 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20853 return 0;
20854 if (rom->vrom_signature != VMI_SIGNATURE)
20855 return 0;
20856 + if (rom->rom_length * 512 > sizeof(*rom)) {
20857 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20858 + return 0;
20859 + }
20860 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20861 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20862 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20863 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20864 struct vrom_header *romstart;
20865 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20866 if (check_vmi_rom(romstart)) {
20867 - vmi_rom = romstart;
20868 + vmi_rom = *romstart;
20869 return 1;
20870 }
20871 }
20872 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20873
20874 para_fill(pv_irq_ops.safe_halt, Halt);
20875
20876 +#ifdef CONFIG_PAX_KERNEXEC
20877 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20878 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20879 +#endif
20880 +
20881 /*
20882 * Alternative instruction rewriting doesn't happen soon enough
20883 * to convert VMI_IRET to a call instead of a jump; so we have
20884 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20885
20886 void __init vmi_init(void)
20887 {
20888 - if (!vmi_rom)
20889 + if (!vmi_rom.rom_signature)
20890 probe_vmi_rom();
20891 else
20892 - check_vmi_rom(vmi_rom);
20893 + check_vmi_rom(&vmi_rom);
20894
20895 /* In case probing for or validating the ROM failed, basil */
20896 - if (!vmi_rom)
20897 + if (!vmi_rom.rom_signature)
20898 return;
20899
20900 - reserve_top_address(-vmi_rom->virtual_top);
20901 + reserve_top_address(-vmi_rom.virtual_top);
20902
20903 #ifdef CONFIG_X86_IO_APIC
20904 /* This is virtual hardware; timer routing is wired correctly */
20905 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20906 {
20907 unsigned long flags;
20908
20909 - if (!vmi_rom)
20910 + if (!vmi_rom.rom_signature)
20911 return;
20912
20913 local_irq_save(flags);
20914 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20915 index 3c68fe2..12c8280 100644
20916 --- a/arch/x86/kernel/vmlinux.lds.S
20917 +++ b/arch/x86/kernel/vmlinux.lds.S
20918 @@ -26,6 +26,13 @@
20919 #include <asm/page_types.h>
20920 #include <asm/cache.h>
20921 #include <asm/boot.h>
20922 +#include <asm/segment.h>
20923 +
20924 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20925 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20926 +#else
20927 +#define __KERNEL_TEXT_OFFSET 0
20928 +#endif
20929
20930 #undef i386 /* in case the preprocessor is a 32bit one */
20931
20932 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20933 #ifdef CONFIG_X86_32
20934 OUTPUT_ARCH(i386)
20935 ENTRY(phys_startup_32)
20936 -jiffies = jiffies_64;
20937 #else
20938 OUTPUT_ARCH(i386:x86-64)
20939 ENTRY(phys_startup_64)
20940 -jiffies_64 = jiffies;
20941 #endif
20942
20943 PHDRS {
20944 text PT_LOAD FLAGS(5); /* R_E */
20945 - data PT_LOAD FLAGS(7); /* RWE */
20946 +#ifdef CONFIG_X86_32
20947 + module PT_LOAD FLAGS(5); /* R_E */
20948 +#endif
20949 +#ifdef CONFIG_XEN
20950 + rodata PT_LOAD FLAGS(5); /* R_E */
20951 +#else
20952 + rodata PT_LOAD FLAGS(4); /* R__ */
20953 +#endif
20954 + data PT_LOAD FLAGS(6); /* RW_ */
20955 #ifdef CONFIG_X86_64
20956 user PT_LOAD FLAGS(5); /* R_E */
20957 +#endif
20958 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20959 #ifdef CONFIG_SMP
20960 percpu PT_LOAD FLAGS(6); /* RW_ */
20961 #endif
20962 + text.init PT_LOAD FLAGS(5); /* R_E */
20963 + text.exit PT_LOAD FLAGS(5); /* R_E */
20964 init PT_LOAD FLAGS(7); /* RWE */
20965 -#endif
20966 note PT_NOTE FLAGS(0); /* ___ */
20967 }
20968
20969 SECTIONS
20970 {
20971 #ifdef CONFIG_X86_32
20972 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20973 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20974 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20975 #else
20976 - . = __START_KERNEL;
20977 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20978 + . = __START_KERNEL;
20979 #endif
20980
20981 /* Text and read-only data */
20982 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20983 - _text = .;
20984 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20985 /* bootstrapping code */
20986 +#ifdef CONFIG_X86_32
20987 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20988 +#else
20989 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20990 +#endif
20991 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20992 + _text = .;
20993 HEAD_TEXT
20994 #ifdef CONFIG_X86_32
20995 . = ALIGN(PAGE_SIZE);
20996 @@ -82,28 +102,71 @@ SECTIONS
20997 IRQENTRY_TEXT
20998 *(.fixup)
20999 *(.gnu.warning)
21000 - /* End of text section */
21001 - _etext = .;
21002 } :text = 0x9090
21003
21004 - NOTES :text :note
21005 + . += __KERNEL_TEXT_OFFSET;
21006
21007 - EXCEPTION_TABLE(16) :text = 0x9090
21008 +#ifdef CONFIG_X86_32
21009 + . = ALIGN(PAGE_SIZE);
21010 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21011 + *(.vmi.rom)
21012 + } :module
21013 +
21014 + . = ALIGN(PAGE_SIZE);
21015 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21016 +
21017 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21018 + MODULES_EXEC_VADDR = .;
21019 + BYTE(0)
21020 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21021 + . = ALIGN(HPAGE_SIZE);
21022 + MODULES_EXEC_END = . - 1;
21023 +#endif
21024 +
21025 + } :module
21026 +#endif
21027 +
21028 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21029 + /* End of text section */
21030 + _etext = . - __KERNEL_TEXT_OFFSET;
21031 + }
21032 +
21033 +#ifdef CONFIG_X86_32
21034 + . = ALIGN(PAGE_SIZE);
21035 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21036 + *(.idt)
21037 + . = ALIGN(PAGE_SIZE);
21038 + *(.empty_zero_page)
21039 + *(.swapper_pg_fixmap)
21040 + *(.swapper_pg_pmd)
21041 + *(.swapper_pg_dir)
21042 + *(.trampoline_pg_dir)
21043 + } :rodata
21044 +#endif
21045 +
21046 + . = ALIGN(PAGE_SIZE);
21047 + NOTES :rodata :note
21048 +
21049 + EXCEPTION_TABLE(16) :rodata
21050
21051 RO_DATA(PAGE_SIZE)
21052
21053 /* Data */
21054 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21055 +
21056 +#ifdef CONFIG_PAX_KERNEXEC
21057 + . = ALIGN(HPAGE_SIZE);
21058 +#else
21059 + . = ALIGN(PAGE_SIZE);
21060 +#endif
21061 +
21062 /* Start of data section */
21063 _sdata = .;
21064
21065 /* init_task */
21066 INIT_TASK_DATA(THREAD_SIZE)
21067
21068 -#ifdef CONFIG_X86_32
21069 - /* 32 bit has nosave before _edata */
21070 NOSAVE_DATA
21071 -#endif
21072
21073 PAGE_ALIGNED_DATA(PAGE_SIZE)
21074
21075 @@ -112,6 +175,8 @@ SECTIONS
21076 DATA_DATA
21077 CONSTRUCTORS
21078
21079 + jiffies = jiffies_64;
21080 +
21081 /* rarely changed data like cpu maps */
21082 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21083
21084 @@ -166,12 +231,6 @@ SECTIONS
21085 }
21086 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21087
21088 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21089 - .jiffies : AT(VLOAD(.jiffies)) {
21090 - *(.jiffies)
21091 - }
21092 - jiffies = VVIRT(.jiffies);
21093 -
21094 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21095 *(.vsyscall_3)
21096 }
21097 @@ -187,12 +246,19 @@ SECTIONS
21098 #endif /* CONFIG_X86_64 */
21099
21100 /* Init code and data - will be freed after init */
21101 - . = ALIGN(PAGE_SIZE);
21102 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21103 + BYTE(0)
21104 +
21105 +#ifdef CONFIG_PAX_KERNEXEC
21106 + . = ALIGN(HPAGE_SIZE);
21107 +#else
21108 + . = ALIGN(PAGE_SIZE);
21109 +#endif
21110 +
21111 __init_begin = .; /* paired with __init_end */
21112 - }
21113 + } :init.begin
21114
21115 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21116 +#ifdef CONFIG_SMP
21117 /*
21118 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21119 * output PHDR, so the next output section - .init.text - should
21120 @@ -201,12 +267,27 @@ SECTIONS
21121 PERCPU_VADDR(0, :percpu)
21122 #endif
21123
21124 - INIT_TEXT_SECTION(PAGE_SIZE)
21125 -#ifdef CONFIG_X86_64
21126 - :init
21127 -#endif
21128 + . = ALIGN(PAGE_SIZE);
21129 + init_begin = .;
21130 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21131 + VMLINUX_SYMBOL(_sinittext) = .;
21132 + INIT_TEXT
21133 + VMLINUX_SYMBOL(_einittext) = .;
21134 + . = ALIGN(PAGE_SIZE);
21135 + } :text.init
21136
21137 - INIT_DATA_SECTION(16)
21138 + /*
21139 + * .exit.text is discard at runtime, not link time, to deal with
21140 + * references from .altinstructions and .eh_frame
21141 + */
21142 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21143 + EXIT_TEXT
21144 + . = ALIGN(16);
21145 + } :text.exit
21146 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21147 +
21148 + . = ALIGN(PAGE_SIZE);
21149 + INIT_DATA_SECTION(16) :init
21150
21151 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21152 __x86_cpu_dev_start = .;
21153 @@ -232,19 +313,11 @@ SECTIONS
21154 *(.altinstr_replacement)
21155 }
21156
21157 - /*
21158 - * .exit.text is discard at runtime, not link time, to deal with
21159 - * references from .altinstructions and .eh_frame
21160 - */
21161 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21162 - EXIT_TEXT
21163 - }
21164 -
21165 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21166 EXIT_DATA
21167 }
21168
21169 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21170 +#ifndef CONFIG_SMP
21171 PERCPU(PAGE_SIZE)
21172 #endif
21173
21174 @@ -267,12 +340,6 @@ SECTIONS
21175 . = ALIGN(PAGE_SIZE);
21176 }
21177
21178 -#ifdef CONFIG_X86_64
21179 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21180 - NOSAVE_DATA
21181 - }
21182 -#endif
21183 -
21184 /* BSS */
21185 . = ALIGN(PAGE_SIZE);
21186 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21187 @@ -288,6 +355,7 @@ SECTIONS
21188 __brk_base = .;
21189 . += 64 * 1024; /* 64k alignment slop space */
21190 *(.brk_reservation) /* areas brk users have reserved */
21191 + . = ALIGN(HPAGE_SIZE);
21192 __brk_limit = .;
21193 }
21194
21195 @@ -316,13 +384,12 @@ SECTIONS
21196 * for the boot processor.
21197 */
21198 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21199 -INIT_PER_CPU(gdt_page);
21200 INIT_PER_CPU(irq_stack_union);
21201
21202 /*
21203 * Build-time check on the image size:
21204 */
21205 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21206 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21207 "kernel image bigger than KERNEL_IMAGE_SIZE");
21208
21209 #ifdef CONFIG_SMP
21210 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21211 index 62f39d7..3bc46a1 100644
21212 --- a/arch/x86/kernel/vsyscall_64.c
21213 +++ b/arch/x86/kernel/vsyscall_64.c
21214 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21215
21216 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21217 /* copy vsyscall data */
21218 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21219 vsyscall_gtod_data.clock.vread = clock->vread;
21220 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21221 vsyscall_gtod_data.clock.mask = clock->mask;
21222 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21223 We do this here because otherwise user space would do it on
21224 its own in a likely inferior way (no access to jiffies).
21225 If you don't like it pass NULL. */
21226 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
21227 + if (tcache && tcache->blob[0] == (j = jiffies)) {
21228 p = tcache->blob[1];
21229 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21230 /* Load per CPU data from RDTSCP */
21231 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21232 index 3909e3b..5433a97 100644
21233 --- a/arch/x86/kernel/x8664_ksyms_64.c
21234 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21235 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21236
21237 EXPORT_SYMBOL(copy_user_generic);
21238 EXPORT_SYMBOL(__copy_user_nocache);
21239 -EXPORT_SYMBOL(copy_from_user);
21240 -EXPORT_SYMBOL(copy_to_user);
21241 EXPORT_SYMBOL(__copy_from_user_inatomic);
21242
21243 EXPORT_SYMBOL(copy_page);
21244 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21245 index c5ee17e..d63218f 100644
21246 --- a/arch/x86/kernel/xsave.c
21247 +++ b/arch/x86/kernel/xsave.c
21248 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21249 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21250 return -1;
21251
21252 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21253 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21254 fx_sw_user->extended_size -
21255 FP_XSTATE_MAGIC2_SIZE));
21256 /*
21257 @@ -196,7 +196,7 @@ fx_only:
21258 * the other extended state.
21259 */
21260 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21261 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21262 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21263 }
21264
21265 /*
21266 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21267 if (task_thread_info(tsk)->status & TS_XSAVE)
21268 err = restore_user_xstate(buf);
21269 else
21270 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21271 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21272 buf);
21273 if (unlikely(err)) {
21274 /*
21275 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21276 index 1350e43..a94b011 100644
21277 --- a/arch/x86/kvm/emulate.c
21278 +++ b/arch/x86/kvm/emulate.c
21279 @@ -81,8 +81,8 @@
21280 #define Src2CL (1<<29)
21281 #define Src2ImmByte (2<<29)
21282 #define Src2One (3<<29)
21283 -#define Src2Imm16 (4<<29)
21284 -#define Src2Mask (7<<29)
21285 +#define Src2Imm16 (4U<<29)
21286 +#define Src2Mask (7U<<29)
21287
21288 enum {
21289 Group1_80, Group1_81, Group1_82, Group1_83,
21290 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21291
21292 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21293 do { \
21294 + unsigned long _tmp; \
21295 __asm__ __volatile__ ( \
21296 _PRE_EFLAGS("0", "4", "2") \
21297 _op _suffix " %"_x"3,%1; " \
21298 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21299 /* Raw emulation: instruction has two explicit operands. */
21300 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21301 do { \
21302 - unsigned long _tmp; \
21303 - \
21304 switch ((_dst).bytes) { \
21305 case 2: \
21306 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21307 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21308
21309 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21310 do { \
21311 - unsigned long _tmp; \
21312 switch ((_dst).bytes) { \
21313 case 1: \
21314 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21315 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21316 index 8dfeaaa..4daa395 100644
21317 --- a/arch/x86/kvm/lapic.c
21318 +++ b/arch/x86/kvm/lapic.c
21319 @@ -52,7 +52,7 @@
21320 #define APIC_BUS_CYCLE_NS 1
21321
21322 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21323 -#define apic_debug(fmt, arg...)
21324 +#define apic_debug(fmt, arg...) do {} while (0)
21325
21326 #define APIC_LVT_NUM 6
21327 /* 14 is the version for Xeon and Pentium 8.4.8*/
21328 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21329 index 3bc2707..dd157e2 100644
21330 --- a/arch/x86/kvm/paging_tmpl.h
21331 +++ b/arch/x86/kvm/paging_tmpl.h
21332 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21333 int level = PT_PAGE_TABLE_LEVEL;
21334 unsigned long mmu_seq;
21335
21336 + pax_track_stack();
21337 +
21338 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21339 kvm_mmu_audit(vcpu, "pre page fault");
21340
21341 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21342 kvm_mmu_free_some_pages(vcpu);
21343 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21344 level, &write_pt, pfn);
21345 + (void)sptep;
21346 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21347 sptep, *sptep, write_pt);
21348
21349 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21350 index 7c6e63e..c5d92c1 100644
21351 --- a/arch/x86/kvm/svm.c
21352 +++ b/arch/x86/kvm/svm.c
21353 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21354 int cpu = raw_smp_processor_id();
21355
21356 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21357 +
21358 + pax_open_kernel();
21359 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21360 + pax_close_kernel();
21361 +
21362 load_TR_desc();
21363 }
21364
21365 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21366 return true;
21367 }
21368
21369 -static struct kvm_x86_ops svm_x86_ops = {
21370 +static const struct kvm_x86_ops svm_x86_ops = {
21371 .cpu_has_kvm_support = has_svm,
21372 .disabled_by_bios = is_disabled,
21373 .hardware_setup = svm_hardware_setup,
21374 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21375 index e6d925f..e7a4af8 100644
21376 --- a/arch/x86/kvm/vmx.c
21377 +++ b/arch/x86/kvm/vmx.c
21378 @@ -570,7 +570,11 @@ static void reload_tss(void)
21379
21380 kvm_get_gdt(&gdt);
21381 descs = (void *)gdt.base;
21382 +
21383 + pax_open_kernel();
21384 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21385 + pax_close_kernel();
21386 +
21387 load_TR_desc();
21388 }
21389
21390 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21391 if (!cpu_has_vmx_flexpriority())
21392 flexpriority_enabled = 0;
21393
21394 - if (!cpu_has_vmx_tpr_shadow())
21395 - kvm_x86_ops->update_cr8_intercept = NULL;
21396 + if (!cpu_has_vmx_tpr_shadow()) {
21397 + pax_open_kernel();
21398 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21399 + pax_close_kernel();
21400 + }
21401
21402 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21403 kvm_disable_largepages();
21404 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21405 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21406
21407 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21408 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21409 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21410 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21411 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21412 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21413 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21414 "jmp .Lkvm_vmx_return \n\t"
21415 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21416 ".Lkvm_vmx_return: "
21417 +
21418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21419 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21420 + ".Lkvm_vmx_return2: "
21421 +#endif
21422 +
21423 /* Save guest registers, load host registers, keep flags */
21424 "xchg %0, (%%"R"sp) \n\t"
21425 "mov %%"R"ax, %c[rax](%0) \n\t"
21426 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21427 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21428 #endif
21429 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21430 +
21431 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21432 + ,[cs]"i"(__KERNEL_CS)
21433 +#endif
21434 +
21435 : "cc", "memory"
21436 - , R"bx", R"di", R"si"
21437 + , R"ax", R"bx", R"di", R"si"
21438 #ifdef CONFIG_X86_64
21439 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21440 #endif
21441 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21442 if (vmx->rmode.irq.pending)
21443 fixup_rmode_irq(vmx);
21444
21445 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21446 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21447 +
21448 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21449 + loadsegment(fs, __KERNEL_PERCPU);
21450 +#endif
21451 +
21452 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21453 + __set_fs(current_thread_info()->addr_limit);
21454 +#endif
21455 +
21456 vmx->launched = 1;
21457
21458 vmx_complete_interrupts(vmx);
21459 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21460 return false;
21461 }
21462
21463 -static struct kvm_x86_ops vmx_x86_ops = {
21464 +static const struct kvm_x86_ops vmx_x86_ops = {
21465 .cpu_has_kvm_support = cpu_has_kvm_support,
21466 .disabled_by_bios = vmx_disabled_by_bios,
21467 .hardware_setup = hardware_setup,
21468 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21469 index df1cefb..5e882ad 100644
21470 --- a/arch/x86/kvm/x86.c
21471 +++ b/arch/x86/kvm/x86.c
21472 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21473 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21474 struct kvm_cpuid_entry2 __user *entries);
21475
21476 -struct kvm_x86_ops *kvm_x86_ops;
21477 +const struct kvm_x86_ops *kvm_x86_ops;
21478 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21479
21480 int ignore_msrs = 0;
21481 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21482 struct kvm_cpuid2 *cpuid,
21483 struct kvm_cpuid_entry2 __user *entries)
21484 {
21485 - int r;
21486 + int r, i;
21487
21488 r = -E2BIG;
21489 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21490 goto out;
21491 r = -EFAULT;
21492 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21493 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21494 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21495 goto out;
21496 + for (i = 0; i < cpuid->nent; ++i) {
21497 + struct kvm_cpuid_entry2 cpuid_entry;
21498 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21499 + goto out;
21500 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21501 + }
21502 vcpu->arch.cpuid_nent = cpuid->nent;
21503 kvm_apic_set_version(vcpu);
21504 return 0;
21505 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21506 struct kvm_cpuid2 *cpuid,
21507 struct kvm_cpuid_entry2 __user *entries)
21508 {
21509 - int r;
21510 + int r, i;
21511
21512 vcpu_load(vcpu);
21513 r = -E2BIG;
21514 if (cpuid->nent < vcpu->arch.cpuid_nent)
21515 goto out;
21516 r = -EFAULT;
21517 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21518 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21519 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21520 goto out;
21521 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21522 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21523 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21524 + goto out;
21525 + }
21526 return 0;
21527
21528 out:
21529 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21530 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21531 struct kvm_interrupt *irq)
21532 {
21533 - if (irq->irq < 0 || irq->irq >= 256)
21534 + if (irq->irq >= 256)
21535 return -EINVAL;
21536 if (irqchip_in_kernel(vcpu->kvm))
21537 return -ENXIO;
21538 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21539 .notifier_call = kvmclock_cpufreq_notifier
21540 };
21541
21542 -int kvm_arch_init(void *opaque)
21543 +int kvm_arch_init(const void *opaque)
21544 {
21545 int r, cpu;
21546 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21547 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21548
21549 if (kvm_x86_ops) {
21550 printk(KERN_ERR "kvm: already loaded the other module\n");
21551 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21552 index 7e59dc1..b88c98f 100644
21553 --- a/arch/x86/lguest/boot.c
21554 +++ b/arch/x86/lguest/boot.c
21555 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21556 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21557 * Launcher to reboot us.
21558 */
21559 -static void lguest_restart(char *reason)
21560 +static __noreturn void lguest_restart(char *reason)
21561 {
21562 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21563 + BUG();
21564 }
21565
21566 /*G:050
21567 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21568 index 824fa0b..c619e96 100644
21569 --- a/arch/x86/lib/atomic64_32.c
21570 +++ b/arch/x86/lib/atomic64_32.c
21571 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21572 }
21573 EXPORT_SYMBOL(atomic64_cmpxchg);
21574
21575 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21576 +{
21577 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21578 +}
21579 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21580 +
21581 /**
21582 * atomic64_xchg - xchg atomic64 variable
21583 * @ptr: pointer to type atomic64_t
21584 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21585 EXPORT_SYMBOL(atomic64_xchg);
21586
21587 /**
21588 + * atomic64_xchg_unchecked - xchg atomic64 variable
21589 + * @ptr: pointer to type atomic64_unchecked_t
21590 + * @new_val: value to assign
21591 + *
21592 + * Atomically xchgs the value of @ptr to @new_val and returns
21593 + * the old value.
21594 + */
21595 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21596 +{
21597 + /*
21598 + * Try first with a (possibly incorrect) assumption about
21599 + * what we have there. We'll do two loops most likely,
21600 + * but we'll get an ownership MESI transaction straight away
21601 + * instead of a read transaction followed by a
21602 + * flush-for-ownership transaction:
21603 + */
21604 + u64 old_val, real_val = 0;
21605 +
21606 + do {
21607 + old_val = real_val;
21608 +
21609 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21610 +
21611 + } while (real_val != old_val);
21612 +
21613 + return old_val;
21614 +}
21615 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21616 +
21617 +/**
21618 * atomic64_set - set atomic64 variable
21619 * @ptr: pointer to type atomic64_t
21620 * @new_val: value to assign
21621 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21622 EXPORT_SYMBOL(atomic64_set);
21623
21624 /**
21625 -EXPORT_SYMBOL(atomic64_read);
21626 + * atomic64_unchecked_set - set atomic64 variable
21627 + * @ptr: pointer to type atomic64_unchecked_t
21628 + * @new_val: value to assign
21629 + *
21630 + * Atomically sets the value of @ptr to @new_val.
21631 + */
21632 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21633 +{
21634 + atomic64_xchg_unchecked(ptr, new_val);
21635 +}
21636 +EXPORT_SYMBOL(atomic64_set_unchecked);
21637 +
21638 +/**
21639 * atomic64_add_return - add and return
21640 * @delta: integer value to add
21641 * @ptr: pointer to type atomic64_t
21642 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21643 }
21644 EXPORT_SYMBOL(atomic64_add_return);
21645
21646 +/**
21647 + * atomic64_add_return_unchecked - add and return
21648 + * @delta: integer value to add
21649 + * @ptr: pointer to type atomic64_unchecked_t
21650 + *
21651 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21652 + */
21653 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21654 +{
21655 + /*
21656 + * Try first with a (possibly incorrect) assumption about
21657 + * what we have there. We'll do two loops most likely,
21658 + * but we'll get an ownership MESI transaction straight away
21659 + * instead of a read transaction followed by a
21660 + * flush-for-ownership transaction:
21661 + */
21662 + u64 old_val, new_val, real_val = 0;
21663 +
21664 + do {
21665 + old_val = real_val;
21666 + new_val = old_val + delta;
21667 +
21668 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21669 +
21670 + } while (real_val != old_val);
21671 +
21672 + return new_val;
21673 +}
21674 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21675 +
21676 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21677 {
21678 return atomic64_add_return(-delta, ptr);
21679 }
21680 EXPORT_SYMBOL(atomic64_sub_return);
21681
21682 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21683 +{
21684 + return atomic64_add_return_unchecked(-delta, ptr);
21685 +}
21686 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21687 +
21688 u64 atomic64_inc_return(atomic64_t *ptr)
21689 {
21690 return atomic64_add_return(1, ptr);
21691 }
21692 EXPORT_SYMBOL(atomic64_inc_return);
21693
21694 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21695 +{
21696 + return atomic64_add_return_unchecked(1, ptr);
21697 +}
21698 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21699 +
21700 u64 atomic64_dec_return(atomic64_t *ptr)
21701 {
21702 return atomic64_sub_return(1, ptr);
21703 }
21704 EXPORT_SYMBOL(atomic64_dec_return);
21705
21706 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21707 +{
21708 + return atomic64_sub_return_unchecked(1, ptr);
21709 +}
21710 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21711 +
21712 /**
21713 * atomic64_add - add integer to atomic64 variable
21714 * @delta: integer value to add
21715 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21716 EXPORT_SYMBOL(atomic64_add);
21717
21718 /**
21719 + * atomic64_add_unchecked - add integer to atomic64 variable
21720 + * @delta: integer value to add
21721 + * @ptr: pointer to type atomic64_unchecked_t
21722 + *
21723 + * Atomically adds @delta to @ptr.
21724 + */
21725 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21726 +{
21727 + atomic64_add_return_unchecked(delta, ptr);
21728 +}
21729 +EXPORT_SYMBOL(atomic64_add_unchecked);
21730 +
21731 +/**
21732 * atomic64_sub - subtract the atomic64 variable
21733 * @delta: integer value to subtract
21734 * @ptr: pointer to type atomic64_t
21735 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21736 EXPORT_SYMBOL(atomic64_sub);
21737
21738 /**
21739 + * atomic64_sub_unchecked - subtract the atomic64 variable
21740 + * @delta: integer value to subtract
21741 + * @ptr: pointer to type atomic64_unchecked_t
21742 + *
21743 + * Atomically subtracts @delta from @ptr.
21744 + */
21745 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21746 +{
21747 + atomic64_add_unchecked(-delta, ptr);
21748 +}
21749 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21750 +
21751 +/**
21752 * atomic64_sub_and_test - subtract value from variable and test result
21753 * @delta: integer value to subtract
21754 * @ptr: pointer to type atomic64_t
21755 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21756 EXPORT_SYMBOL(atomic64_inc);
21757
21758 /**
21759 + * atomic64_inc_unchecked - increment atomic64 variable
21760 + * @ptr: pointer to type atomic64_unchecked_t
21761 + *
21762 + * Atomically increments @ptr by 1.
21763 + */
21764 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21765 +{
21766 + atomic64_add_unchecked(1, ptr);
21767 +}
21768 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21769 +
21770 +/**
21771 * atomic64_dec - decrement atomic64 variable
21772 * @ptr: pointer to type atomic64_t
21773 *
21774 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21775 EXPORT_SYMBOL(atomic64_dec);
21776
21777 /**
21778 + * atomic64_dec_unchecked - decrement atomic64 variable
21779 + * @ptr: pointer to type atomic64_unchecked_t
21780 + *
21781 + * Atomically decrements @ptr by 1.
21782 + */
21783 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21784 +{
21785 + atomic64_sub_unchecked(1, ptr);
21786 +}
21787 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21788 +
21789 +/**
21790 * atomic64_dec_and_test - decrement and test
21791 * @ptr: pointer to type atomic64_t
21792 *
21793 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21794 index adbccd0..98f96c8 100644
21795 --- a/arch/x86/lib/checksum_32.S
21796 +++ b/arch/x86/lib/checksum_32.S
21797 @@ -28,7 +28,8 @@
21798 #include <linux/linkage.h>
21799 #include <asm/dwarf2.h>
21800 #include <asm/errno.h>
21801 -
21802 +#include <asm/segment.h>
21803 +
21804 /*
21805 * computes a partial checksum, e.g. for TCP/UDP fragments
21806 */
21807 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21808
21809 #define ARGBASE 16
21810 #define FP 12
21811 -
21812 -ENTRY(csum_partial_copy_generic)
21813 +
21814 +ENTRY(csum_partial_copy_generic_to_user)
21815 CFI_STARTPROC
21816 +
21817 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21818 + pushl %gs
21819 + CFI_ADJUST_CFA_OFFSET 4
21820 + popl %es
21821 + CFI_ADJUST_CFA_OFFSET -4
21822 + jmp csum_partial_copy_generic
21823 +#endif
21824 +
21825 +ENTRY(csum_partial_copy_generic_from_user)
21826 +
21827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21828 + pushl %gs
21829 + CFI_ADJUST_CFA_OFFSET 4
21830 + popl %ds
21831 + CFI_ADJUST_CFA_OFFSET -4
21832 +#endif
21833 +
21834 +ENTRY(csum_partial_copy_generic)
21835 subl $4,%esp
21836 CFI_ADJUST_CFA_OFFSET 4
21837 pushl %edi
21838 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21839 jmp 4f
21840 SRC(1: movw (%esi), %bx )
21841 addl $2, %esi
21842 -DST( movw %bx, (%edi) )
21843 +DST( movw %bx, %es:(%edi) )
21844 addl $2, %edi
21845 addw %bx, %ax
21846 adcl $0, %eax
21847 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21848 SRC(1: movl (%esi), %ebx )
21849 SRC( movl 4(%esi), %edx )
21850 adcl %ebx, %eax
21851 -DST( movl %ebx, (%edi) )
21852 +DST( movl %ebx, %es:(%edi) )
21853 adcl %edx, %eax
21854 -DST( movl %edx, 4(%edi) )
21855 +DST( movl %edx, %es:4(%edi) )
21856
21857 SRC( movl 8(%esi), %ebx )
21858 SRC( movl 12(%esi), %edx )
21859 adcl %ebx, %eax
21860 -DST( movl %ebx, 8(%edi) )
21861 +DST( movl %ebx, %es:8(%edi) )
21862 adcl %edx, %eax
21863 -DST( movl %edx, 12(%edi) )
21864 +DST( movl %edx, %es:12(%edi) )
21865
21866 SRC( movl 16(%esi), %ebx )
21867 SRC( movl 20(%esi), %edx )
21868 adcl %ebx, %eax
21869 -DST( movl %ebx, 16(%edi) )
21870 +DST( movl %ebx, %es:16(%edi) )
21871 adcl %edx, %eax
21872 -DST( movl %edx, 20(%edi) )
21873 +DST( movl %edx, %es:20(%edi) )
21874
21875 SRC( movl 24(%esi), %ebx )
21876 SRC( movl 28(%esi), %edx )
21877 adcl %ebx, %eax
21878 -DST( movl %ebx, 24(%edi) )
21879 +DST( movl %ebx, %es:24(%edi) )
21880 adcl %edx, %eax
21881 -DST( movl %edx, 28(%edi) )
21882 +DST( movl %edx, %es:28(%edi) )
21883
21884 lea 32(%esi), %esi
21885 lea 32(%edi), %edi
21886 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21887 shrl $2, %edx # This clears CF
21888 SRC(3: movl (%esi), %ebx )
21889 adcl %ebx, %eax
21890 -DST( movl %ebx, (%edi) )
21891 +DST( movl %ebx, %es:(%edi) )
21892 lea 4(%esi), %esi
21893 lea 4(%edi), %edi
21894 dec %edx
21895 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21896 jb 5f
21897 SRC( movw (%esi), %cx )
21898 leal 2(%esi), %esi
21899 -DST( movw %cx, (%edi) )
21900 +DST( movw %cx, %es:(%edi) )
21901 leal 2(%edi), %edi
21902 je 6f
21903 shll $16,%ecx
21904 SRC(5: movb (%esi), %cl )
21905 -DST( movb %cl, (%edi) )
21906 +DST( movb %cl, %es:(%edi) )
21907 6: addl %ecx, %eax
21908 adcl $0, %eax
21909 7:
21910 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21911
21912 6001:
21913 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21914 - movl $-EFAULT, (%ebx)
21915 + movl $-EFAULT, %ss:(%ebx)
21916
21917 # zero the complete destination - computing the rest
21918 # is too much work
21919 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21920
21921 6002:
21922 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21923 - movl $-EFAULT,(%ebx)
21924 + movl $-EFAULT,%ss:(%ebx)
21925 jmp 5000b
21926
21927 .previous
21928
21929 + pushl %ss
21930 + CFI_ADJUST_CFA_OFFSET 4
21931 + popl %ds
21932 + CFI_ADJUST_CFA_OFFSET -4
21933 + pushl %ss
21934 + CFI_ADJUST_CFA_OFFSET 4
21935 + popl %es
21936 + CFI_ADJUST_CFA_OFFSET -4
21937 popl %ebx
21938 CFI_ADJUST_CFA_OFFSET -4
21939 CFI_RESTORE ebx
21940 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21941 CFI_ADJUST_CFA_OFFSET -4
21942 ret
21943 CFI_ENDPROC
21944 -ENDPROC(csum_partial_copy_generic)
21945 +ENDPROC(csum_partial_copy_generic_to_user)
21946
21947 #else
21948
21949 /* Version for PentiumII/PPro */
21950
21951 #define ROUND1(x) \
21952 + nop; nop; nop; \
21953 SRC(movl x(%esi), %ebx ) ; \
21954 addl %ebx, %eax ; \
21955 - DST(movl %ebx, x(%edi) ) ;
21956 + DST(movl %ebx, %es:x(%edi)) ;
21957
21958 #define ROUND(x) \
21959 + nop; nop; nop; \
21960 SRC(movl x(%esi), %ebx ) ; \
21961 adcl %ebx, %eax ; \
21962 - DST(movl %ebx, x(%edi) ) ;
21963 + DST(movl %ebx, %es:x(%edi)) ;
21964
21965 #define ARGBASE 12
21966 -
21967 -ENTRY(csum_partial_copy_generic)
21968 +
21969 +ENTRY(csum_partial_copy_generic_to_user)
21970 CFI_STARTPROC
21971 +
21972 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21973 + pushl %gs
21974 + CFI_ADJUST_CFA_OFFSET 4
21975 + popl %es
21976 + CFI_ADJUST_CFA_OFFSET -4
21977 + jmp csum_partial_copy_generic
21978 +#endif
21979 +
21980 +ENTRY(csum_partial_copy_generic_from_user)
21981 +
21982 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21983 + pushl %gs
21984 + CFI_ADJUST_CFA_OFFSET 4
21985 + popl %ds
21986 + CFI_ADJUST_CFA_OFFSET -4
21987 +#endif
21988 +
21989 +ENTRY(csum_partial_copy_generic)
21990 pushl %ebx
21991 CFI_ADJUST_CFA_OFFSET 4
21992 CFI_REL_OFFSET ebx, 0
21993 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21994 subl %ebx, %edi
21995 lea -1(%esi),%edx
21996 andl $-32,%edx
21997 - lea 3f(%ebx,%ebx), %ebx
21998 + lea 3f(%ebx,%ebx,2), %ebx
21999 testl %esi, %esi
22000 jmp *%ebx
22001 1: addl $64,%esi
22002 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22003 jb 5f
22004 SRC( movw (%esi), %dx )
22005 leal 2(%esi), %esi
22006 -DST( movw %dx, (%edi) )
22007 +DST( movw %dx, %es:(%edi) )
22008 leal 2(%edi), %edi
22009 je 6f
22010 shll $16,%edx
22011 5:
22012 SRC( movb (%esi), %dl )
22013 -DST( movb %dl, (%edi) )
22014 +DST( movb %dl, %es:(%edi) )
22015 6: addl %edx, %eax
22016 adcl $0, %eax
22017 7:
22018 .section .fixup, "ax"
22019 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22020 - movl $-EFAULT, (%ebx)
22021 + movl $-EFAULT, %ss:(%ebx)
22022 # zero the complete destination (computing the rest is too much work)
22023 movl ARGBASE+8(%esp),%edi # dst
22024 movl ARGBASE+12(%esp),%ecx # len
22025 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22026 rep; stosb
22027 jmp 7b
22028 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22029 - movl $-EFAULT, (%ebx)
22030 + movl $-EFAULT, %ss:(%ebx)
22031 jmp 7b
22032 .previous
22033
22034 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22035 + pushl %ss
22036 + CFI_ADJUST_CFA_OFFSET 4
22037 + popl %ds
22038 + CFI_ADJUST_CFA_OFFSET -4
22039 + pushl %ss
22040 + CFI_ADJUST_CFA_OFFSET 4
22041 + popl %es
22042 + CFI_ADJUST_CFA_OFFSET -4
22043 +#endif
22044 +
22045 popl %esi
22046 CFI_ADJUST_CFA_OFFSET -4
22047 CFI_RESTORE esi
22048 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22049 CFI_RESTORE ebx
22050 ret
22051 CFI_ENDPROC
22052 -ENDPROC(csum_partial_copy_generic)
22053 +ENDPROC(csum_partial_copy_generic_to_user)
22054
22055 #undef ROUND
22056 #undef ROUND1
22057 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22058 index ebeafcc..1e3a402 100644
22059 --- a/arch/x86/lib/clear_page_64.S
22060 +++ b/arch/x86/lib/clear_page_64.S
22061 @@ -1,5 +1,6 @@
22062 #include <linux/linkage.h>
22063 #include <asm/dwarf2.h>
22064 +#include <asm/alternative-asm.h>
22065
22066 /*
22067 * Zero a page.
22068 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22069 movl $4096/8,%ecx
22070 xorl %eax,%eax
22071 rep stosq
22072 + pax_force_retaddr
22073 ret
22074 CFI_ENDPROC
22075 ENDPROC(clear_page_c)
22076 @@ -33,6 +35,7 @@ ENTRY(clear_page)
22077 leaq 64(%rdi),%rdi
22078 jnz .Lloop
22079 nop
22080 + pax_force_retaddr
22081 ret
22082 CFI_ENDPROC
22083 .Lclear_page_end:
22084 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
22085
22086 #include <asm/cpufeature.h>
22087
22088 - .section .altinstr_replacement,"ax"
22089 + .section .altinstr_replacement,"a"
22090 1: .byte 0xeb /* jmp <disp8> */
22091 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22092 2:
22093 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22094 index 727a5d4..333818a 100644
22095 --- a/arch/x86/lib/copy_page_64.S
22096 +++ b/arch/x86/lib/copy_page_64.S
22097 @@ -2,12 +2,14 @@
22098
22099 #include <linux/linkage.h>
22100 #include <asm/dwarf2.h>
22101 +#include <asm/alternative-asm.h>
22102
22103 ALIGN
22104 copy_page_c:
22105 CFI_STARTPROC
22106 movl $4096/8,%ecx
22107 rep movsq
22108 + pax_force_retaddr
22109 ret
22110 CFI_ENDPROC
22111 ENDPROC(copy_page_c)
22112 @@ -38,7 +40,7 @@ ENTRY(copy_page)
22113 movq 16 (%rsi), %rdx
22114 movq 24 (%rsi), %r8
22115 movq 32 (%rsi), %r9
22116 - movq 40 (%rsi), %r10
22117 + movq 40 (%rsi), %r13
22118 movq 48 (%rsi), %r11
22119 movq 56 (%rsi), %r12
22120
22121 @@ -49,7 +51,7 @@ ENTRY(copy_page)
22122 movq %rdx, 16 (%rdi)
22123 movq %r8, 24 (%rdi)
22124 movq %r9, 32 (%rdi)
22125 - movq %r10, 40 (%rdi)
22126 + movq %r13, 40 (%rdi)
22127 movq %r11, 48 (%rdi)
22128 movq %r12, 56 (%rdi)
22129
22130 @@ -68,7 +70,7 @@ ENTRY(copy_page)
22131 movq 16 (%rsi), %rdx
22132 movq 24 (%rsi), %r8
22133 movq 32 (%rsi), %r9
22134 - movq 40 (%rsi), %r10
22135 + movq 40 (%rsi), %r13
22136 movq 48 (%rsi), %r11
22137 movq 56 (%rsi), %r12
22138
22139 @@ -77,7 +79,7 @@ ENTRY(copy_page)
22140 movq %rdx, 16 (%rdi)
22141 movq %r8, 24 (%rdi)
22142 movq %r9, 32 (%rdi)
22143 - movq %r10, 40 (%rdi)
22144 + movq %r13, 40 (%rdi)
22145 movq %r11, 48 (%rdi)
22146 movq %r12, 56 (%rdi)
22147
22148 @@ -94,6 +96,7 @@ ENTRY(copy_page)
22149 CFI_RESTORE r13
22150 addq $3*8,%rsp
22151 CFI_ADJUST_CFA_OFFSET -3*8
22152 + pax_force_retaddr
22153 ret
22154 .Lcopy_page_end:
22155 CFI_ENDPROC
22156 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
22157
22158 #include <asm/cpufeature.h>
22159
22160 - .section .altinstr_replacement,"ax"
22161 + .section .altinstr_replacement,"a"
22162 1: .byte 0xeb /* jmp <disp8> */
22163 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22164 2:
22165 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22166 index af8debd..40c75f3 100644
22167 --- a/arch/x86/lib/copy_user_64.S
22168 +++ b/arch/x86/lib/copy_user_64.S
22169 @@ -15,13 +15,15 @@
22170 #include <asm/asm-offsets.h>
22171 #include <asm/thread_info.h>
22172 #include <asm/cpufeature.h>
22173 +#include <asm/pgtable.h>
22174 +#include <asm/alternative-asm.h>
22175
22176 .macro ALTERNATIVE_JUMP feature,orig,alt
22177 0:
22178 .byte 0xe9 /* 32bit jump */
22179 .long \orig-1f /* by default jump to orig */
22180 1:
22181 - .section .altinstr_replacement,"ax"
22182 + .section .altinstr_replacement,"a"
22183 2: .byte 0xe9 /* near jump with 32bit immediate */
22184 .long \alt-1b /* offset */ /* or alternatively to alt */
22185 .previous
22186 @@ -64,55 +66,26 @@
22187 #endif
22188 .endm
22189
22190 -/* Standard copy_to_user with segment limit checking */
22191 -ENTRY(copy_to_user)
22192 - CFI_STARTPROC
22193 - GET_THREAD_INFO(%rax)
22194 - movq %rdi,%rcx
22195 - addq %rdx,%rcx
22196 - jc bad_to_user
22197 - cmpq TI_addr_limit(%rax),%rcx
22198 - ja bad_to_user
22199 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22200 - CFI_ENDPROC
22201 -ENDPROC(copy_to_user)
22202 -
22203 -/* Standard copy_from_user with segment limit checking */
22204 -ENTRY(copy_from_user)
22205 - CFI_STARTPROC
22206 - GET_THREAD_INFO(%rax)
22207 - movq %rsi,%rcx
22208 - addq %rdx,%rcx
22209 - jc bad_from_user
22210 - cmpq TI_addr_limit(%rax),%rcx
22211 - ja bad_from_user
22212 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22213 - CFI_ENDPROC
22214 -ENDPROC(copy_from_user)
22215 -
22216 ENTRY(copy_user_generic)
22217 CFI_STARTPROC
22218 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22219 CFI_ENDPROC
22220 ENDPROC(copy_user_generic)
22221
22222 -ENTRY(__copy_from_user_inatomic)
22223 - CFI_STARTPROC
22224 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22225 - CFI_ENDPROC
22226 -ENDPROC(__copy_from_user_inatomic)
22227 -
22228 .section .fixup,"ax"
22229 /* must zero dest */
22230 ENTRY(bad_from_user)
22231 bad_from_user:
22232 CFI_STARTPROC
22233 + testl %edx,%edx
22234 + js bad_to_user
22235 movl %edx,%ecx
22236 xorl %eax,%eax
22237 rep
22238 stosb
22239 bad_to_user:
22240 movl %edx,%eax
22241 + pax_force_retaddr
22242 ret
22243 CFI_ENDPROC
22244 ENDPROC(bad_from_user)
22245 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22246 jz 17f
22247 1: movq (%rsi),%r8
22248 2: movq 1*8(%rsi),%r9
22249 -3: movq 2*8(%rsi),%r10
22250 +3: movq 2*8(%rsi),%rax
22251 4: movq 3*8(%rsi),%r11
22252 5: movq %r8,(%rdi)
22253 6: movq %r9,1*8(%rdi)
22254 -7: movq %r10,2*8(%rdi)
22255 +7: movq %rax,2*8(%rdi)
22256 8: movq %r11,3*8(%rdi)
22257 9: movq 4*8(%rsi),%r8
22258 10: movq 5*8(%rsi),%r9
22259 -11: movq 6*8(%rsi),%r10
22260 +11: movq 6*8(%rsi),%rax
22261 12: movq 7*8(%rsi),%r11
22262 13: movq %r8,4*8(%rdi)
22263 14: movq %r9,5*8(%rdi)
22264 -15: movq %r10,6*8(%rdi)
22265 +15: movq %rax,6*8(%rdi)
22266 16: movq %r11,7*8(%rdi)
22267 leaq 64(%rsi),%rsi
22268 leaq 64(%rdi),%rdi
22269 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22270 decl %ecx
22271 jnz 21b
22272 23: xor %eax,%eax
22273 + pax_force_retaddr
22274 ret
22275
22276 .section .fixup,"ax"
22277 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22278 3: rep
22279 movsb
22280 4: xorl %eax,%eax
22281 + pax_force_retaddr
22282 ret
22283
22284 .section .fixup,"ax"
22285 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22286 index cb0c112..e3a6895 100644
22287 --- a/arch/x86/lib/copy_user_nocache_64.S
22288 +++ b/arch/x86/lib/copy_user_nocache_64.S
22289 @@ -8,12 +8,14 @@
22290
22291 #include <linux/linkage.h>
22292 #include <asm/dwarf2.h>
22293 +#include <asm/alternative-asm.h>
22294
22295 #define FIX_ALIGNMENT 1
22296
22297 #include <asm/current.h>
22298 #include <asm/asm-offsets.h>
22299 #include <asm/thread_info.h>
22300 +#include <asm/pgtable.h>
22301
22302 .macro ALIGN_DESTINATION
22303 #ifdef FIX_ALIGNMENT
22304 @@ -50,6 +52,15 @@
22305 */
22306 ENTRY(__copy_user_nocache)
22307 CFI_STARTPROC
22308 +
22309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22310 + mov $PAX_USER_SHADOW_BASE,%rcx
22311 + cmp %rcx,%rsi
22312 + jae 1f
22313 + add %rcx,%rsi
22314 +1:
22315 +#endif
22316 +
22317 cmpl $8,%edx
22318 jb 20f /* less then 8 bytes, go to byte copy loop */
22319 ALIGN_DESTINATION
22320 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22321 jz 17f
22322 1: movq (%rsi),%r8
22323 2: movq 1*8(%rsi),%r9
22324 -3: movq 2*8(%rsi),%r10
22325 +3: movq 2*8(%rsi),%rax
22326 4: movq 3*8(%rsi),%r11
22327 5: movnti %r8,(%rdi)
22328 6: movnti %r9,1*8(%rdi)
22329 -7: movnti %r10,2*8(%rdi)
22330 +7: movnti %rax,2*8(%rdi)
22331 8: movnti %r11,3*8(%rdi)
22332 9: movq 4*8(%rsi),%r8
22333 10: movq 5*8(%rsi),%r9
22334 -11: movq 6*8(%rsi),%r10
22335 +11: movq 6*8(%rsi),%rax
22336 12: movq 7*8(%rsi),%r11
22337 13: movnti %r8,4*8(%rdi)
22338 14: movnti %r9,5*8(%rdi)
22339 -15: movnti %r10,6*8(%rdi)
22340 +15: movnti %rax,6*8(%rdi)
22341 16: movnti %r11,7*8(%rdi)
22342 leaq 64(%rsi),%rsi
22343 leaq 64(%rdi),%rdi
22344 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22345 jnz 21b
22346 23: xorl %eax,%eax
22347 sfence
22348 + pax_force_retaddr
22349 ret
22350
22351 .section .fixup,"ax"
22352 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22353 index f0dba36..48cb4d6 100644
22354 --- a/arch/x86/lib/csum-copy_64.S
22355 +++ b/arch/x86/lib/csum-copy_64.S
22356 @@ -8,6 +8,7 @@
22357 #include <linux/linkage.h>
22358 #include <asm/dwarf2.h>
22359 #include <asm/errno.h>
22360 +#include <asm/alternative-asm.h>
22361
22362 /*
22363 * Checksum copy with exception handling.
22364 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22365 CFI_RESTORE rbp
22366 addq $7*8,%rsp
22367 CFI_ADJUST_CFA_OFFSET -7*8
22368 + pax_force_retaddr 0, 1
22369 ret
22370 CFI_RESTORE_STATE
22371
22372 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22373 index 459b58a..9570bc7 100644
22374 --- a/arch/x86/lib/csum-wrappers_64.c
22375 +++ b/arch/x86/lib/csum-wrappers_64.c
22376 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22377 len -= 2;
22378 }
22379 }
22380 - isum = csum_partial_copy_generic((__force const void *)src,
22381 +
22382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22383 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22384 + src += PAX_USER_SHADOW_BASE;
22385 +#endif
22386 +
22387 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22388 dst, len, isum, errp, NULL);
22389 if (unlikely(*errp))
22390 goto out_err;
22391 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22392 }
22393
22394 *errp = 0;
22395 - return csum_partial_copy_generic(src, (void __force *)dst,
22396 +
22397 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22398 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22399 + dst += PAX_USER_SHADOW_BASE;
22400 +#endif
22401 +
22402 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22403 len, isum, NULL, errp);
22404 }
22405 EXPORT_SYMBOL(csum_partial_copy_to_user);
22406 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22407 index 51f1504..ddac4c1 100644
22408 --- a/arch/x86/lib/getuser.S
22409 +++ b/arch/x86/lib/getuser.S
22410 @@ -33,15 +33,38 @@
22411 #include <asm/asm-offsets.h>
22412 #include <asm/thread_info.h>
22413 #include <asm/asm.h>
22414 +#include <asm/segment.h>
22415 +#include <asm/pgtable.h>
22416 +#include <asm/alternative-asm.h>
22417 +
22418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22419 +#define __copyuser_seg gs;
22420 +#else
22421 +#define __copyuser_seg
22422 +#endif
22423
22424 .text
22425 ENTRY(__get_user_1)
22426 CFI_STARTPROC
22427 +
22428 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22429 GET_THREAD_INFO(%_ASM_DX)
22430 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22431 jae bad_get_user
22432 -1: movzb (%_ASM_AX),%edx
22433 +
22434 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22435 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22436 + cmp %_ASM_DX,%_ASM_AX
22437 + jae 1234f
22438 + add %_ASM_DX,%_ASM_AX
22439 +1234:
22440 +#endif
22441 +
22442 +#endif
22443 +
22444 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22445 xor %eax,%eax
22446 + pax_force_retaddr
22447 ret
22448 CFI_ENDPROC
22449 ENDPROC(__get_user_1)
22450 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22451 ENTRY(__get_user_2)
22452 CFI_STARTPROC
22453 add $1,%_ASM_AX
22454 +
22455 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22456 jc bad_get_user
22457 GET_THREAD_INFO(%_ASM_DX)
22458 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22459 jae bad_get_user
22460 -2: movzwl -1(%_ASM_AX),%edx
22461 +
22462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22463 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22464 + cmp %_ASM_DX,%_ASM_AX
22465 + jae 1234f
22466 + add %_ASM_DX,%_ASM_AX
22467 +1234:
22468 +#endif
22469 +
22470 +#endif
22471 +
22472 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22473 xor %eax,%eax
22474 + pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477 ENDPROC(__get_user_2)
22478 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22479 ENTRY(__get_user_4)
22480 CFI_STARTPROC
22481 add $3,%_ASM_AX
22482 +
22483 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22484 jc bad_get_user
22485 GET_THREAD_INFO(%_ASM_DX)
22486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22487 jae bad_get_user
22488 -3: mov -3(%_ASM_AX),%edx
22489 +
22490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22491 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22492 + cmp %_ASM_DX,%_ASM_AX
22493 + jae 1234f
22494 + add %_ASM_DX,%_ASM_AX
22495 +1234:
22496 +#endif
22497 +
22498 +#endif
22499 +
22500 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22501 xor %eax,%eax
22502 + pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 ENDPROC(__get_user_4)
22506 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22507 GET_THREAD_INFO(%_ASM_DX)
22508 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22509 jae bad_get_user
22510 +
22511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22512 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22513 + cmp %_ASM_DX,%_ASM_AX
22514 + jae 1234f
22515 + add %_ASM_DX,%_ASM_AX
22516 +1234:
22517 +#endif
22518 +
22519 4: movq -7(%_ASM_AX),%_ASM_DX
22520 xor %eax,%eax
22521 + pax_force_retaddr
22522 ret
22523 CFI_ENDPROC
22524 ENDPROC(__get_user_8)
22525 @@ -91,6 +152,7 @@ bad_get_user:
22526 CFI_STARTPROC
22527 xor %edx,%edx
22528 mov $(-EFAULT),%_ASM_AX
22529 + pax_force_retaddr
22530 ret
22531 CFI_ENDPROC
22532 END(bad_get_user)
22533 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22534 index 05a95e7..326f2fa 100644
22535 --- a/arch/x86/lib/iomap_copy_64.S
22536 +++ b/arch/x86/lib/iomap_copy_64.S
22537 @@ -17,6 +17,7 @@
22538
22539 #include <linux/linkage.h>
22540 #include <asm/dwarf2.h>
22541 +#include <asm/alternative-asm.h>
22542
22543 /*
22544 * override generic version in lib/iomap_copy.c
22545 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22546 CFI_STARTPROC
22547 movl %edx,%ecx
22548 rep movsd
22549 + pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(__iowrite32_copy)
22553 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22554 index ad5441e..610e351 100644
22555 --- a/arch/x86/lib/memcpy_64.S
22556 +++ b/arch/x86/lib/memcpy_64.S
22557 @@ -4,6 +4,7 @@
22558
22559 #include <asm/cpufeature.h>
22560 #include <asm/dwarf2.h>
22561 +#include <asm/alternative-asm.h>
22562
22563 /*
22564 * memcpy - Copy a memory block.
22565 @@ -34,6 +35,7 @@ memcpy_c:
22566 rep movsq
22567 movl %edx, %ecx
22568 rep movsb
22569 + pax_force_retaddr
22570 ret
22571 CFI_ENDPROC
22572 ENDPROC(memcpy_c)
22573 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22574 jnz .Lloop_1
22575
22576 .Lend:
22577 + pax_force_retaddr 0, 1
22578 ret
22579 CFI_ENDPROC
22580 ENDPROC(memcpy)
22581 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22582 * It is also a lot simpler. Use this when possible:
22583 */
22584
22585 - .section .altinstr_replacement, "ax"
22586 + .section .altinstr_replacement, "a"
22587 1: .byte 0xeb /* jmp <disp8> */
22588 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22589 2:
22590 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22591 index 2c59481..7e9ba4e 100644
22592 --- a/arch/x86/lib/memset_64.S
22593 +++ b/arch/x86/lib/memset_64.S
22594 @@ -2,6 +2,7 @@
22595
22596 #include <linux/linkage.h>
22597 #include <asm/dwarf2.h>
22598 +#include <asm/alternative-asm.h>
22599
22600 /*
22601 * ISO C memset - set a memory block to a byte value.
22602 @@ -28,6 +29,7 @@ memset_c:
22603 movl %r8d,%ecx
22604 rep stosb
22605 movq %r9,%rax
22606 + pax_force_retaddr
22607 ret
22608 CFI_ENDPROC
22609 ENDPROC(memset_c)
22610 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22611 ENTRY(memset)
22612 ENTRY(__memset)
22613 CFI_STARTPROC
22614 - movq %rdi,%r10
22615 movq %rdx,%r11
22616
22617 /* expand byte value */
22618 movzbl %sil,%ecx
22619 movabs $0x0101010101010101,%rax
22620 mul %rcx /* with rax, clobbers rdx */
22621 + movq %rdi,%rdx
22622
22623 /* align dst */
22624 movl %edi,%r9d
22625 @@ -95,7 +97,8 @@ ENTRY(__memset)
22626 jnz .Lloop_1
22627
22628 .Lende:
22629 - movq %r10,%rax
22630 + movq %rdx,%rax
22631 + pax_force_retaddr
22632 ret
22633
22634 CFI_RESTORE_STATE
22635 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22636
22637 #include <asm/cpufeature.h>
22638
22639 - .section .altinstr_replacement,"ax"
22640 + .section .altinstr_replacement,"a"
22641 1: .byte 0xeb /* jmp <disp8> */
22642 .byte (memset_c - memset) - (2f - 1b) /* offset */
22643 2:
22644 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22645 index c9f2d9b..e7fd2c0 100644
22646 --- a/arch/x86/lib/mmx_32.c
22647 +++ b/arch/x86/lib/mmx_32.c
22648 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22649 {
22650 void *p;
22651 int i;
22652 + unsigned long cr0;
22653
22654 if (unlikely(in_interrupt()))
22655 return __memcpy(to, from, len);
22656 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22657 kernel_fpu_begin();
22658
22659 __asm__ __volatile__ (
22660 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22661 - " prefetch 64(%0)\n"
22662 - " prefetch 128(%0)\n"
22663 - " prefetch 192(%0)\n"
22664 - " prefetch 256(%0)\n"
22665 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22666 + " prefetch 64(%1)\n"
22667 + " prefetch 128(%1)\n"
22668 + " prefetch 192(%1)\n"
22669 + " prefetch 256(%1)\n"
22670 "2: \n"
22671 ".section .fixup, \"ax\"\n"
22672 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22673 + "3: \n"
22674 +
22675 +#ifdef CONFIG_PAX_KERNEXEC
22676 + " movl %%cr0, %0\n"
22677 + " movl %0, %%eax\n"
22678 + " andl $0xFFFEFFFF, %%eax\n"
22679 + " movl %%eax, %%cr0\n"
22680 +#endif
22681 +
22682 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22683 +
22684 +#ifdef CONFIG_PAX_KERNEXEC
22685 + " movl %0, %%cr0\n"
22686 +#endif
22687 +
22688 " jmp 2b\n"
22689 ".previous\n"
22690 _ASM_EXTABLE(1b, 3b)
22691 - : : "r" (from));
22692 + : "=&r" (cr0) : "r" (from) : "ax");
22693
22694 for ( ; i > 5; i--) {
22695 __asm__ __volatile__ (
22696 - "1: prefetch 320(%0)\n"
22697 - "2: movq (%0), %%mm0\n"
22698 - " movq 8(%0), %%mm1\n"
22699 - " movq 16(%0), %%mm2\n"
22700 - " movq 24(%0), %%mm3\n"
22701 - " movq %%mm0, (%1)\n"
22702 - " movq %%mm1, 8(%1)\n"
22703 - " movq %%mm2, 16(%1)\n"
22704 - " movq %%mm3, 24(%1)\n"
22705 - " movq 32(%0), %%mm0\n"
22706 - " movq 40(%0), %%mm1\n"
22707 - " movq 48(%0), %%mm2\n"
22708 - " movq 56(%0), %%mm3\n"
22709 - " movq %%mm0, 32(%1)\n"
22710 - " movq %%mm1, 40(%1)\n"
22711 - " movq %%mm2, 48(%1)\n"
22712 - " movq %%mm3, 56(%1)\n"
22713 + "1: prefetch 320(%1)\n"
22714 + "2: movq (%1), %%mm0\n"
22715 + " movq 8(%1), %%mm1\n"
22716 + " movq 16(%1), %%mm2\n"
22717 + " movq 24(%1), %%mm3\n"
22718 + " movq %%mm0, (%2)\n"
22719 + " movq %%mm1, 8(%2)\n"
22720 + " movq %%mm2, 16(%2)\n"
22721 + " movq %%mm3, 24(%2)\n"
22722 + " movq 32(%1), %%mm0\n"
22723 + " movq 40(%1), %%mm1\n"
22724 + " movq 48(%1), %%mm2\n"
22725 + " movq 56(%1), %%mm3\n"
22726 + " movq %%mm0, 32(%2)\n"
22727 + " movq %%mm1, 40(%2)\n"
22728 + " movq %%mm2, 48(%2)\n"
22729 + " movq %%mm3, 56(%2)\n"
22730 ".section .fixup, \"ax\"\n"
22731 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22732 + "3:\n"
22733 +
22734 +#ifdef CONFIG_PAX_KERNEXEC
22735 + " movl %%cr0, %0\n"
22736 + " movl %0, %%eax\n"
22737 + " andl $0xFFFEFFFF, %%eax\n"
22738 + " movl %%eax, %%cr0\n"
22739 +#endif
22740 +
22741 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22742 +
22743 +#ifdef CONFIG_PAX_KERNEXEC
22744 + " movl %0, %%cr0\n"
22745 +#endif
22746 +
22747 " jmp 2b\n"
22748 ".previous\n"
22749 _ASM_EXTABLE(1b, 3b)
22750 - : : "r" (from), "r" (to) : "memory");
22751 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22752
22753 from += 64;
22754 to += 64;
22755 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22756 static void fast_copy_page(void *to, void *from)
22757 {
22758 int i;
22759 + unsigned long cr0;
22760
22761 kernel_fpu_begin();
22762
22763 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22764 * but that is for later. -AV
22765 */
22766 __asm__ __volatile__(
22767 - "1: prefetch (%0)\n"
22768 - " prefetch 64(%0)\n"
22769 - " prefetch 128(%0)\n"
22770 - " prefetch 192(%0)\n"
22771 - " prefetch 256(%0)\n"
22772 + "1: prefetch (%1)\n"
22773 + " prefetch 64(%1)\n"
22774 + " prefetch 128(%1)\n"
22775 + " prefetch 192(%1)\n"
22776 + " prefetch 256(%1)\n"
22777 "2: \n"
22778 ".section .fixup, \"ax\"\n"
22779 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22780 + "3: \n"
22781 +
22782 +#ifdef CONFIG_PAX_KERNEXEC
22783 + " movl %%cr0, %0\n"
22784 + " movl %0, %%eax\n"
22785 + " andl $0xFFFEFFFF, %%eax\n"
22786 + " movl %%eax, %%cr0\n"
22787 +#endif
22788 +
22789 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22790 +
22791 +#ifdef CONFIG_PAX_KERNEXEC
22792 + " movl %0, %%cr0\n"
22793 +#endif
22794 +
22795 " jmp 2b\n"
22796 ".previous\n"
22797 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22798 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22799
22800 for (i = 0; i < (4096-320)/64; i++) {
22801 __asm__ __volatile__ (
22802 - "1: prefetch 320(%0)\n"
22803 - "2: movq (%0), %%mm0\n"
22804 - " movntq %%mm0, (%1)\n"
22805 - " movq 8(%0), %%mm1\n"
22806 - " movntq %%mm1, 8(%1)\n"
22807 - " movq 16(%0), %%mm2\n"
22808 - " movntq %%mm2, 16(%1)\n"
22809 - " movq 24(%0), %%mm3\n"
22810 - " movntq %%mm3, 24(%1)\n"
22811 - " movq 32(%0), %%mm4\n"
22812 - " movntq %%mm4, 32(%1)\n"
22813 - " movq 40(%0), %%mm5\n"
22814 - " movntq %%mm5, 40(%1)\n"
22815 - " movq 48(%0), %%mm6\n"
22816 - " movntq %%mm6, 48(%1)\n"
22817 - " movq 56(%0), %%mm7\n"
22818 - " movntq %%mm7, 56(%1)\n"
22819 + "1: prefetch 320(%1)\n"
22820 + "2: movq (%1), %%mm0\n"
22821 + " movntq %%mm0, (%2)\n"
22822 + " movq 8(%1), %%mm1\n"
22823 + " movntq %%mm1, 8(%2)\n"
22824 + " movq 16(%1), %%mm2\n"
22825 + " movntq %%mm2, 16(%2)\n"
22826 + " movq 24(%1), %%mm3\n"
22827 + " movntq %%mm3, 24(%2)\n"
22828 + " movq 32(%1), %%mm4\n"
22829 + " movntq %%mm4, 32(%2)\n"
22830 + " movq 40(%1), %%mm5\n"
22831 + " movntq %%mm5, 40(%2)\n"
22832 + " movq 48(%1), %%mm6\n"
22833 + " movntq %%mm6, 48(%2)\n"
22834 + " movq 56(%1), %%mm7\n"
22835 + " movntq %%mm7, 56(%2)\n"
22836 ".section .fixup, \"ax\"\n"
22837 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22838 + "3:\n"
22839 +
22840 +#ifdef CONFIG_PAX_KERNEXEC
22841 + " movl %%cr0, %0\n"
22842 + " movl %0, %%eax\n"
22843 + " andl $0xFFFEFFFF, %%eax\n"
22844 + " movl %%eax, %%cr0\n"
22845 +#endif
22846 +
22847 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22848 +
22849 +#ifdef CONFIG_PAX_KERNEXEC
22850 + " movl %0, %%cr0\n"
22851 +#endif
22852 +
22853 " jmp 2b\n"
22854 ".previous\n"
22855 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22856 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22857
22858 from += 64;
22859 to += 64;
22860 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22861 static void fast_copy_page(void *to, void *from)
22862 {
22863 int i;
22864 + unsigned long cr0;
22865
22866 kernel_fpu_begin();
22867
22868 __asm__ __volatile__ (
22869 - "1: prefetch (%0)\n"
22870 - " prefetch 64(%0)\n"
22871 - " prefetch 128(%0)\n"
22872 - " prefetch 192(%0)\n"
22873 - " prefetch 256(%0)\n"
22874 + "1: prefetch (%1)\n"
22875 + " prefetch 64(%1)\n"
22876 + " prefetch 128(%1)\n"
22877 + " prefetch 192(%1)\n"
22878 + " prefetch 256(%1)\n"
22879 "2: \n"
22880 ".section .fixup, \"ax\"\n"
22881 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22882 + "3: \n"
22883 +
22884 +#ifdef CONFIG_PAX_KERNEXEC
22885 + " movl %%cr0, %0\n"
22886 + " movl %0, %%eax\n"
22887 + " andl $0xFFFEFFFF, %%eax\n"
22888 + " movl %%eax, %%cr0\n"
22889 +#endif
22890 +
22891 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22892 +
22893 +#ifdef CONFIG_PAX_KERNEXEC
22894 + " movl %0, %%cr0\n"
22895 +#endif
22896 +
22897 " jmp 2b\n"
22898 ".previous\n"
22899 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22900 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22901
22902 for (i = 0; i < 4096/64; i++) {
22903 __asm__ __volatile__ (
22904 - "1: prefetch 320(%0)\n"
22905 - "2: movq (%0), %%mm0\n"
22906 - " movq 8(%0), %%mm1\n"
22907 - " movq 16(%0), %%mm2\n"
22908 - " movq 24(%0), %%mm3\n"
22909 - " movq %%mm0, (%1)\n"
22910 - " movq %%mm1, 8(%1)\n"
22911 - " movq %%mm2, 16(%1)\n"
22912 - " movq %%mm3, 24(%1)\n"
22913 - " movq 32(%0), %%mm0\n"
22914 - " movq 40(%0), %%mm1\n"
22915 - " movq 48(%0), %%mm2\n"
22916 - " movq 56(%0), %%mm3\n"
22917 - " movq %%mm0, 32(%1)\n"
22918 - " movq %%mm1, 40(%1)\n"
22919 - " movq %%mm2, 48(%1)\n"
22920 - " movq %%mm3, 56(%1)\n"
22921 + "1: prefetch 320(%1)\n"
22922 + "2: movq (%1), %%mm0\n"
22923 + " movq 8(%1), %%mm1\n"
22924 + " movq 16(%1), %%mm2\n"
22925 + " movq 24(%1), %%mm3\n"
22926 + " movq %%mm0, (%2)\n"
22927 + " movq %%mm1, 8(%2)\n"
22928 + " movq %%mm2, 16(%2)\n"
22929 + " movq %%mm3, 24(%2)\n"
22930 + " movq 32(%1), %%mm0\n"
22931 + " movq 40(%1), %%mm1\n"
22932 + " movq 48(%1), %%mm2\n"
22933 + " movq 56(%1), %%mm3\n"
22934 + " movq %%mm0, 32(%2)\n"
22935 + " movq %%mm1, 40(%2)\n"
22936 + " movq %%mm2, 48(%2)\n"
22937 + " movq %%mm3, 56(%2)\n"
22938 ".section .fixup, \"ax\"\n"
22939 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22940 + "3:\n"
22941 +
22942 +#ifdef CONFIG_PAX_KERNEXEC
22943 + " movl %%cr0, %0\n"
22944 + " movl %0, %%eax\n"
22945 + " andl $0xFFFEFFFF, %%eax\n"
22946 + " movl %%eax, %%cr0\n"
22947 +#endif
22948 +
22949 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22950 +
22951 +#ifdef CONFIG_PAX_KERNEXEC
22952 + " movl %0, %%cr0\n"
22953 +#endif
22954 +
22955 " jmp 2b\n"
22956 ".previous\n"
22957 _ASM_EXTABLE(1b, 3b)
22958 - : : "r" (from), "r" (to) : "memory");
22959 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22960
22961 from += 64;
22962 to += 64;
22963 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22964 index 69fa106..adda88b 100644
22965 --- a/arch/x86/lib/msr-reg.S
22966 +++ b/arch/x86/lib/msr-reg.S
22967 @@ -3,6 +3,7 @@
22968 #include <asm/dwarf2.h>
22969 #include <asm/asm.h>
22970 #include <asm/msr.h>
22971 +#include <asm/alternative-asm.h>
22972
22973 #ifdef CONFIG_X86_64
22974 /*
22975 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22976 CFI_STARTPROC
22977 pushq_cfi %rbx
22978 pushq_cfi %rbp
22979 - movq %rdi, %r10 /* Save pointer */
22980 + movq %rdi, %r9 /* Save pointer */
22981 xorl %r11d, %r11d /* Return value */
22982 movl (%rdi), %eax
22983 movl 4(%rdi), %ecx
22984 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22985 movl 28(%rdi), %edi
22986 CFI_REMEMBER_STATE
22987 1: \op
22988 -2: movl %eax, (%r10)
22989 +2: movl %eax, (%r9)
22990 movl %r11d, %eax /* Return value */
22991 - movl %ecx, 4(%r10)
22992 - movl %edx, 8(%r10)
22993 - movl %ebx, 12(%r10)
22994 - movl %ebp, 20(%r10)
22995 - movl %esi, 24(%r10)
22996 - movl %edi, 28(%r10)
22997 + movl %ecx, 4(%r9)
22998 + movl %edx, 8(%r9)
22999 + movl %ebx, 12(%r9)
23000 + movl %ebp, 20(%r9)
23001 + movl %esi, 24(%r9)
23002 + movl %edi, 28(%r9)
23003 popq_cfi %rbp
23004 popq_cfi %rbx
23005 + pax_force_retaddr
23006 ret
23007 3:
23008 CFI_RESTORE_STATE
23009 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23010 index 36b0d15..d381858 100644
23011 --- a/arch/x86/lib/putuser.S
23012 +++ b/arch/x86/lib/putuser.S
23013 @@ -15,7 +15,9 @@
23014 #include <asm/thread_info.h>
23015 #include <asm/errno.h>
23016 #include <asm/asm.h>
23017 -
23018 +#include <asm/segment.h>
23019 +#include <asm/pgtable.h>
23020 +#include <asm/alternative-asm.h>
23021
23022 /*
23023 * __put_user_X
23024 @@ -29,52 +31,119 @@
23025 * as they get called from within inline assembly.
23026 */
23027
23028 -#define ENTER CFI_STARTPROC ; \
23029 - GET_THREAD_INFO(%_ASM_BX)
23030 -#define EXIT ret ; \
23031 +#define ENTER CFI_STARTPROC
23032 +#define EXIT pax_force_retaddr; ret ; \
23033 CFI_ENDPROC
23034
23035 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23036 +#define _DEST %_ASM_CX,%_ASM_BX
23037 +#else
23038 +#define _DEST %_ASM_CX
23039 +#endif
23040 +
23041 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23042 +#define __copyuser_seg gs;
23043 +#else
23044 +#define __copyuser_seg
23045 +#endif
23046 +
23047 .text
23048 ENTRY(__put_user_1)
23049 ENTER
23050 +
23051 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23052 + GET_THREAD_INFO(%_ASM_BX)
23053 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23054 jae bad_put_user
23055 -1: movb %al,(%_ASM_CX)
23056 +
23057 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23058 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23059 + cmp %_ASM_BX,%_ASM_CX
23060 + jb 1234f
23061 + xor %ebx,%ebx
23062 +1234:
23063 +#endif
23064 +
23065 +#endif
23066 +
23067 +1: __copyuser_seg movb %al,(_DEST)
23068 xor %eax,%eax
23069 EXIT
23070 ENDPROC(__put_user_1)
23071
23072 ENTRY(__put_user_2)
23073 ENTER
23074 +
23075 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23076 + GET_THREAD_INFO(%_ASM_BX)
23077 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23078 sub $1,%_ASM_BX
23079 cmp %_ASM_BX,%_ASM_CX
23080 jae bad_put_user
23081 -2: movw %ax,(%_ASM_CX)
23082 +
23083 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23084 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23085 + cmp %_ASM_BX,%_ASM_CX
23086 + jb 1234f
23087 + xor %ebx,%ebx
23088 +1234:
23089 +#endif
23090 +
23091 +#endif
23092 +
23093 +2: __copyuser_seg movw %ax,(_DEST)
23094 xor %eax,%eax
23095 EXIT
23096 ENDPROC(__put_user_2)
23097
23098 ENTRY(__put_user_4)
23099 ENTER
23100 +
23101 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23102 + GET_THREAD_INFO(%_ASM_BX)
23103 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23104 sub $3,%_ASM_BX
23105 cmp %_ASM_BX,%_ASM_CX
23106 jae bad_put_user
23107 -3: movl %eax,(%_ASM_CX)
23108 +
23109 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23110 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23111 + cmp %_ASM_BX,%_ASM_CX
23112 + jb 1234f
23113 + xor %ebx,%ebx
23114 +1234:
23115 +#endif
23116 +
23117 +#endif
23118 +
23119 +3: __copyuser_seg movl %eax,(_DEST)
23120 xor %eax,%eax
23121 EXIT
23122 ENDPROC(__put_user_4)
23123
23124 ENTRY(__put_user_8)
23125 ENTER
23126 +
23127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23128 + GET_THREAD_INFO(%_ASM_BX)
23129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23130 sub $7,%_ASM_BX
23131 cmp %_ASM_BX,%_ASM_CX
23132 jae bad_put_user
23133 -4: mov %_ASM_AX,(%_ASM_CX)
23134 +
23135 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23136 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23137 + cmp %_ASM_BX,%_ASM_CX
23138 + jb 1234f
23139 + xor %ebx,%ebx
23140 +1234:
23141 +#endif
23142 +
23143 +#endif
23144 +
23145 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23146 #ifdef CONFIG_X86_32
23147 -5: movl %edx,4(%_ASM_CX)
23148 +5: __copyuser_seg movl %edx,4(_DEST)
23149 #endif
23150 xor %eax,%eax
23151 EXIT
23152 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23153 index 05ea55f..6345b9a 100644
23154 --- a/arch/x86/lib/rwlock_64.S
23155 +++ b/arch/x86/lib/rwlock_64.S
23156 @@ -2,6 +2,7 @@
23157
23158 #include <linux/linkage.h>
23159 #include <asm/rwlock.h>
23160 +#include <asm/asm.h>
23161 #include <asm/alternative-asm.h>
23162 #include <asm/dwarf2.h>
23163
23164 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23165 CFI_STARTPROC
23166 LOCK_PREFIX
23167 addl $RW_LOCK_BIAS,(%rdi)
23168 +
23169 +#ifdef CONFIG_PAX_REFCOUNT
23170 + jno 1234f
23171 + LOCK_PREFIX
23172 + subl $RW_LOCK_BIAS,(%rdi)
23173 + int $4
23174 +1234:
23175 + _ASM_EXTABLE(1234b, 1234b)
23176 +#endif
23177 +
23178 1: rep
23179 nop
23180 cmpl $RW_LOCK_BIAS,(%rdi)
23181 jne 1b
23182 LOCK_PREFIX
23183 subl $RW_LOCK_BIAS,(%rdi)
23184 +
23185 +#ifdef CONFIG_PAX_REFCOUNT
23186 + jno 1234f
23187 + LOCK_PREFIX
23188 + addl $RW_LOCK_BIAS,(%rdi)
23189 + int $4
23190 +1234:
23191 + _ASM_EXTABLE(1234b, 1234b)
23192 +#endif
23193 +
23194 jnz __write_lock_failed
23195 + pax_force_retaddr
23196 ret
23197 CFI_ENDPROC
23198 END(__write_lock_failed)
23199 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23200 CFI_STARTPROC
23201 LOCK_PREFIX
23202 incl (%rdi)
23203 +
23204 +#ifdef CONFIG_PAX_REFCOUNT
23205 + jno 1234f
23206 + LOCK_PREFIX
23207 + decl (%rdi)
23208 + int $4
23209 +1234:
23210 + _ASM_EXTABLE(1234b, 1234b)
23211 +#endif
23212 +
23213 1: rep
23214 nop
23215 cmpl $1,(%rdi)
23216 js 1b
23217 LOCK_PREFIX
23218 decl (%rdi)
23219 +
23220 +#ifdef CONFIG_PAX_REFCOUNT
23221 + jno 1234f
23222 + LOCK_PREFIX
23223 + incl (%rdi)
23224 + int $4
23225 +1234:
23226 + _ASM_EXTABLE(1234b, 1234b)
23227 +#endif
23228 +
23229 js __read_lock_failed
23230 + pax_force_retaddr
23231 ret
23232 CFI_ENDPROC
23233 END(__read_lock_failed)
23234 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23235 index 15acecf..f768b10 100644
23236 --- a/arch/x86/lib/rwsem_64.S
23237 +++ b/arch/x86/lib/rwsem_64.S
23238 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23239 call rwsem_down_read_failed
23240 popq %rdx
23241 restore_common_regs
23242 + pax_force_retaddr
23243 ret
23244 ENDPROC(call_rwsem_down_read_failed)
23245
23246 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23247 movq %rax,%rdi
23248 call rwsem_down_write_failed
23249 restore_common_regs
23250 + pax_force_retaddr
23251 ret
23252 ENDPROC(call_rwsem_down_write_failed)
23253
23254 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23255 movq %rax,%rdi
23256 call rwsem_wake
23257 restore_common_regs
23258 -1: ret
23259 +1: pax_force_retaddr
23260 + ret
23261 ENDPROC(call_rwsem_wake)
23262
23263 /* Fix up special calling conventions */
23264 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23265 call rwsem_downgrade_wake
23266 popq %rdx
23267 restore_common_regs
23268 + pax_force_retaddr
23269 ret
23270 ENDPROC(call_rwsem_downgrade_wake)
23271 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23272 index bf9a7d5..fb06ab5 100644
23273 --- a/arch/x86/lib/thunk_64.S
23274 +++ b/arch/x86/lib/thunk_64.S
23275 @@ -10,7 +10,8 @@
23276 #include <asm/dwarf2.h>
23277 #include <asm/calling.h>
23278 #include <asm/rwlock.h>
23279 -
23280 + #include <asm/alternative-asm.h>
23281 +
23282 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23283 .macro thunk name,func
23284 .globl \name
23285 @@ -70,6 +71,7 @@
23286 SAVE_ARGS
23287 restore:
23288 RESTORE_ARGS
23289 + pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292
23293 @@ -77,5 +79,6 @@ restore:
23294 SAVE_ARGS
23295 restore_norax:
23296 RESTORE_ARGS 1
23297 + pax_force_retaddr
23298 ret
23299 CFI_ENDPROC
23300 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23301 index 1f118d4..ec4a953 100644
23302 --- a/arch/x86/lib/usercopy_32.c
23303 +++ b/arch/x86/lib/usercopy_32.c
23304 @@ -43,7 +43,7 @@ do { \
23305 __asm__ __volatile__( \
23306 " testl %1,%1\n" \
23307 " jz 2f\n" \
23308 - "0: lodsb\n" \
23309 + "0: "__copyuser_seg"lodsb\n" \
23310 " stosb\n" \
23311 " testb %%al,%%al\n" \
23312 " jz 1f\n" \
23313 @@ -128,10 +128,12 @@ do { \
23314 int __d0; \
23315 might_fault(); \
23316 __asm__ __volatile__( \
23317 + __COPYUSER_SET_ES \
23318 "0: rep; stosl\n" \
23319 " movl %2,%0\n" \
23320 "1: rep; stosb\n" \
23321 "2:\n" \
23322 + __COPYUSER_RESTORE_ES \
23323 ".section .fixup,\"ax\"\n" \
23324 "3: lea 0(%2,%0,4),%0\n" \
23325 " jmp 2b\n" \
23326 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23327 might_fault();
23328
23329 __asm__ __volatile__(
23330 + __COPYUSER_SET_ES
23331 " testl %0, %0\n"
23332 " jz 3f\n"
23333 " andl %0,%%ecx\n"
23334 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23335 " subl %%ecx,%0\n"
23336 " addl %0,%%eax\n"
23337 "1:\n"
23338 + __COPYUSER_RESTORE_ES
23339 ".section .fixup,\"ax\"\n"
23340 "2: xorl %%eax,%%eax\n"
23341 " jmp 1b\n"
23342 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23343
23344 #ifdef CONFIG_X86_INTEL_USERCOPY
23345 static unsigned long
23346 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23347 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23348 {
23349 int d0, d1;
23350 __asm__ __volatile__(
23351 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23352 " .align 2,0x90\n"
23353 "3: movl 0(%4), %%eax\n"
23354 "4: movl 4(%4), %%edx\n"
23355 - "5: movl %%eax, 0(%3)\n"
23356 - "6: movl %%edx, 4(%3)\n"
23357 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23358 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23359 "7: movl 8(%4), %%eax\n"
23360 "8: movl 12(%4),%%edx\n"
23361 - "9: movl %%eax, 8(%3)\n"
23362 - "10: movl %%edx, 12(%3)\n"
23363 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23364 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23365 "11: movl 16(%4), %%eax\n"
23366 "12: movl 20(%4), %%edx\n"
23367 - "13: movl %%eax, 16(%3)\n"
23368 - "14: movl %%edx, 20(%3)\n"
23369 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23370 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23371 "15: movl 24(%4), %%eax\n"
23372 "16: movl 28(%4), %%edx\n"
23373 - "17: movl %%eax, 24(%3)\n"
23374 - "18: movl %%edx, 28(%3)\n"
23375 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23376 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23377 "19: movl 32(%4), %%eax\n"
23378 "20: movl 36(%4), %%edx\n"
23379 - "21: movl %%eax, 32(%3)\n"
23380 - "22: movl %%edx, 36(%3)\n"
23381 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23382 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23383 "23: movl 40(%4), %%eax\n"
23384 "24: movl 44(%4), %%edx\n"
23385 - "25: movl %%eax, 40(%3)\n"
23386 - "26: movl %%edx, 44(%3)\n"
23387 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23388 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23389 "27: movl 48(%4), %%eax\n"
23390 "28: movl 52(%4), %%edx\n"
23391 - "29: movl %%eax, 48(%3)\n"
23392 - "30: movl %%edx, 52(%3)\n"
23393 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23394 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23395 "31: movl 56(%4), %%eax\n"
23396 "32: movl 60(%4), %%edx\n"
23397 - "33: movl %%eax, 56(%3)\n"
23398 - "34: movl %%edx, 60(%3)\n"
23399 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23400 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23401 " addl $-64, %0\n"
23402 " addl $64, %4\n"
23403 " addl $64, %3\n"
23404 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23405 " shrl $2, %0\n"
23406 " andl $3, %%eax\n"
23407 " cld\n"
23408 + __COPYUSER_SET_ES
23409 "99: rep; movsl\n"
23410 "36: movl %%eax, %0\n"
23411 "37: rep; movsb\n"
23412 "100:\n"
23413 + __COPYUSER_RESTORE_ES
23414 + ".section .fixup,\"ax\"\n"
23415 + "101: lea 0(%%eax,%0,4),%0\n"
23416 + " jmp 100b\n"
23417 + ".previous\n"
23418 + ".section __ex_table,\"a\"\n"
23419 + " .align 4\n"
23420 + " .long 1b,100b\n"
23421 + " .long 2b,100b\n"
23422 + " .long 3b,100b\n"
23423 + " .long 4b,100b\n"
23424 + " .long 5b,100b\n"
23425 + " .long 6b,100b\n"
23426 + " .long 7b,100b\n"
23427 + " .long 8b,100b\n"
23428 + " .long 9b,100b\n"
23429 + " .long 10b,100b\n"
23430 + " .long 11b,100b\n"
23431 + " .long 12b,100b\n"
23432 + " .long 13b,100b\n"
23433 + " .long 14b,100b\n"
23434 + " .long 15b,100b\n"
23435 + " .long 16b,100b\n"
23436 + " .long 17b,100b\n"
23437 + " .long 18b,100b\n"
23438 + " .long 19b,100b\n"
23439 + " .long 20b,100b\n"
23440 + " .long 21b,100b\n"
23441 + " .long 22b,100b\n"
23442 + " .long 23b,100b\n"
23443 + " .long 24b,100b\n"
23444 + " .long 25b,100b\n"
23445 + " .long 26b,100b\n"
23446 + " .long 27b,100b\n"
23447 + " .long 28b,100b\n"
23448 + " .long 29b,100b\n"
23449 + " .long 30b,100b\n"
23450 + " .long 31b,100b\n"
23451 + " .long 32b,100b\n"
23452 + " .long 33b,100b\n"
23453 + " .long 34b,100b\n"
23454 + " .long 35b,100b\n"
23455 + " .long 36b,100b\n"
23456 + " .long 37b,100b\n"
23457 + " .long 99b,101b\n"
23458 + ".previous"
23459 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23460 + : "1"(to), "2"(from), "0"(size)
23461 + : "eax", "edx", "memory");
23462 + return size;
23463 +}
23464 +
23465 +static unsigned long
23466 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23467 +{
23468 + int d0, d1;
23469 + __asm__ __volatile__(
23470 + " .align 2,0x90\n"
23471 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23472 + " cmpl $67, %0\n"
23473 + " jbe 3f\n"
23474 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23475 + " .align 2,0x90\n"
23476 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23477 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23478 + "5: movl %%eax, 0(%3)\n"
23479 + "6: movl %%edx, 4(%3)\n"
23480 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23481 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23482 + "9: movl %%eax, 8(%3)\n"
23483 + "10: movl %%edx, 12(%3)\n"
23484 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23485 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23486 + "13: movl %%eax, 16(%3)\n"
23487 + "14: movl %%edx, 20(%3)\n"
23488 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23489 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23490 + "17: movl %%eax, 24(%3)\n"
23491 + "18: movl %%edx, 28(%3)\n"
23492 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23493 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23494 + "21: movl %%eax, 32(%3)\n"
23495 + "22: movl %%edx, 36(%3)\n"
23496 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23497 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23498 + "25: movl %%eax, 40(%3)\n"
23499 + "26: movl %%edx, 44(%3)\n"
23500 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23501 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23502 + "29: movl %%eax, 48(%3)\n"
23503 + "30: movl %%edx, 52(%3)\n"
23504 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23505 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23506 + "33: movl %%eax, 56(%3)\n"
23507 + "34: movl %%edx, 60(%3)\n"
23508 + " addl $-64, %0\n"
23509 + " addl $64, %4\n"
23510 + " addl $64, %3\n"
23511 + " cmpl $63, %0\n"
23512 + " ja 1b\n"
23513 + "35: movl %0, %%eax\n"
23514 + " shrl $2, %0\n"
23515 + " andl $3, %%eax\n"
23516 + " cld\n"
23517 + "99: rep; "__copyuser_seg" movsl\n"
23518 + "36: movl %%eax, %0\n"
23519 + "37: rep; "__copyuser_seg" movsb\n"
23520 + "100:\n"
23521 ".section .fixup,\"ax\"\n"
23522 "101: lea 0(%%eax,%0,4),%0\n"
23523 " jmp 100b\n"
23524 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23525 int d0, d1;
23526 __asm__ __volatile__(
23527 " .align 2,0x90\n"
23528 - "0: movl 32(%4), %%eax\n"
23529 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23530 " cmpl $67, %0\n"
23531 " jbe 2f\n"
23532 - "1: movl 64(%4), %%eax\n"
23533 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23534 " .align 2,0x90\n"
23535 - "2: movl 0(%4), %%eax\n"
23536 - "21: movl 4(%4), %%edx\n"
23537 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23538 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23539 " movl %%eax, 0(%3)\n"
23540 " movl %%edx, 4(%3)\n"
23541 - "3: movl 8(%4), %%eax\n"
23542 - "31: movl 12(%4),%%edx\n"
23543 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23544 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23545 " movl %%eax, 8(%3)\n"
23546 " movl %%edx, 12(%3)\n"
23547 - "4: movl 16(%4), %%eax\n"
23548 - "41: movl 20(%4), %%edx\n"
23549 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23550 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23551 " movl %%eax, 16(%3)\n"
23552 " movl %%edx, 20(%3)\n"
23553 - "10: movl 24(%4), %%eax\n"
23554 - "51: movl 28(%4), %%edx\n"
23555 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23556 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23557 " movl %%eax, 24(%3)\n"
23558 " movl %%edx, 28(%3)\n"
23559 - "11: movl 32(%4), %%eax\n"
23560 - "61: movl 36(%4), %%edx\n"
23561 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23562 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23563 " movl %%eax, 32(%3)\n"
23564 " movl %%edx, 36(%3)\n"
23565 - "12: movl 40(%4), %%eax\n"
23566 - "71: movl 44(%4), %%edx\n"
23567 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23568 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23569 " movl %%eax, 40(%3)\n"
23570 " movl %%edx, 44(%3)\n"
23571 - "13: movl 48(%4), %%eax\n"
23572 - "81: movl 52(%4), %%edx\n"
23573 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23574 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23575 " movl %%eax, 48(%3)\n"
23576 " movl %%edx, 52(%3)\n"
23577 - "14: movl 56(%4), %%eax\n"
23578 - "91: movl 60(%4), %%edx\n"
23579 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23580 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23581 " movl %%eax, 56(%3)\n"
23582 " movl %%edx, 60(%3)\n"
23583 " addl $-64, %0\n"
23584 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23585 " shrl $2, %0\n"
23586 " andl $3, %%eax\n"
23587 " cld\n"
23588 - "6: rep; movsl\n"
23589 + "6: rep; "__copyuser_seg" movsl\n"
23590 " movl %%eax,%0\n"
23591 - "7: rep; movsb\n"
23592 + "7: rep; "__copyuser_seg" movsb\n"
23593 "8:\n"
23594 ".section .fixup,\"ax\"\n"
23595 "9: lea 0(%%eax,%0,4),%0\n"
23596 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23597
23598 __asm__ __volatile__(
23599 " .align 2,0x90\n"
23600 - "0: movl 32(%4), %%eax\n"
23601 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23602 " cmpl $67, %0\n"
23603 " jbe 2f\n"
23604 - "1: movl 64(%4), %%eax\n"
23605 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23606 " .align 2,0x90\n"
23607 - "2: movl 0(%4), %%eax\n"
23608 - "21: movl 4(%4), %%edx\n"
23609 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23610 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23611 " movnti %%eax, 0(%3)\n"
23612 " movnti %%edx, 4(%3)\n"
23613 - "3: movl 8(%4), %%eax\n"
23614 - "31: movl 12(%4),%%edx\n"
23615 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23616 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23617 " movnti %%eax, 8(%3)\n"
23618 " movnti %%edx, 12(%3)\n"
23619 - "4: movl 16(%4), %%eax\n"
23620 - "41: movl 20(%4), %%edx\n"
23621 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23622 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23623 " movnti %%eax, 16(%3)\n"
23624 " movnti %%edx, 20(%3)\n"
23625 - "10: movl 24(%4), %%eax\n"
23626 - "51: movl 28(%4), %%edx\n"
23627 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23628 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23629 " movnti %%eax, 24(%3)\n"
23630 " movnti %%edx, 28(%3)\n"
23631 - "11: movl 32(%4), %%eax\n"
23632 - "61: movl 36(%4), %%edx\n"
23633 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23634 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23635 " movnti %%eax, 32(%3)\n"
23636 " movnti %%edx, 36(%3)\n"
23637 - "12: movl 40(%4), %%eax\n"
23638 - "71: movl 44(%4), %%edx\n"
23639 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23640 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23641 " movnti %%eax, 40(%3)\n"
23642 " movnti %%edx, 44(%3)\n"
23643 - "13: movl 48(%4), %%eax\n"
23644 - "81: movl 52(%4), %%edx\n"
23645 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23646 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23647 " movnti %%eax, 48(%3)\n"
23648 " movnti %%edx, 52(%3)\n"
23649 - "14: movl 56(%4), %%eax\n"
23650 - "91: movl 60(%4), %%edx\n"
23651 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23652 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23653 " movnti %%eax, 56(%3)\n"
23654 " movnti %%edx, 60(%3)\n"
23655 " addl $-64, %0\n"
23656 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23657 " shrl $2, %0\n"
23658 " andl $3, %%eax\n"
23659 " cld\n"
23660 - "6: rep; movsl\n"
23661 + "6: rep; "__copyuser_seg" movsl\n"
23662 " movl %%eax,%0\n"
23663 - "7: rep; movsb\n"
23664 + "7: rep; "__copyuser_seg" movsb\n"
23665 "8:\n"
23666 ".section .fixup,\"ax\"\n"
23667 "9: lea 0(%%eax,%0,4),%0\n"
23668 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23669
23670 __asm__ __volatile__(
23671 " .align 2,0x90\n"
23672 - "0: movl 32(%4), %%eax\n"
23673 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23674 " cmpl $67, %0\n"
23675 " jbe 2f\n"
23676 - "1: movl 64(%4), %%eax\n"
23677 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23678 " .align 2,0x90\n"
23679 - "2: movl 0(%4), %%eax\n"
23680 - "21: movl 4(%4), %%edx\n"
23681 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23682 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23683 " movnti %%eax, 0(%3)\n"
23684 " movnti %%edx, 4(%3)\n"
23685 - "3: movl 8(%4), %%eax\n"
23686 - "31: movl 12(%4),%%edx\n"
23687 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23688 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23689 " movnti %%eax, 8(%3)\n"
23690 " movnti %%edx, 12(%3)\n"
23691 - "4: movl 16(%4), %%eax\n"
23692 - "41: movl 20(%4), %%edx\n"
23693 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23694 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23695 " movnti %%eax, 16(%3)\n"
23696 " movnti %%edx, 20(%3)\n"
23697 - "10: movl 24(%4), %%eax\n"
23698 - "51: movl 28(%4), %%edx\n"
23699 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23700 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23701 " movnti %%eax, 24(%3)\n"
23702 " movnti %%edx, 28(%3)\n"
23703 - "11: movl 32(%4), %%eax\n"
23704 - "61: movl 36(%4), %%edx\n"
23705 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23706 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23707 " movnti %%eax, 32(%3)\n"
23708 " movnti %%edx, 36(%3)\n"
23709 - "12: movl 40(%4), %%eax\n"
23710 - "71: movl 44(%4), %%edx\n"
23711 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23712 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23713 " movnti %%eax, 40(%3)\n"
23714 " movnti %%edx, 44(%3)\n"
23715 - "13: movl 48(%4), %%eax\n"
23716 - "81: movl 52(%4), %%edx\n"
23717 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23718 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23719 " movnti %%eax, 48(%3)\n"
23720 " movnti %%edx, 52(%3)\n"
23721 - "14: movl 56(%4), %%eax\n"
23722 - "91: movl 60(%4), %%edx\n"
23723 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23724 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23725 " movnti %%eax, 56(%3)\n"
23726 " movnti %%edx, 60(%3)\n"
23727 " addl $-64, %0\n"
23728 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23729 " shrl $2, %0\n"
23730 " andl $3, %%eax\n"
23731 " cld\n"
23732 - "6: rep; movsl\n"
23733 + "6: rep; "__copyuser_seg" movsl\n"
23734 " movl %%eax,%0\n"
23735 - "7: rep; movsb\n"
23736 + "7: rep; "__copyuser_seg" movsb\n"
23737 "8:\n"
23738 ".section .fixup,\"ax\"\n"
23739 "9: lea 0(%%eax,%0,4),%0\n"
23740 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23741 */
23742 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23743 unsigned long size);
23744 -unsigned long __copy_user_intel(void __user *to, const void *from,
23745 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23746 + unsigned long size);
23747 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23748 unsigned long size);
23749 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23750 const void __user *from, unsigned long size);
23751 #endif /* CONFIG_X86_INTEL_USERCOPY */
23752
23753 /* Generic arbitrary sized copy. */
23754 -#define __copy_user(to, from, size) \
23755 +#define __copy_user(to, from, size, prefix, set, restore) \
23756 do { \
23757 int __d0, __d1, __d2; \
23758 __asm__ __volatile__( \
23759 + set \
23760 " cmp $7,%0\n" \
23761 " jbe 1f\n" \
23762 " movl %1,%0\n" \
23763 " negl %0\n" \
23764 " andl $7,%0\n" \
23765 " subl %0,%3\n" \
23766 - "4: rep; movsb\n" \
23767 + "4: rep; "prefix"movsb\n" \
23768 " movl %3,%0\n" \
23769 " shrl $2,%0\n" \
23770 " andl $3,%3\n" \
23771 " .align 2,0x90\n" \
23772 - "0: rep; movsl\n" \
23773 + "0: rep; "prefix"movsl\n" \
23774 " movl %3,%0\n" \
23775 - "1: rep; movsb\n" \
23776 + "1: rep; "prefix"movsb\n" \
23777 "2:\n" \
23778 + restore \
23779 ".section .fixup,\"ax\"\n" \
23780 "5: addl %3,%0\n" \
23781 " jmp 2b\n" \
23782 @@ -682,14 +799,14 @@ do { \
23783 " negl %0\n" \
23784 " andl $7,%0\n" \
23785 " subl %0,%3\n" \
23786 - "4: rep; movsb\n" \
23787 + "4: rep; "__copyuser_seg"movsb\n" \
23788 " movl %3,%0\n" \
23789 " shrl $2,%0\n" \
23790 " andl $3,%3\n" \
23791 " .align 2,0x90\n" \
23792 - "0: rep; movsl\n" \
23793 + "0: rep; "__copyuser_seg"movsl\n" \
23794 " movl %3,%0\n" \
23795 - "1: rep; movsb\n" \
23796 + "1: rep; "__copyuser_seg"movsb\n" \
23797 "2:\n" \
23798 ".section .fixup,\"ax\"\n" \
23799 "5: addl %3,%0\n" \
23800 @@ -775,9 +892,9 @@ survive:
23801 }
23802 #endif
23803 if (movsl_is_ok(to, from, n))
23804 - __copy_user(to, from, n);
23805 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23806 else
23807 - n = __copy_user_intel(to, from, n);
23808 + n = __generic_copy_to_user_intel(to, from, n);
23809 return n;
23810 }
23811 EXPORT_SYMBOL(__copy_to_user_ll);
23812 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23813 unsigned long n)
23814 {
23815 if (movsl_is_ok(to, from, n))
23816 - __copy_user(to, from, n);
23817 + __copy_user(to, from, n, __copyuser_seg, "", "");
23818 else
23819 - n = __copy_user_intel((void __user *)to,
23820 - (const void *)from, n);
23821 + n = __generic_copy_from_user_intel(to, from, n);
23822 return n;
23823 }
23824 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23825 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23826 if (n > 64 && cpu_has_xmm2)
23827 n = __copy_user_intel_nocache(to, from, n);
23828 else
23829 - __copy_user(to, from, n);
23830 + __copy_user(to, from, n, __copyuser_seg, "", "");
23831 #else
23832 - __copy_user(to, from, n);
23833 + __copy_user(to, from, n, __copyuser_seg, "", "");
23834 #endif
23835 return n;
23836 }
23837 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23838
23839 -/**
23840 - * copy_to_user: - Copy a block of data into user space.
23841 - * @to: Destination address, in user space.
23842 - * @from: Source address, in kernel space.
23843 - * @n: Number of bytes to copy.
23844 - *
23845 - * Context: User context only. This function may sleep.
23846 - *
23847 - * Copy data from kernel space to user space.
23848 - *
23849 - * Returns number of bytes that could not be copied.
23850 - * On success, this will be zero.
23851 - */
23852 -unsigned long
23853 -copy_to_user(void __user *to, const void *from, unsigned long n)
23854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23855 +void __set_fs(mm_segment_t x)
23856 {
23857 - if (access_ok(VERIFY_WRITE, to, n))
23858 - n = __copy_to_user(to, from, n);
23859 - return n;
23860 + switch (x.seg) {
23861 + case 0:
23862 + loadsegment(gs, 0);
23863 + break;
23864 + case TASK_SIZE_MAX:
23865 + loadsegment(gs, __USER_DS);
23866 + break;
23867 + case -1UL:
23868 + loadsegment(gs, __KERNEL_DS);
23869 + break;
23870 + default:
23871 + BUG();
23872 + }
23873 + return;
23874 }
23875 -EXPORT_SYMBOL(copy_to_user);
23876 +EXPORT_SYMBOL(__set_fs);
23877
23878 -/**
23879 - * copy_from_user: - Copy a block of data from user space.
23880 - * @to: Destination address, in kernel space.
23881 - * @from: Source address, in user space.
23882 - * @n: Number of bytes to copy.
23883 - *
23884 - * Context: User context only. This function may sleep.
23885 - *
23886 - * Copy data from user space to kernel space.
23887 - *
23888 - * Returns number of bytes that could not be copied.
23889 - * On success, this will be zero.
23890 - *
23891 - * If some data could not be copied, this function will pad the copied
23892 - * data to the requested size using zero bytes.
23893 - */
23894 -unsigned long
23895 -copy_from_user(void *to, const void __user *from, unsigned long n)
23896 +void set_fs(mm_segment_t x)
23897 {
23898 - if (access_ok(VERIFY_READ, from, n))
23899 - n = __copy_from_user(to, from, n);
23900 - else
23901 - memset(to, 0, n);
23902 - return n;
23903 + current_thread_info()->addr_limit = x;
23904 + __set_fs(x);
23905 }
23906 -EXPORT_SYMBOL(copy_from_user);
23907 +EXPORT_SYMBOL(set_fs);
23908 +#endif
23909 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23910 index b7c2849..8633ad8 100644
23911 --- a/arch/x86/lib/usercopy_64.c
23912 +++ b/arch/x86/lib/usercopy_64.c
23913 @@ -42,6 +42,12 @@ long
23914 __strncpy_from_user(char *dst, const char __user *src, long count)
23915 {
23916 long res;
23917 +
23918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23919 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23920 + src += PAX_USER_SHADOW_BASE;
23921 +#endif
23922 +
23923 __do_strncpy_from_user(dst, src, count, res);
23924 return res;
23925 }
23926 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23927 {
23928 long __d0;
23929 might_fault();
23930 +
23931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23932 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23933 + addr += PAX_USER_SHADOW_BASE;
23934 +#endif
23935 +
23936 /* no memory constraint because it doesn't change any memory gcc knows
23937 about */
23938 asm volatile(
23939 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23940 }
23941 EXPORT_SYMBOL(strlen_user);
23942
23943 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23944 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23945 {
23946 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23947 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23948 - }
23949 - return len;
23950 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23951 +
23952 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23953 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23954 + to += PAX_USER_SHADOW_BASE;
23955 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23956 + from += PAX_USER_SHADOW_BASE;
23957 +#endif
23958 +
23959 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23960 + }
23961 + return len;
23962 }
23963 EXPORT_SYMBOL(copy_in_user);
23964
23965 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23966 * it is not necessary to optimize tail handling.
23967 */
23968 unsigned long
23969 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23970 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23971 {
23972 char c;
23973 unsigned zero_len;
23974 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23975 index 61b41ca..5fef66a 100644
23976 --- a/arch/x86/mm/extable.c
23977 +++ b/arch/x86/mm/extable.c
23978 @@ -1,14 +1,71 @@
23979 #include <linux/module.h>
23980 #include <linux/spinlock.h>
23981 +#include <linux/sort.h>
23982 #include <asm/uaccess.h>
23983 +#include <asm/pgtable.h>
23984
23985 +/*
23986 + * The exception table needs to be sorted so that the binary
23987 + * search that we use to find entries in it works properly.
23988 + * This is used both for the kernel exception table and for
23989 + * the exception tables of modules that get loaded.
23990 + */
23991 +static int cmp_ex(const void *a, const void *b)
23992 +{
23993 + const struct exception_table_entry *x = a, *y = b;
23994 +
23995 + /* avoid overflow */
23996 + if (x->insn > y->insn)
23997 + return 1;
23998 + if (x->insn < y->insn)
23999 + return -1;
24000 + return 0;
24001 +}
24002 +
24003 +static void swap_ex(void *a, void *b, int size)
24004 +{
24005 + struct exception_table_entry t, *x = a, *y = b;
24006 +
24007 + t = *x;
24008 +
24009 + pax_open_kernel();
24010 + *x = *y;
24011 + *y = t;
24012 + pax_close_kernel();
24013 +}
24014 +
24015 +void sort_extable(struct exception_table_entry *start,
24016 + struct exception_table_entry *finish)
24017 +{
24018 + sort(start, finish - start, sizeof(struct exception_table_entry),
24019 + cmp_ex, swap_ex);
24020 +}
24021 +
24022 +#ifdef CONFIG_MODULES
24023 +/*
24024 + * If the exception table is sorted, any referring to the module init
24025 + * will be at the beginning or the end.
24026 + */
24027 +void trim_init_extable(struct module *m)
24028 +{
24029 + /*trim the beginning*/
24030 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24031 + m->extable++;
24032 + m->num_exentries--;
24033 + }
24034 + /*trim the end*/
24035 + while (m->num_exentries &&
24036 + within_module_init(m->extable[m->num_exentries-1].insn, m))
24037 + m->num_exentries--;
24038 +}
24039 +#endif /* CONFIG_MODULES */
24040
24041 int fixup_exception(struct pt_regs *regs)
24042 {
24043 const struct exception_table_entry *fixup;
24044
24045 #ifdef CONFIG_PNPBIOS
24046 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24047 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24048 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24049 extern u32 pnp_bios_is_utter_crap;
24050 pnp_bios_is_utter_crap = 1;
24051 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24052 index 8ac0d76..ca501e2 100644
24053 --- a/arch/x86/mm/fault.c
24054 +++ b/arch/x86/mm/fault.c
24055 @@ -11,10 +11,19 @@
24056 #include <linux/kprobes.h> /* __kprobes, ... */
24057 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24058 #include <linux/perf_event.h> /* perf_sw_event */
24059 +#include <linux/unistd.h>
24060 +#include <linux/compiler.h>
24061
24062 #include <asm/traps.h> /* dotraplinkage, ... */
24063 #include <asm/pgalloc.h> /* pgd_*(), ... */
24064 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24065 +#include <asm/vsyscall.h>
24066 +#include <asm/tlbflush.h>
24067 +
24068 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24069 +#include <asm/stacktrace.h>
24070 +#include "../kernel/dumpstack.h"
24071 +#endif
24072
24073 /*
24074 * Page fault error code bits:
24075 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24076 int ret = 0;
24077
24078 /* kprobe_running() needs smp_processor_id() */
24079 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24080 + if (kprobes_built_in() && !user_mode(regs)) {
24081 preempt_disable();
24082 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24083 ret = 1;
24084 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24085 return !instr_lo || (instr_lo>>1) == 1;
24086 case 0x00:
24087 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24088 - if (probe_kernel_address(instr, opcode))
24089 + if (user_mode(regs)) {
24090 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24091 + return 0;
24092 + } else if (probe_kernel_address(instr, opcode))
24093 return 0;
24094
24095 *prefetch = (instr_lo == 0xF) &&
24096 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24097 while (instr < max_instr) {
24098 unsigned char opcode;
24099
24100 - if (probe_kernel_address(instr, opcode))
24101 + if (user_mode(regs)) {
24102 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24103 + break;
24104 + } else if (probe_kernel_address(instr, opcode))
24105 break;
24106
24107 instr++;
24108 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24109 force_sig_info(si_signo, &info, tsk);
24110 }
24111
24112 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24113 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24114 +#endif
24115 +
24116 +#ifdef CONFIG_PAX_EMUTRAMP
24117 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24118 +#endif
24119 +
24120 +#ifdef CONFIG_PAX_PAGEEXEC
24121 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24122 +{
24123 + pgd_t *pgd;
24124 + pud_t *pud;
24125 + pmd_t *pmd;
24126 +
24127 + pgd = pgd_offset(mm, address);
24128 + if (!pgd_present(*pgd))
24129 + return NULL;
24130 + pud = pud_offset(pgd, address);
24131 + if (!pud_present(*pud))
24132 + return NULL;
24133 + pmd = pmd_offset(pud, address);
24134 + if (!pmd_present(*pmd))
24135 + return NULL;
24136 + return pmd;
24137 +}
24138 +#endif
24139 +
24140 DEFINE_SPINLOCK(pgd_lock);
24141 LIST_HEAD(pgd_list);
24142
24143 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24144 address += PMD_SIZE) {
24145
24146 unsigned long flags;
24147 +
24148 +#ifdef CONFIG_PAX_PER_CPU_PGD
24149 + unsigned long cpu;
24150 +#else
24151 struct page *page;
24152 +#endif
24153
24154 spin_lock_irqsave(&pgd_lock, flags);
24155 +
24156 +#ifdef CONFIG_PAX_PER_CPU_PGD
24157 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24158 + pgd_t *pgd = get_cpu_pgd(cpu);
24159 +#else
24160 list_for_each_entry(page, &pgd_list, lru) {
24161 - if (!vmalloc_sync_one(page_address(page), address))
24162 + pgd_t *pgd = page_address(page);
24163 +#endif
24164 +
24165 + if (!vmalloc_sync_one(pgd, address))
24166 break;
24167 }
24168 spin_unlock_irqrestore(&pgd_lock, flags);
24169 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24170 * an interrupt in the middle of a task switch..
24171 */
24172 pgd_paddr = read_cr3();
24173 +
24174 +#ifdef CONFIG_PAX_PER_CPU_PGD
24175 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24176 +#endif
24177 +
24178 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24179 if (!pmd_k)
24180 return -1;
24181 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24182
24183 const pgd_t *pgd_ref = pgd_offset_k(address);
24184 unsigned long flags;
24185 +
24186 +#ifdef CONFIG_PAX_PER_CPU_PGD
24187 + unsigned long cpu;
24188 +#else
24189 struct page *page;
24190 +#endif
24191
24192 if (pgd_none(*pgd_ref))
24193 continue;
24194
24195 spin_lock_irqsave(&pgd_lock, flags);
24196 +
24197 +#ifdef CONFIG_PAX_PER_CPU_PGD
24198 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24199 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24200 +#else
24201 list_for_each_entry(page, &pgd_list, lru) {
24202 pgd_t *pgd;
24203 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24204 +#endif
24205 +
24206 if (pgd_none(*pgd))
24207 set_pgd(pgd, *pgd_ref);
24208 else
24209 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24210 * happen within a race in page table update. In the later
24211 * case just flush:
24212 */
24213 +
24214 +#ifdef CONFIG_PAX_PER_CPU_PGD
24215 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24216 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24217 +#else
24218 pgd = pgd_offset(current->active_mm, address);
24219 +#endif
24220 +
24221 pgd_ref = pgd_offset_k(address);
24222 if (pgd_none(*pgd_ref))
24223 return -1;
24224 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24225 static int is_errata100(struct pt_regs *regs, unsigned long address)
24226 {
24227 #ifdef CONFIG_X86_64
24228 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24229 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24230 return 1;
24231 #endif
24232 return 0;
24233 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24234 }
24235
24236 static const char nx_warning[] = KERN_CRIT
24237 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24238 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24239
24240 static void
24241 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24242 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24243 if (!oops_may_print())
24244 return;
24245
24246 - if (error_code & PF_INSTR) {
24247 + if (nx_enabled && (error_code & PF_INSTR)) {
24248 unsigned int level;
24249
24250 pte_t *pte = lookup_address(address, &level);
24251
24252 if (pte && pte_present(*pte) && !pte_exec(*pte))
24253 - printk(nx_warning, current_uid());
24254 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24255 }
24256
24257 +#ifdef CONFIG_PAX_KERNEXEC
24258 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24259 + if (current->signal->curr_ip)
24260 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24261 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24262 + else
24263 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24264 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24265 + }
24266 +#endif
24267 +
24268 printk(KERN_ALERT "BUG: unable to handle kernel ");
24269 if (address < PAGE_SIZE)
24270 printk(KERN_CONT "NULL pointer dereference");
24271 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24272 {
24273 struct task_struct *tsk = current;
24274
24275 +#ifdef CONFIG_X86_64
24276 + struct mm_struct *mm = tsk->mm;
24277 +
24278 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24279 + if (regs->ip == (unsigned long)vgettimeofday) {
24280 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24281 + return;
24282 + } else if (regs->ip == (unsigned long)vtime) {
24283 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24284 + return;
24285 + } else if (regs->ip == (unsigned long)vgetcpu) {
24286 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24287 + return;
24288 + }
24289 + }
24290 +#endif
24291 +
24292 /* User mode accesses just cause a SIGSEGV */
24293 if (error_code & PF_USER) {
24294 /*
24295 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24296 if (is_errata100(regs, address))
24297 return;
24298
24299 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24300 + if (pax_is_fetch_fault(regs, error_code, address)) {
24301 +
24302 +#ifdef CONFIG_PAX_EMUTRAMP
24303 + switch (pax_handle_fetch_fault(regs)) {
24304 + case 2:
24305 + return;
24306 + }
24307 +#endif
24308 +
24309 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24310 + do_group_exit(SIGKILL);
24311 + }
24312 +#endif
24313 +
24314 if (unlikely(show_unhandled_signals))
24315 show_signal_msg(regs, error_code, address, tsk);
24316
24317 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24318 if (fault & VM_FAULT_HWPOISON) {
24319 printk(KERN_ERR
24320 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24321 - tsk->comm, tsk->pid, address);
24322 + tsk->comm, task_pid_nr(tsk), address);
24323 code = BUS_MCEERR_AR;
24324 }
24325 #endif
24326 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24327 return 1;
24328 }
24329
24330 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24331 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24332 +{
24333 + pte_t *pte;
24334 + pmd_t *pmd;
24335 + spinlock_t *ptl;
24336 + unsigned char pte_mask;
24337 +
24338 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24339 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24340 + return 0;
24341 +
24342 + /* PaX: it's our fault, let's handle it if we can */
24343 +
24344 + /* PaX: take a look at read faults before acquiring any locks */
24345 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24346 + /* instruction fetch attempt from a protected page in user mode */
24347 + up_read(&mm->mmap_sem);
24348 +
24349 +#ifdef CONFIG_PAX_EMUTRAMP
24350 + switch (pax_handle_fetch_fault(regs)) {
24351 + case 2:
24352 + return 1;
24353 + }
24354 +#endif
24355 +
24356 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24357 + do_group_exit(SIGKILL);
24358 + }
24359 +
24360 + pmd = pax_get_pmd(mm, address);
24361 + if (unlikely(!pmd))
24362 + return 0;
24363 +
24364 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24365 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24366 + pte_unmap_unlock(pte, ptl);
24367 + return 0;
24368 + }
24369 +
24370 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24371 + /* write attempt to a protected page in user mode */
24372 + pte_unmap_unlock(pte, ptl);
24373 + return 0;
24374 + }
24375 +
24376 +#ifdef CONFIG_SMP
24377 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24378 +#else
24379 + if (likely(address > get_limit(regs->cs)))
24380 +#endif
24381 + {
24382 + set_pte(pte, pte_mkread(*pte));
24383 + __flush_tlb_one(address);
24384 + pte_unmap_unlock(pte, ptl);
24385 + up_read(&mm->mmap_sem);
24386 + return 1;
24387 + }
24388 +
24389 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24390 +
24391 + /*
24392 + * PaX: fill DTLB with user rights and retry
24393 + */
24394 + __asm__ __volatile__ (
24395 + "orb %2,(%1)\n"
24396 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24397 +/*
24398 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24399 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24400 + * page fault when examined during a TLB load attempt. this is true not only
24401 + * for PTEs holding a non-present entry but also present entries that will
24402 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24403 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24404 + * for our target pages since their PTEs are simply not in the TLBs at all.
24405 +
24406 + * the best thing in omitting it is that we gain around 15-20% speed in the
24407 + * fast path of the page fault handler and can get rid of tracing since we
24408 + * can no longer flush unintended entries.
24409 + */
24410 + "invlpg (%0)\n"
24411 +#endif
24412 + __copyuser_seg"testb $0,(%0)\n"
24413 + "xorb %3,(%1)\n"
24414 + :
24415 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24416 + : "memory", "cc");
24417 + pte_unmap_unlock(pte, ptl);
24418 + up_read(&mm->mmap_sem);
24419 + return 1;
24420 +}
24421 +#endif
24422 +
24423 /*
24424 * Handle a spurious fault caused by a stale TLB entry.
24425 *
24426 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24427 static inline int
24428 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24429 {
24430 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24431 + return 1;
24432 +
24433 if (write) {
24434 /* write, present and write, not present: */
24435 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24436 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24437 {
24438 struct vm_area_struct *vma;
24439 struct task_struct *tsk;
24440 - unsigned long address;
24441 struct mm_struct *mm;
24442 int write;
24443 int fault;
24444
24445 - tsk = current;
24446 - mm = tsk->mm;
24447 -
24448 /* Get the faulting address: */
24449 - address = read_cr2();
24450 + unsigned long address = read_cr2();
24451 +
24452 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24453 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24454 + if (!search_exception_tables(regs->ip)) {
24455 + bad_area_nosemaphore(regs, error_code, address);
24456 + return;
24457 + }
24458 + if (address < PAX_USER_SHADOW_BASE) {
24459 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24460 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24461 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24462 + } else
24463 + address -= PAX_USER_SHADOW_BASE;
24464 + }
24465 +#endif
24466 +
24467 + tsk = current;
24468 + mm = tsk->mm;
24469
24470 /*
24471 * Detect and handle instructions that would cause a page fault for
24472 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24473 * User-mode registers count as a user access even for any
24474 * potential system fault or CPU buglet:
24475 */
24476 - if (user_mode_vm(regs)) {
24477 + if (user_mode(regs)) {
24478 local_irq_enable();
24479 error_code |= PF_USER;
24480 } else {
24481 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24482 might_sleep();
24483 }
24484
24485 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24486 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24487 + return;
24488 +#endif
24489 +
24490 vma = find_vma(mm, address);
24491 if (unlikely(!vma)) {
24492 bad_area(regs, error_code, address);
24493 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24494 bad_area(regs, error_code, address);
24495 return;
24496 }
24497 - if (error_code & PF_USER) {
24498 - /*
24499 - * Accessing the stack below %sp is always a bug.
24500 - * The large cushion allows instructions like enter
24501 - * and pusha to work. ("enter $65535, $31" pushes
24502 - * 32 pointers and then decrements %sp by 65535.)
24503 - */
24504 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24505 - bad_area(regs, error_code, address);
24506 - return;
24507 - }
24508 + /*
24509 + * Accessing the stack below %sp is always a bug.
24510 + * The large cushion allows instructions like enter
24511 + * and pusha to work. ("enter $65535, $31" pushes
24512 + * 32 pointers and then decrements %sp by 65535.)
24513 + */
24514 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24515 + bad_area(regs, error_code, address);
24516 + return;
24517 }
24518 +
24519 +#ifdef CONFIG_PAX_SEGMEXEC
24520 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24521 + bad_area(regs, error_code, address);
24522 + return;
24523 + }
24524 +#endif
24525 +
24526 if (unlikely(expand_stack(vma, address))) {
24527 bad_area(regs, error_code, address);
24528 return;
24529 @@ -1146,3 +1390,292 @@ good_area:
24530
24531 up_read(&mm->mmap_sem);
24532 }
24533 +
24534 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24535 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24536 +{
24537 + struct mm_struct *mm = current->mm;
24538 + unsigned long ip = regs->ip;
24539 +
24540 + if (v8086_mode(regs))
24541 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24542 +
24543 +#ifdef CONFIG_PAX_PAGEEXEC
24544 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24545 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24546 + return true;
24547 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24548 + return true;
24549 + return false;
24550 + }
24551 +#endif
24552 +
24553 +#ifdef CONFIG_PAX_SEGMEXEC
24554 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24555 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24556 + return true;
24557 + return false;
24558 + }
24559 +#endif
24560 +
24561 + return false;
24562 +}
24563 +#endif
24564 +
24565 +#ifdef CONFIG_PAX_EMUTRAMP
24566 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24567 +{
24568 + int err;
24569 +
24570 + do { /* PaX: libffi trampoline emulation */
24571 + unsigned char mov, jmp;
24572 + unsigned int addr1, addr2;
24573 +
24574 +#ifdef CONFIG_X86_64
24575 + if ((regs->ip + 9) >> 32)
24576 + break;
24577 +#endif
24578 +
24579 + err = get_user(mov, (unsigned char __user *)regs->ip);
24580 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24581 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24582 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24583 +
24584 + if (err)
24585 + break;
24586 +
24587 + if (mov == 0xB8 && jmp == 0xE9) {
24588 + regs->ax = addr1;
24589 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24590 + return 2;
24591 + }
24592 + } while (0);
24593 +
24594 + do { /* PaX: gcc trampoline emulation #1 */
24595 + unsigned char mov1, mov2;
24596 + unsigned short jmp;
24597 + unsigned int addr1, addr2;
24598 +
24599 +#ifdef CONFIG_X86_64
24600 + if ((regs->ip + 11) >> 32)
24601 + break;
24602 +#endif
24603 +
24604 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24605 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24606 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24607 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24608 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24609 +
24610 + if (err)
24611 + break;
24612 +
24613 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24614 + regs->cx = addr1;
24615 + regs->ax = addr2;
24616 + regs->ip = addr2;
24617 + return 2;
24618 + }
24619 + } while (0);
24620 +
24621 + do { /* PaX: gcc trampoline emulation #2 */
24622 + unsigned char mov, jmp;
24623 + unsigned int addr1, addr2;
24624 +
24625 +#ifdef CONFIG_X86_64
24626 + if ((regs->ip + 9) >> 32)
24627 + break;
24628 +#endif
24629 +
24630 + err = get_user(mov, (unsigned char __user *)regs->ip);
24631 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24632 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24633 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24634 +
24635 + if (err)
24636 + break;
24637 +
24638 + if (mov == 0xB9 && jmp == 0xE9) {
24639 + regs->cx = addr1;
24640 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24641 + return 2;
24642 + }
24643 + } while (0);
24644 +
24645 + return 1; /* PaX in action */
24646 +}
24647 +
24648 +#ifdef CONFIG_X86_64
24649 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24650 +{
24651 + int err;
24652 +
24653 + do { /* PaX: libffi trampoline emulation */
24654 + unsigned short mov1, mov2, jmp1;
24655 + unsigned char stcclc, jmp2;
24656 + unsigned long addr1, addr2;
24657 +
24658 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24659 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24660 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24661 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24662 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24663 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24664 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24665 +
24666 + if (err)
24667 + break;
24668 +
24669 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24670 + regs->r11 = addr1;
24671 + regs->r10 = addr2;
24672 + if (stcclc == 0xF8)
24673 + regs->flags &= ~X86_EFLAGS_CF;
24674 + else
24675 + regs->flags |= X86_EFLAGS_CF;
24676 + regs->ip = addr1;
24677 + return 2;
24678 + }
24679 + } while (0);
24680 +
24681 + do { /* PaX: gcc trampoline emulation #1 */
24682 + unsigned short mov1, mov2, jmp1;
24683 + unsigned char jmp2;
24684 + unsigned int addr1;
24685 + unsigned long addr2;
24686 +
24687 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24688 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24689 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24690 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24691 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24692 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24693 +
24694 + if (err)
24695 + break;
24696 +
24697 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24698 + regs->r11 = addr1;
24699 + regs->r10 = addr2;
24700 + regs->ip = addr1;
24701 + return 2;
24702 + }
24703 + } while (0);
24704 +
24705 + do { /* PaX: gcc trampoline emulation #2 */
24706 + unsigned short mov1, mov2, jmp1;
24707 + unsigned char jmp2;
24708 + unsigned long addr1, addr2;
24709 +
24710 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24711 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24712 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24713 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24714 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24715 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24716 +
24717 + if (err)
24718 + break;
24719 +
24720 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24721 + regs->r11 = addr1;
24722 + regs->r10 = addr2;
24723 + regs->ip = addr1;
24724 + return 2;
24725 + }
24726 + } while (0);
24727 +
24728 + return 1; /* PaX in action */
24729 +}
24730 +#endif
24731 +
24732 +/*
24733 + * PaX: decide what to do with offenders (regs->ip = fault address)
24734 + *
24735 + * returns 1 when task should be killed
24736 + * 2 when gcc trampoline was detected
24737 + */
24738 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24739 +{
24740 + if (v8086_mode(regs))
24741 + return 1;
24742 +
24743 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24744 + return 1;
24745 +
24746 +#ifdef CONFIG_X86_32
24747 + return pax_handle_fetch_fault_32(regs);
24748 +#else
24749 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24750 + return pax_handle_fetch_fault_32(regs);
24751 + else
24752 + return pax_handle_fetch_fault_64(regs);
24753 +#endif
24754 +}
24755 +#endif
24756 +
24757 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24758 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24759 +{
24760 + long i;
24761 +
24762 + printk(KERN_ERR "PAX: bytes at PC: ");
24763 + for (i = 0; i < 20; i++) {
24764 + unsigned char c;
24765 + if (get_user(c, (unsigned char __force_user *)pc+i))
24766 + printk(KERN_CONT "?? ");
24767 + else
24768 + printk(KERN_CONT "%02x ", c);
24769 + }
24770 + printk("\n");
24771 +
24772 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24773 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24774 + unsigned long c;
24775 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24776 +#ifdef CONFIG_X86_32
24777 + printk(KERN_CONT "???????? ");
24778 +#else
24779 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24780 + printk(KERN_CONT "???????? ???????? ");
24781 + else
24782 + printk(KERN_CONT "???????????????? ");
24783 +#endif
24784 + } else {
24785 +#ifdef CONFIG_X86_64
24786 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24787 + printk(KERN_CONT "%08x ", (unsigned int)c);
24788 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24789 + } else
24790 +#endif
24791 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24792 + }
24793 + }
24794 + printk("\n");
24795 +}
24796 +#endif
24797 +
24798 +/**
24799 + * probe_kernel_write(): safely attempt to write to a location
24800 + * @dst: address to write to
24801 + * @src: pointer to the data that shall be written
24802 + * @size: size of the data chunk
24803 + *
24804 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24805 + * happens, handle that and return -EFAULT.
24806 + */
24807 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24808 +{
24809 + long ret;
24810 + mm_segment_t old_fs = get_fs();
24811 +
24812 + set_fs(KERNEL_DS);
24813 + pagefault_disable();
24814 + pax_open_kernel();
24815 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24816 + pax_close_kernel();
24817 + pagefault_enable();
24818 + set_fs(old_fs);
24819 +
24820 + return ret ? -EFAULT : 0;
24821 +}
24822 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24823 index 71da1bc..7a16bf4 100644
24824 --- a/arch/x86/mm/gup.c
24825 +++ b/arch/x86/mm/gup.c
24826 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24827 addr = start;
24828 len = (unsigned long) nr_pages << PAGE_SHIFT;
24829 end = start + len;
24830 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24831 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24832 (void __user *)start, len)))
24833 return 0;
24834
24835 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24836 index 63a6ba6..79abd7a 100644
24837 --- a/arch/x86/mm/highmem_32.c
24838 +++ b/arch/x86/mm/highmem_32.c
24839 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24840 idx = type + KM_TYPE_NR*smp_processor_id();
24841 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24842 BUG_ON(!pte_none(*(kmap_pte-idx)));
24843 +
24844 + pax_open_kernel();
24845 set_pte(kmap_pte-idx, mk_pte(page, prot));
24846 + pax_close_kernel();
24847
24848 return (void *)vaddr;
24849 }
24850 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24851 index f46c3407..6ff9a26 100644
24852 --- a/arch/x86/mm/hugetlbpage.c
24853 +++ b/arch/x86/mm/hugetlbpage.c
24854 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24855 struct hstate *h = hstate_file(file);
24856 struct mm_struct *mm = current->mm;
24857 struct vm_area_struct *vma;
24858 - unsigned long start_addr;
24859 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24860 +
24861 +#ifdef CONFIG_PAX_SEGMEXEC
24862 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24863 + pax_task_size = SEGMEXEC_TASK_SIZE;
24864 +#endif
24865 +
24866 + pax_task_size -= PAGE_SIZE;
24867
24868 if (len > mm->cached_hole_size) {
24869 - start_addr = mm->free_area_cache;
24870 + start_addr = mm->free_area_cache;
24871 } else {
24872 - start_addr = TASK_UNMAPPED_BASE;
24873 - mm->cached_hole_size = 0;
24874 + start_addr = mm->mmap_base;
24875 + mm->cached_hole_size = 0;
24876 }
24877
24878 full_search:
24879 @@ -281,26 +288,27 @@ full_search:
24880
24881 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24882 /* At this point: (!vma || addr < vma->vm_end). */
24883 - if (TASK_SIZE - len < addr) {
24884 + if (pax_task_size - len < addr) {
24885 /*
24886 * Start a new search - just in case we missed
24887 * some holes.
24888 */
24889 - if (start_addr != TASK_UNMAPPED_BASE) {
24890 - start_addr = TASK_UNMAPPED_BASE;
24891 + if (start_addr != mm->mmap_base) {
24892 + start_addr = mm->mmap_base;
24893 mm->cached_hole_size = 0;
24894 goto full_search;
24895 }
24896 return -ENOMEM;
24897 }
24898 - if (!vma || addr + len <= vma->vm_start) {
24899 - mm->free_area_cache = addr + len;
24900 - return addr;
24901 - }
24902 + if (check_heap_stack_gap(vma, addr, len))
24903 + break;
24904 if (addr + mm->cached_hole_size < vma->vm_start)
24905 mm->cached_hole_size = vma->vm_start - addr;
24906 addr = ALIGN(vma->vm_end, huge_page_size(h));
24907 }
24908 +
24909 + mm->free_area_cache = addr + len;
24910 + return addr;
24911 }
24912
24913 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24914 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24915 {
24916 struct hstate *h = hstate_file(file);
24917 struct mm_struct *mm = current->mm;
24918 - struct vm_area_struct *vma, *prev_vma;
24919 - unsigned long base = mm->mmap_base, addr = addr0;
24920 + struct vm_area_struct *vma;
24921 + unsigned long base = mm->mmap_base, addr;
24922 unsigned long largest_hole = mm->cached_hole_size;
24923 - int first_time = 1;
24924
24925 /* don't allow allocations above current base */
24926 if (mm->free_area_cache > base)
24927 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24928 largest_hole = 0;
24929 mm->free_area_cache = base;
24930 }
24931 -try_again:
24932 +
24933 /* make sure it can fit in the remaining address space */
24934 if (mm->free_area_cache < len)
24935 goto fail;
24936
24937 /* either no address requested or cant fit in requested address hole */
24938 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24939 + addr = (mm->free_area_cache - len);
24940 do {
24941 + addr &= huge_page_mask(h);
24942 + vma = find_vma(mm, addr);
24943 /*
24944 * Lookup failure means no vma is above this address,
24945 * i.e. return with success:
24946 - */
24947 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24948 - return addr;
24949 -
24950 - /*
24951 * new region fits between prev_vma->vm_end and
24952 * vma->vm_start, use it:
24953 */
24954 - if (addr + len <= vma->vm_start &&
24955 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24956 + if (check_heap_stack_gap(vma, addr, len)) {
24957 /* remember the address as a hint for next time */
24958 - mm->cached_hole_size = largest_hole;
24959 - return (mm->free_area_cache = addr);
24960 - } else {
24961 - /* pull free_area_cache down to the first hole */
24962 - if (mm->free_area_cache == vma->vm_end) {
24963 - mm->free_area_cache = vma->vm_start;
24964 - mm->cached_hole_size = largest_hole;
24965 - }
24966 + mm->cached_hole_size = largest_hole;
24967 + return (mm->free_area_cache = addr);
24968 + }
24969 + /* pull free_area_cache down to the first hole */
24970 + if (mm->free_area_cache == vma->vm_end) {
24971 + mm->free_area_cache = vma->vm_start;
24972 + mm->cached_hole_size = largest_hole;
24973 }
24974
24975 /* remember the largest hole we saw so far */
24976 if (addr + largest_hole < vma->vm_start)
24977 - largest_hole = vma->vm_start - addr;
24978 + largest_hole = vma->vm_start - addr;
24979
24980 /* try just below the current vma->vm_start */
24981 - addr = (vma->vm_start - len) & huge_page_mask(h);
24982 - } while (len <= vma->vm_start);
24983 + addr = skip_heap_stack_gap(vma, len);
24984 + } while (!IS_ERR_VALUE(addr));
24985
24986 fail:
24987 /*
24988 - * if hint left us with no space for the requested
24989 - * mapping then try again:
24990 - */
24991 - if (first_time) {
24992 - mm->free_area_cache = base;
24993 - largest_hole = 0;
24994 - first_time = 0;
24995 - goto try_again;
24996 - }
24997 - /*
24998 * A failed mmap() very likely causes application failure,
24999 * so fall back to the bottom-up function here. This scenario
25000 * can happen with large stack limits and large mmap()
25001 * allocations.
25002 */
25003 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25004 +
25005 +#ifdef CONFIG_PAX_SEGMEXEC
25006 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25007 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25008 + else
25009 +#endif
25010 +
25011 + mm->mmap_base = TASK_UNMAPPED_BASE;
25012 +
25013 +#ifdef CONFIG_PAX_RANDMMAP
25014 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25015 + mm->mmap_base += mm->delta_mmap;
25016 +#endif
25017 +
25018 + mm->free_area_cache = mm->mmap_base;
25019 mm->cached_hole_size = ~0UL;
25020 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25021 len, pgoff, flags);
25022 @@ -387,6 +393,7 @@ fail:
25023 /*
25024 * Restore the topdown base:
25025 */
25026 + mm->mmap_base = base;
25027 mm->free_area_cache = base;
25028 mm->cached_hole_size = ~0UL;
25029
25030 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25031 struct hstate *h = hstate_file(file);
25032 struct mm_struct *mm = current->mm;
25033 struct vm_area_struct *vma;
25034 + unsigned long pax_task_size = TASK_SIZE;
25035
25036 if (len & ~huge_page_mask(h))
25037 return -EINVAL;
25038 - if (len > TASK_SIZE)
25039 +
25040 +#ifdef CONFIG_PAX_SEGMEXEC
25041 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25042 + pax_task_size = SEGMEXEC_TASK_SIZE;
25043 +#endif
25044 +
25045 + pax_task_size -= PAGE_SIZE;
25046 +
25047 + if (len > pax_task_size)
25048 return -ENOMEM;
25049
25050 if (flags & MAP_FIXED) {
25051 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25052 if (addr) {
25053 addr = ALIGN(addr, huge_page_size(h));
25054 vma = find_vma(mm, addr);
25055 - if (TASK_SIZE - len >= addr &&
25056 - (!vma || addr + len <= vma->vm_start))
25057 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25058 return addr;
25059 }
25060 if (mm->get_unmapped_area == arch_get_unmapped_area)
25061 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25062 index 73ffd55..f61c2a7 100644
25063 --- a/arch/x86/mm/init.c
25064 +++ b/arch/x86/mm/init.c
25065 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25066 * cause a hotspot and fill up ZONE_DMA. The page tables
25067 * need roughly 0.5KB per GB.
25068 */
25069 -#ifdef CONFIG_X86_32
25070 - start = 0x7000;
25071 -#else
25072 - start = 0x8000;
25073 -#endif
25074 + start = 0x100000;
25075 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25076 tables, PAGE_SIZE);
25077 if (e820_table_start == -1UL)
25078 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25079 #endif
25080
25081 set_nx();
25082 - if (nx_enabled)
25083 + if (nx_enabled && cpu_has_nx)
25084 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25085
25086 /* Enable PSE if available */
25087 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25088 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25089 * mmio resources as well as potential bios/acpi data regions.
25090 */
25091 +
25092 int devmem_is_allowed(unsigned long pagenr)
25093 {
25094 +#ifdef CONFIG_GRKERNSEC_KMEM
25095 + /* allow BDA */
25096 + if (!pagenr)
25097 + return 1;
25098 + /* allow EBDA */
25099 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25100 + return 1;
25101 + /* allow ISA/video mem */
25102 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25103 + return 1;
25104 + /* throw out everything else below 1MB */
25105 + if (pagenr <= 256)
25106 + return 0;
25107 +#else
25108 if (pagenr <= 256)
25109 return 1;
25110 +#endif
25111 +
25112 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25113 return 0;
25114 if (!page_is_ram(pagenr))
25115 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25116
25117 void free_initmem(void)
25118 {
25119 +
25120 +#ifdef CONFIG_PAX_KERNEXEC
25121 +#ifdef CONFIG_X86_32
25122 + /* PaX: limit KERNEL_CS to actual size */
25123 + unsigned long addr, limit;
25124 + struct desc_struct d;
25125 + int cpu;
25126 +
25127 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25128 + limit = (limit - 1UL) >> PAGE_SHIFT;
25129 +
25130 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25131 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25132 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25133 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25134 + }
25135 +
25136 + /* PaX: make KERNEL_CS read-only */
25137 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25138 + if (!paravirt_enabled())
25139 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25140 +/*
25141 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25142 + pgd = pgd_offset_k(addr);
25143 + pud = pud_offset(pgd, addr);
25144 + pmd = pmd_offset(pud, addr);
25145 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25146 + }
25147 +*/
25148 +#ifdef CONFIG_X86_PAE
25149 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25150 +/*
25151 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25152 + pgd = pgd_offset_k(addr);
25153 + pud = pud_offset(pgd, addr);
25154 + pmd = pmd_offset(pud, addr);
25155 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25156 + }
25157 +*/
25158 +#endif
25159 +
25160 +#ifdef CONFIG_MODULES
25161 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25162 +#endif
25163 +
25164 +#else
25165 + pgd_t *pgd;
25166 + pud_t *pud;
25167 + pmd_t *pmd;
25168 + unsigned long addr, end;
25169 +
25170 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25171 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25172 + pgd = pgd_offset_k(addr);
25173 + pud = pud_offset(pgd, addr);
25174 + pmd = pmd_offset(pud, addr);
25175 + if (!pmd_present(*pmd))
25176 + continue;
25177 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25178 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25179 + else
25180 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25181 + }
25182 +
25183 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25184 + end = addr + KERNEL_IMAGE_SIZE;
25185 + for (; addr < end; addr += PMD_SIZE) {
25186 + pgd = pgd_offset_k(addr);
25187 + pud = pud_offset(pgd, addr);
25188 + pmd = pmd_offset(pud, addr);
25189 + if (!pmd_present(*pmd))
25190 + continue;
25191 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25192 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25193 + }
25194 +#endif
25195 +
25196 + flush_tlb_all();
25197 +#endif
25198 +
25199 free_init_pages("unused kernel memory",
25200 (unsigned long)(&__init_begin),
25201 (unsigned long)(&__init_end));
25202 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25203 index 30938c1..bda3d5d 100644
25204 --- a/arch/x86/mm/init_32.c
25205 +++ b/arch/x86/mm/init_32.c
25206 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25207 }
25208
25209 /*
25210 - * Creates a middle page table and puts a pointer to it in the
25211 - * given global directory entry. This only returns the gd entry
25212 - * in non-PAE compilation mode, since the middle layer is folded.
25213 - */
25214 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25215 -{
25216 - pud_t *pud;
25217 - pmd_t *pmd_table;
25218 -
25219 -#ifdef CONFIG_X86_PAE
25220 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25221 - if (after_bootmem)
25222 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25223 - else
25224 - pmd_table = (pmd_t *)alloc_low_page();
25225 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25226 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25227 - pud = pud_offset(pgd, 0);
25228 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25229 -
25230 - return pmd_table;
25231 - }
25232 -#endif
25233 - pud = pud_offset(pgd, 0);
25234 - pmd_table = pmd_offset(pud, 0);
25235 -
25236 - return pmd_table;
25237 -}
25238 -
25239 -/*
25240 * Create a page table and place a pointer to it in a middle page
25241 * directory entry:
25242 */
25243 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25244 page_table = (pte_t *)alloc_low_page();
25245
25246 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25247 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25248 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25249 +#else
25250 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25251 +#endif
25252 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25253 }
25254
25255 return pte_offset_kernel(pmd, 0);
25256 }
25257
25258 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25259 +{
25260 + pud_t *pud;
25261 + pmd_t *pmd_table;
25262 +
25263 + pud = pud_offset(pgd, 0);
25264 + pmd_table = pmd_offset(pud, 0);
25265 +
25266 + return pmd_table;
25267 +}
25268 +
25269 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25270 {
25271 int pgd_idx = pgd_index(vaddr);
25272 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25273 int pgd_idx, pmd_idx;
25274 unsigned long vaddr;
25275 pgd_t *pgd;
25276 + pud_t *pud;
25277 pmd_t *pmd;
25278 pte_t *pte = NULL;
25279
25280 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25281 pgd = pgd_base + pgd_idx;
25282
25283 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25284 - pmd = one_md_table_init(pgd);
25285 - pmd = pmd + pmd_index(vaddr);
25286 + pud = pud_offset(pgd, vaddr);
25287 + pmd = pmd_offset(pud, vaddr);
25288 +
25289 +#ifdef CONFIG_X86_PAE
25290 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25291 +#endif
25292 +
25293 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25294 pmd++, pmd_idx++) {
25295 pte = page_table_kmap_check(one_page_table_init(pmd),
25296 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25297 }
25298 }
25299
25300 -static inline int is_kernel_text(unsigned long addr)
25301 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25302 {
25303 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25304 - return 1;
25305 - return 0;
25306 + if ((start > ktla_ktva((unsigned long)_etext) ||
25307 + end <= ktla_ktva((unsigned long)_stext)) &&
25308 + (start > ktla_ktva((unsigned long)_einittext) ||
25309 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25310 +
25311 +#ifdef CONFIG_ACPI_SLEEP
25312 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25313 +#endif
25314 +
25315 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25316 + return 0;
25317 + return 1;
25318 }
25319
25320 /*
25321 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25322 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25323 unsigned long start_pfn, end_pfn;
25324 pgd_t *pgd_base = swapper_pg_dir;
25325 - int pgd_idx, pmd_idx, pte_ofs;
25326 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25327 unsigned long pfn;
25328 pgd_t *pgd;
25329 + pud_t *pud;
25330 pmd_t *pmd;
25331 pte_t *pte;
25332 unsigned pages_2m, pages_4k;
25333 @@ -278,8 +279,13 @@ repeat:
25334 pfn = start_pfn;
25335 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25336 pgd = pgd_base + pgd_idx;
25337 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25338 - pmd = one_md_table_init(pgd);
25339 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25340 + pud = pud_offset(pgd, 0);
25341 + pmd = pmd_offset(pud, 0);
25342 +
25343 +#ifdef CONFIG_X86_PAE
25344 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25345 +#endif
25346
25347 if (pfn >= end_pfn)
25348 continue;
25349 @@ -291,14 +297,13 @@ repeat:
25350 #endif
25351 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25352 pmd++, pmd_idx++) {
25353 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25354 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25355
25356 /*
25357 * Map with big pages if possible, otherwise
25358 * create normal page tables:
25359 */
25360 if (use_pse) {
25361 - unsigned int addr2;
25362 pgprot_t prot = PAGE_KERNEL_LARGE;
25363 /*
25364 * first pass will use the same initial
25365 @@ -308,11 +313,7 @@ repeat:
25366 __pgprot(PTE_IDENT_ATTR |
25367 _PAGE_PSE);
25368
25369 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25370 - PAGE_OFFSET + PAGE_SIZE-1;
25371 -
25372 - if (is_kernel_text(addr) ||
25373 - is_kernel_text(addr2))
25374 + if (is_kernel_text(address, address + PMD_SIZE))
25375 prot = PAGE_KERNEL_LARGE_EXEC;
25376
25377 pages_2m++;
25378 @@ -329,7 +330,7 @@ repeat:
25379 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25380 pte += pte_ofs;
25381 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25382 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25383 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25384 pgprot_t prot = PAGE_KERNEL;
25385 /*
25386 * first pass will use the same initial
25387 @@ -337,7 +338,7 @@ repeat:
25388 */
25389 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25390
25391 - if (is_kernel_text(addr))
25392 + if (is_kernel_text(address, address + PAGE_SIZE))
25393 prot = PAGE_KERNEL_EXEC;
25394
25395 pages_4k++;
25396 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25397
25398 pud = pud_offset(pgd, va);
25399 pmd = pmd_offset(pud, va);
25400 - if (!pmd_present(*pmd))
25401 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25402 break;
25403
25404 pte = pte_offset_kernel(pmd, va);
25405 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25406
25407 static void __init pagetable_init(void)
25408 {
25409 - pgd_t *pgd_base = swapper_pg_dir;
25410 -
25411 - permanent_kmaps_init(pgd_base);
25412 + permanent_kmaps_init(swapper_pg_dir);
25413 }
25414
25415 #ifdef CONFIG_ACPI_SLEEP
25416 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25417 * ACPI suspend needs this for resume, because things like the intel-agp
25418 * driver might have split up a kernel 4MB mapping.
25419 */
25420 -char swsusp_pg_dir[PAGE_SIZE]
25421 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25422 __attribute__ ((aligned(PAGE_SIZE)));
25423
25424 static inline void save_pg_dir(void)
25425 {
25426 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25427 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25428 }
25429 #else /* !CONFIG_ACPI_SLEEP */
25430 static inline void save_pg_dir(void)
25431 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25432 flush_tlb_all();
25433 }
25434
25435 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25436 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25437 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25438
25439 /* user-defined highmem size */
25440 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25441 * Initialize the boot-time allocator (with low memory only):
25442 */
25443 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25444 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25445 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25446 PAGE_SIZE);
25447 if (bootmap == -1L)
25448 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25449 @@ -864,6 +863,12 @@ void __init mem_init(void)
25450
25451 pci_iommu_alloc();
25452
25453 +#ifdef CONFIG_PAX_PER_CPU_PGD
25454 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25455 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25456 + KERNEL_PGD_PTRS);
25457 +#endif
25458 +
25459 #ifdef CONFIG_FLATMEM
25460 BUG_ON(!mem_map);
25461 #endif
25462 @@ -881,7 +886,7 @@ void __init mem_init(void)
25463 set_highmem_pages_init();
25464
25465 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25466 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25467 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25468 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25469
25470 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25471 @@ -923,10 +928,10 @@ void __init mem_init(void)
25472 ((unsigned long)&__init_end -
25473 (unsigned long)&__init_begin) >> 10,
25474
25475 - (unsigned long)&_etext, (unsigned long)&_edata,
25476 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25477 + (unsigned long)&_sdata, (unsigned long)&_edata,
25478 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25479
25480 - (unsigned long)&_text, (unsigned long)&_etext,
25481 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25482 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25483
25484 /*
25485 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25486 if (!kernel_set_to_readonly)
25487 return;
25488
25489 + start = ktla_ktva(start);
25490 pr_debug("Set kernel text: %lx - %lx for read write\n",
25491 start, start+size);
25492
25493 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25494 if (!kernel_set_to_readonly)
25495 return;
25496
25497 + start = ktla_ktva(start);
25498 pr_debug("Set kernel text: %lx - %lx for read only\n",
25499 start, start+size);
25500
25501 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25502 unsigned long start = PFN_ALIGN(_text);
25503 unsigned long size = PFN_ALIGN(_etext) - start;
25504
25505 + start = ktla_ktva(start);
25506 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25507 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25508 size >> 10);
25509 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25510 index 7d095ad..25d2549 100644
25511 --- a/arch/x86/mm/init_64.c
25512 +++ b/arch/x86/mm/init_64.c
25513 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25514 pmd = fill_pmd(pud, vaddr);
25515 pte = fill_pte(pmd, vaddr);
25516
25517 + pax_open_kernel();
25518 set_pte(pte, new_pte);
25519 + pax_close_kernel();
25520
25521 /*
25522 * It's enough to flush this one mapping.
25523 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25524 pgd = pgd_offset_k((unsigned long)__va(phys));
25525 if (pgd_none(*pgd)) {
25526 pud = (pud_t *) spp_getpage();
25527 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25528 - _PAGE_USER));
25529 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25530 }
25531 pud = pud_offset(pgd, (unsigned long)__va(phys));
25532 if (pud_none(*pud)) {
25533 pmd = (pmd_t *) spp_getpage();
25534 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25535 - _PAGE_USER));
25536 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25537 }
25538 pmd = pmd_offset(pud, phys);
25539 BUG_ON(!pmd_none(*pmd));
25540 @@ -675,6 +675,12 @@ void __init mem_init(void)
25541
25542 pci_iommu_alloc();
25543
25544 +#ifdef CONFIG_PAX_PER_CPU_PGD
25545 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25546 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25547 + KERNEL_PGD_PTRS);
25548 +#endif
25549 +
25550 /* clear_bss() already clear the empty_zero_page */
25551
25552 reservedpages = 0;
25553 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25554 static struct vm_area_struct gate_vma = {
25555 .vm_start = VSYSCALL_START,
25556 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25557 - .vm_page_prot = PAGE_READONLY_EXEC,
25558 - .vm_flags = VM_READ | VM_EXEC
25559 + .vm_page_prot = PAGE_READONLY,
25560 + .vm_flags = VM_READ
25561 };
25562
25563 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25564 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25565
25566 const char *arch_vma_name(struct vm_area_struct *vma)
25567 {
25568 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25569 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25570 return "[vdso]";
25571 if (vma == &gate_vma)
25572 return "[vsyscall]";
25573 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25574 index 84e236c..69bd3f6 100644
25575 --- a/arch/x86/mm/iomap_32.c
25576 +++ b/arch/x86/mm/iomap_32.c
25577 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25578 debug_kmap_atomic(type);
25579 idx = type + KM_TYPE_NR * smp_processor_id();
25580 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25581 +
25582 + pax_open_kernel();
25583 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25584 + pax_close_kernel();
25585 +
25586 arch_flush_lazy_mmu_mode();
25587
25588 return (void *)vaddr;
25589 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25590 index 2feb9bd..ab91e7b 100644
25591 --- a/arch/x86/mm/ioremap.c
25592 +++ b/arch/x86/mm/ioremap.c
25593 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25594 * Second special case: Some BIOSen report the PC BIOS
25595 * area (640->1Mb) as ram even though it is not.
25596 */
25597 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25598 - pagenr < (BIOS_END >> PAGE_SHIFT))
25599 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25600 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25601 return 0;
25602
25603 for (i = 0; i < e820.nr_map; i++) {
25604 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25605 /*
25606 * Don't allow anybody to remap normal RAM that we're using..
25607 */
25608 - for (pfn = phys_addr >> PAGE_SHIFT;
25609 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25610 - pfn++) {
25611 -
25612 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25613 int is_ram = page_is_ram(pfn);
25614
25615 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25616 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25617 return NULL;
25618 WARN_ON_ONCE(is_ram);
25619 }
25620 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25621
25622 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25623 if (page_is_ram(start >> PAGE_SHIFT))
25624 +#ifdef CONFIG_HIGHMEM
25625 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25626 +#endif
25627 return __va(phys);
25628
25629 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25630 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25631 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25632
25633 static __initdata int after_paging_init;
25634 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25635 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25636
25637 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25638 {
25639 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25640 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25641
25642 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25643 - memset(bm_pte, 0, sizeof(bm_pte));
25644 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25645 + pmd_populate_user(&init_mm, pmd, bm_pte);
25646
25647 /*
25648 * The boot-ioremap range spans multiple pmds, for which
25649 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25650 index 8cc1833..1abbc5b 100644
25651 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25652 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25653 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25654 * memory (e.g. tracked pages)? For now, we need this to avoid
25655 * invoking kmemcheck for PnP BIOS calls.
25656 */
25657 - if (regs->flags & X86_VM_MASK)
25658 + if (v8086_mode(regs))
25659 return false;
25660 - if (regs->cs != __KERNEL_CS)
25661 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25662 return false;
25663
25664 pte = kmemcheck_pte_lookup(address);
25665 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25666 index c9e57af..07a321b 100644
25667 --- a/arch/x86/mm/mmap.c
25668 +++ b/arch/x86/mm/mmap.c
25669 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25670 * Leave an at least ~128 MB hole with possible stack randomization.
25671 */
25672 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25673 -#define MAX_GAP (TASK_SIZE/6*5)
25674 +#define MAX_GAP (pax_task_size/6*5)
25675
25676 /*
25677 * True on X86_32 or when emulating IA32 on X86_64
25678 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25679 return rnd << PAGE_SHIFT;
25680 }
25681
25682 -static unsigned long mmap_base(void)
25683 +static unsigned long mmap_base(struct mm_struct *mm)
25684 {
25685 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25686 + unsigned long pax_task_size = TASK_SIZE;
25687 +
25688 +#ifdef CONFIG_PAX_SEGMEXEC
25689 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25690 + pax_task_size = SEGMEXEC_TASK_SIZE;
25691 +#endif
25692
25693 if (gap < MIN_GAP)
25694 gap = MIN_GAP;
25695 else if (gap > MAX_GAP)
25696 gap = MAX_GAP;
25697
25698 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25699 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25700 }
25701
25702 /*
25703 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25704 * does, but not when emulating X86_32
25705 */
25706 -static unsigned long mmap_legacy_base(void)
25707 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25708 {
25709 - if (mmap_is_ia32())
25710 + if (mmap_is_ia32()) {
25711 +
25712 +#ifdef CONFIG_PAX_SEGMEXEC
25713 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25714 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25715 + else
25716 +#endif
25717 +
25718 return TASK_UNMAPPED_BASE;
25719 - else
25720 + } else
25721 return TASK_UNMAPPED_BASE + mmap_rnd();
25722 }
25723
25724 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25725 void arch_pick_mmap_layout(struct mm_struct *mm)
25726 {
25727 if (mmap_is_legacy()) {
25728 - mm->mmap_base = mmap_legacy_base();
25729 + mm->mmap_base = mmap_legacy_base(mm);
25730 +
25731 +#ifdef CONFIG_PAX_RANDMMAP
25732 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25733 + mm->mmap_base += mm->delta_mmap;
25734 +#endif
25735 +
25736 mm->get_unmapped_area = arch_get_unmapped_area;
25737 mm->unmap_area = arch_unmap_area;
25738 } else {
25739 - mm->mmap_base = mmap_base();
25740 + mm->mmap_base = mmap_base(mm);
25741 +
25742 +#ifdef CONFIG_PAX_RANDMMAP
25743 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25744 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25745 +#endif
25746 +
25747 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25748 mm->unmap_area = arch_unmap_area_topdown;
25749 }
25750 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25751 index 132772a..b961f11 100644
25752 --- a/arch/x86/mm/mmio-mod.c
25753 +++ b/arch/x86/mm/mmio-mod.c
25754 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25755 break;
25756 default:
25757 {
25758 - unsigned char *ip = (unsigned char *)instptr;
25759 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25760 my_trace->opcode = MMIO_UNKNOWN_OP;
25761 my_trace->width = 0;
25762 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25763 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25764 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25765 void __iomem *addr)
25766 {
25767 - static atomic_t next_id;
25768 + static atomic_unchecked_t next_id;
25769 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25770 /* These are page-unaligned. */
25771 struct mmiotrace_map map = {
25772 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25773 .private = trace
25774 },
25775 .phys = offset,
25776 - .id = atomic_inc_return(&next_id)
25777 + .id = atomic_inc_return_unchecked(&next_id)
25778 };
25779 map.map_id = trace->id;
25780
25781 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25782 index d253006..e56dd6a 100644
25783 --- a/arch/x86/mm/numa_32.c
25784 +++ b/arch/x86/mm/numa_32.c
25785 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25786 }
25787 #endif
25788
25789 -extern unsigned long find_max_low_pfn(void);
25790 extern unsigned long highend_pfn, highstart_pfn;
25791
25792 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25793 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25794 index e1d1069..2251ff3 100644
25795 --- a/arch/x86/mm/pageattr-test.c
25796 +++ b/arch/x86/mm/pageattr-test.c
25797 @@ -36,7 +36,7 @@ enum {
25798
25799 static int pte_testbit(pte_t pte)
25800 {
25801 - return pte_flags(pte) & _PAGE_UNUSED1;
25802 + return pte_flags(pte) & _PAGE_CPA_TEST;
25803 }
25804
25805 struct split_state {
25806 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25807 index dd38bfb..b72c63e 100644
25808 --- a/arch/x86/mm/pageattr.c
25809 +++ b/arch/x86/mm/pageattr.c
25810 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25811 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25812 */
25813 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25814 - pgprot_val(forbidden) |= _PAGE_NX;
25815 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25816
25817 /*
25818 * The kernel text needs to be executable for obvious reasons
25819 * Does not cover __inittext since that is gone later on. On
25820 * 64bit we do not enforce !NX on the low mapping
25821 */
25822 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25823 - pgprot_val(forbidden) |= _PAGE_NX;
25824 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25825 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25826
25827 +#ifdef CONFIG_DEBUG_RODATA
25828 /*
25829 * The .rodata section needs to be read-only. Using the pfn
25830 * catches all aliases.
25831 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25832 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25833 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25834 pgprot_val(forbidden) |= _PAGE_RW;
25835 +#endif
25836 +
25837 +#ifdef CONFIG_PAX_KERNEXEC
25838 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25839 + pgprot_val(forbidden) |= _PAGE_RW;
25840 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25841 + }
25842 +#endif
25843
25844 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25845
25846 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25847 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25848 {
25849 /* change init_mm */
25850 + pax_open_kernel();
25851 set_pte_atomic(kpte, pte);
25852 +
25853 #ifdef CONFIG_X86_32
25854 if (!SHARED_KERNEL_PMD) {
25855 +
25856 +#ifdef CONFIG_PAX_PER_CPU_PGD
25857 + unsigned long cpu;
25858 +#else
25859 struct page *page;
25860 +#endif
25861
25862 +#ifdef CONFIG_PAX_PER_CPU_PGD
25863 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25864 + pgd_t *pgd = get_cpu_pgd(cpu);
25865 +#else
25866 list_for_each_entry(page, &pgd_list, lru) {
25867 - pgd_t *pgd;
25868 + pgd_t *pgd = (pgd_t *)page_address(page);
25869 +#endif
25870 +
25871 pud_t *pud;
25872 pmd_t *pmd;
25873
25874 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25875 + pgd += pgd_index(address);
25876 pud = pud_offset(pgd, address);
25877 pmd = pmd_offset(pud, address);
25878 set_pte_atomic((pte_t *)pmd, pte);
25879 }
25880 }
25881 #endif
25882 + pax_close_kernel();
25883 }
25884
25885 static int
25886 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25887 index e78cd0e..de0a817 100644
25888 --- a/arch/x86/mm/pat.c
25889 +++ b/arch/x86/mm/pat.c
25890 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25891
25892 conflict:
25893 printk(KERN_INFO "%s:%d conflicting memory types "
25894 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25895 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25896 new->end, cattr_name(new->type), cattr_name(entry->type));
25897 return -EBUSY;
25898 }
25899 @@ -559,7 +559,7 @@ unlock_ret:
25900
25901 if (err) {
25902 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25903 - current->comm, current->pid, start, end);
25904 + current->comm, task_pid_nr(current), start, end);
25905 }
25906
25907 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25908 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25909 while (cursor < to) {
25910 if (!devmem_is_allowed(pfn)) {
25911 printk(KERN_INFO
25912 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25913 - current->comm, from, to);
25914 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25915 + current->comm, from, to, cursor);
25916 return 0;
25917 }
25918 cursor += PAGE_SIZE;
25919 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25920 printk(KERN_INFO
25921 "%s:%d ioremap_change_attr failed %s "
25922 "for %Lx-%Lx\n",
25923 - current->comm, current->pid,
25924 + current->comm, task_pid_nr(current),
25925 cattr_name(flags),
25926 base, (unsigned long long)(base + size));
25927 return -EINVAL;
25928 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25929 free_memtype(paddr, paddr + size);
25930 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25931 " for %Lx-%Lx, got %s\n",
25932 - current->comm, current->pid,
25933 + current->comm, task_pid_nr(current),
25934 cattr_name(want_flags),
25935 (unsigned long long)paddr,
25936 (unsigned long long)(paddr + size),
25937 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25938 index df3d5c8..c2223e1 100644
25939 --- a/arch/x86/mm/pf_in.c
25940 +++ b/arch/x86/mm/pf_in.c
25941 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25942 int i;
25943 enum reason_type rv = OTHERS;
25944
25945 - p = (unsigned char *)ins_addr;
25946 + p = (unsigned char *)ktla_ktva(ins_addr);
25947 p += skip_prefix(p, &prf);
25948 p += get_opcode(p, &opcode);
25949
25950 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25951 struct prefix_bits prf;
25952 int i;
25953
25954 - p = (unsigned char *)ins_addr;
25955 + p = (unsigned char *)ktla_ktva(ins_addr);
25956 p += skip_prefix(p, &prf);
25957 p += get_opcode(p, &opcode);
25958
25959 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25960 struct prefix_bits prf;
25961 int i;
25962
25963 - p = (unsigned char *)ins_addr;
25964 + p = (unsigned char *)ktla_ktva(ins_addr);
25965 p += skip_prefix(p, &prf);
25966 p += get_opcode(p, &opcode);
25967
25968 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25969 int i;
25970 unsigned long rv;
25971
25972 - p = (unsigned char *)ins_addr;
25973 + p = (unsigned char *)ktla_ktva(ins_addr);
25974 p += skip_prefix(p, &prf);
25975 p += get_opcode(p, &opcode);
25976 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25977 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25978 int i;
25979 unsigned long rv;
25980
25981 - p = (unsigned char *)ins_addr;
25982 + p = (unsigned char *)ktla_ktva(ins_addr);
25983 p += skip_prefix(p, &prf);
25984 p += get_opcode(p, &opcode);
25985 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25986 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25987 index e0e6fad..c56b495 100644
25988 --- a/arch/x86/mm/pgtable.c
25989 +++ b/arch/x86/mm/pgtable.c
25990 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25991 list_del(&page->lru);
25992 }
25993
25994 -#define UNSHARED_PTRS_PER_PGD \
25995 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25996 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25997 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25998
25999 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26000 +{
26001 + while (count--)
26002 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26003 +}
26004 +#endif
26005 +
26006 +#ifdef CONFIG_PAX_PER_CPU_PGD
26007 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26008 +{
26009 + while (count--)
26010 +
26011 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26012 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26013 +#else
26014 + *dst++ = *src++;
26015 +#endif
26016 +
26017 +}
26018 +#endif
26019 +
26020 +#ifdef CONFIG_X86_64
26021 +#define pxd_t pud_t
26022 +#define pyd_t pgd_t
26023 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26024 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26025 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26026 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26027 +#define PYD_SIZE PGDIR_SIZE
26028 +#else
26029 +#define pxd_t pmd_t
26030 +#define pyd_t pud_t
26031 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26032 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26033 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26034 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26035 +#define PYD_SIZE PUD_SIZE
26036 +#endif
26037 +
26038 +#ifdef CONFIG_PAX_PER_CPU_PGD
26039 +static inline void pgd_ctor(pgd_t *pgd) {}
26040 +static inline void pgd_dtor(pgd_t *pgd) {}
26041 +#else
26042 static void pgd_ctor(pgd_t *pgd)
26043 {
26044 /* If the pgd points to a shared pagetable level (either the
26045 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26046 pgd_list_del(pgd);
26047 spin_unlock_irqrestore(&pgd_lock, flags);
26048 }
26049 +#endif
26050
26051 /*
26052 * List of all pgd's needed for non-PAE so it can invalidate entries
26053 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26054 * -- wli
26055 */
26056
26057 -#ifdef CONFIG_X86_PAE
26058 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26059 /*
26060 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26061 * updating the top-level pagetable entries to guarantee the
26062 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26063 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26064 * and initialize the kernel pmds here.
26065 */
26066 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26067 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26068
26069 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26070 {
26071 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26072 */
26073 flush_tlb_mm(mm);
26074 }
26075 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26076 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26077 #else /* !CONFIG_X86_PAE */
26078
26079 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26080 -#define PREALLOCATED_PMDS 0
26081 +#define PREALLOCATED_PXDS 0
26082
26083 #endif /* CONFIG_X86_PAE */
26084
26085 -static void free_pmds(pmd_t *pmds[])
26086 +static void free_pxds(pxd_t *pxds[])
26087 {
26088 int i;
26089
26090 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26091 - if (pmds[i])
26092 - free_page((unsigned long)pmds[i]);
26093 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26094 + if (pxds[i])
26095 + free_page((unsigned long)pxds[i]);
26096 }
26097
26098 -static int preallocate_pmds(pmd_t *pmds[])
26099 +static int preallocate_pxds(pxd_t *pxds[])
26100 {
26101 int i;
26102 bool failed = false;
26103
26104 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26105 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26106 - if (pmd == NULL)
26107 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26108 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26109 + if (pxd == NULL)
26110 failed = true;
26111 - pmds[i] = pmd;
26112 + pxds[i] = pxd;
26113 }
26114
26115 if (failed) {
26116 - free_pmds(pmds);
26117 + free_pxds(pxds);
26118 return -ENOMEM;
26119 }
26120
26121 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26122 * preallocate which never got a corresponding vma will need to be
26123 * freed manually.
26124 */
26125 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26126 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26127 {
26128 int i;
26129
26130 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26131 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26132 pgd_t pgd = pgdp[i];
26133
26134 if (pgd_val(pgd) != 0) {
26135 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26136 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26137
26138 - pgdp[i] = native_make_pgd(0);
26139 + set_pgd(pgdp + i, native_make_pgd(0));
26140
26141 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26142 - pmd_free(mm, pmd);
26143 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26144 + pxd_free(mm, pxd);
26145 }
26146 }
26147 }
26148
26149 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26150 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26151 {
26152 - pud_t *pud;
26153 + pyd_t *pyd;
26154 unsigned long addr;
26155 int i;
26156
26157 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26158 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26159 return;
26160
26161 - pud = pud_offset(pgd, 0);
26162 +#ifdef CONFIG_X86_64
26163 + pyd = pyd_offset(mm, 0L);
26164 +#else
26165 + pyd = pyd_offset(pgd, 0L);
26166 +#endif
26167
26168 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26169 - i++, pud++, addr += PUD_SIZE) {
26170 - pmd_t *pmd = pmds[i];
26171 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26172 + i++, pyd++, addr += PYD_SIZE) {
26173 + pxd_t *pxd = pxds[i];
26174
26175 if (i >= KERNEL_PGD_BOUNDARY)
26176 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26177 - sizeof(pmd_t) * PTRS_PER_PMD);
26178 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26179 + sizeof(pxd_t) * PTRS_PER_PMD);
26180
26181 - pud_populate(mm, pud, pmd);
26182 + pyd_populate(mm, pyd, pxd);
26183 }
26184 }
26185
26186 pgd_t *pgd_alloc(struct mm_struct *mm)
26187 {
26188 pgd_t *pgd;
26189 - pmd_t *pmds[PREALLOCATED_PMDS];
26190 + pxd_t *pxds[PREALLOCATED_PXDS];
26191 +
26192 unsigned long flags;
26193
26194 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26195 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26196
26197 mm->pgd = pgd;
26198
26199 - if (preallocate_pmds(pmds) != 0)
26200 + if (preallocate_pxds(pxds) != 0)
26201 goto out_free_pgd;
26202
26203 if (paravirt_pgd_alloc(mm) != 0)
26204 - goto out_free_pmds;
26205 + goto out_free_pxds;
26206
26207 /*
26208 * Make sure that pre-populating the pmds is atomic with
26209 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26210 spin_lock_irqsave(&pgd_lock, flags);
26211
26212 pgd_ctor(pgd);
26213 - pgd_prepopulate_pmd(mm, pgd, pmds);
26214 + pgd_prepopulate_pxd(mm, pgd, pxds);
26215
26216 spin_unlock_irqrestore(&pgd_lock, flags);
26217
26218 return pgd;
26219
26220 -out_free_pmds:
26221 - free_pmds(pmds);
26222 +out_free_pxds:
26223 + free_pxds(pxds);
26224 out_free_pgd:
26225 free_page((unsigned long)pgd);
26226 out:
26227 @@ -287,7 +338,7 @@ out:
26228
26229 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26230 {
26231 - pgd_mop_up_pmds(mm, pgd);
26232 + pgd_mop_up_pxds(mm, pgd);
26233 pgd_dtor(pgd);
26234 paravirt_pgd_free(mm, pgd);
26235 free_page((unsigned long)pgd);
26236 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26237 index 46c8834..fcab43d 100644
26238 --- a/arch/x86/mm/pgtable_32.c
26239 +++ b/arch/x86/mm/pgtable_32.c
26240 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26241 return;
26242 }
26243 pte = pte_offset_kernel(pmd, vaddr);
26244 +
26245 + pax_open_kernel();
26246 if (pte_val(pteval))
26247 set_pte_at(&init_mm, vaddr, pte, pteval);
26248 else
26249 pte_clear(&init_mm, vaddr, pte);
26250 + pax_close_kernel();
26251
26252 /*
26253 * It's enough to flush this one mapping.
26254 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26255 index 513d8ed..978c161 100644
26256 --- a/arch/x86/mm/setup_nx.c
26257 +++ b/arch/x86/mm/setup_nx.c
26258 @@ -4,11 +4,10 @@
26259
26260 #include <asm/pgtable.h>
26261
26262 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26263 int nx_enabled;
26264
26265 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26266 -static int disable_nx __cpuinitdata;
26267 -
26268 +#ifndef CONFIG_PAX_PAGEEXEC
26269 /*
26270 * noexec = on|off
26271 *
26272 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26273 if (!str)
26274 return -EINVAL;
26275 if (!strncmp(str, "on", 2)) {
26276 - __supported_pte_mask |= _PAGE_NX;
26277 - disable_nx = 0;
26278 + nx_enabled = 1;
26279 } else if (!strncmp(str, "off", 3)) {
26280 - disable_nx = 1;
26281 - __supported_pte_mask &= ~_PAGE_NX;
26282 + nx_enabled = 0;
26283 }
26284 return 0;
26285 }
26286 early_param("noexec", noexec_setup);
26287 #endif
26288 +#endif
26289
26290 #ifdef CONFIG_X86_PAE
26291 void __init set_nx(void)
26292 {
26293 - unsigned int v[4], l, h;
26294 + if (!nx_enabled && cpu_has_nx) {
26295 + unsigned l, h;
26296
26297 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26298 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26299 -
26300 - if ((v[3] & (1 << 20)) && !disable_nx) {
26301 - rdmsr(MSR_EFER, l, h);
26302 - l |= EFER_NX;
26303 - wrmsr(MSR_EFER, l, h);
26304 - nx_enabled = 1;
26305 - __supported_pte_mask |= _PAGE_NX;
26306 - }
26307 + __supported_pte_mask &= ~_PAGE_NX;
26308 + rdmsr(MSR_EFER, l, h);
26309 + l &= ~EFER_NX;
26310 + wrmsr(MSR_EFER, l, h);
26311 }
26312 }
26313 #else
26314 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26315 unsigned long efer;
26316
26317 rdmsrl(MSR_EFER, efer);
26318 - if (!(efer & EFER_NX) || disable_nx)
26319 + if (!(efer & EFER_NX) || !nx_enabled)
26320 __supported_pte_mask &= ~_PAGE_NX;
26321 }
26322 #endif
26323 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26324 index 36fe08e..b123d3a 100644
26325 --- a/arch/x86/mm/tlb.c
26326 +++ b/arch/x86/mm/tlb.c
26327 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26328 BUG();
26329 cpumask_clear_cpu(cpu,
26330 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26331 +
26332 +#ifndef CONFIG_PAX_PER_CPU_PGD
26333 load_cr3(swapper_pg_dir);
26334 +#endif
26335 +
26336 }
26337 EXPORT_SYMBOL_GPL(leave_mm);
26338
26339 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26340 index 829edf0..672adb3 100644
26341 --- a/arch/x86/oprofile/backtrace.c
26342 +++ b/arch/x86/oprofile/backtrace.c
26343 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26344 {
26345 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26346
26347 - if (!user_mode_vm(regs)) {
26348 + if (!user_mode(regs)) {
26349 unsigned long stack = kernel_stack_pointer(regs);
26350 if (depth)
26351 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26352 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26353 index e6a160a..36deff6 100644
26354 --- a/arch/x86/oprofile/op_model_p4.c
26355 +++ b/arch/x86/oprofile/op_model_p4.c
26356 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26357 #endif
26358 }
26359
26360 -static int inline addr_increment(void)
26361 +static inline int addr_increment(void)
26362 {
26363 #ifdef CONFIG_SMP
26364 return smp_num_siblings == 2 ? 2 : 1;
26365 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26366 index 1331fcf..03901b2 100644
26367 --- a/arch/x86/pci/common.c
26368 +++ b/arch/x86/pci/common.c
26369 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26370 int pcibios_last_bus = -1;
26371 unsigned long pirq_table_addr;
26372 struct pci_bus *pci_root_bus;
26373 -struct pci_raw_ops *raw_pci_ops;
26374 -struct pci_raw_ops *raw_pci_ext_ops;
26375 +const struct pci_raw_ops *raw_pci_ops;
26376 +const struct pci_raw_ops *raw_pci_ext_ops;
26377
26378 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26379 int reg, int len, u32 *val)
26380 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26381 index 347d882..4baf6b6 100644
26382 --- a/arch/x86/pci/direct.c
26383 +++ b/arch/x86/pci/direct.c
26384 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26385
26386 #undef PCI_CONF1_ADDRESS
26387
26388 -struct pci_raw_ops pci_direct_conf1 = {
26389 +const struct pci_raw_ops pci_direct_conf1 = {
26390 .read = pci_conf1_read,
26391 .write = pci_conf1_write,
26392 };
26393 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26394
26395 #undef PCI_CONF2_ADDRESS
26396
26397 -struct pci_raw_ops pci_direct_conf2 = {
26398 +const struct pci_raw_ops pci_direct_conf2 = {
26399 .read = pci_conf2_read,
26400 .write = pci_conf2_write,
26401 };
26402 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26403 * This should be close to trivial, but it isn't, because there are buggy
26404 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26405 */
26406 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26407 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26408 {
26409 u32 x = 0;
26410 int year, devfn;
26411 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26412 index f10a7e9..0425342 100644
26413 --- a/arch/x86/pci/mmconfig_32.c
26414 +++ b/arch/x86/pci/mmconfig_32.c
26415 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26416 return 0;
26417 }
26418
26419 -static struct pci_raw_ops pci_mmcfg = {
26420 +static const struct pci_raw_ops pci_mmcfg = {
26421 .read = pci_mmcfg_read,
26422 .write = pci_mmcfg_write,
26423 };
26424 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26425 index 94349f8..41600a7 100644
26426 --- a/arch/x86/pci/mmconfig_64.c
26427 +++ b/arch/x86/pci/mmconfig_64.c
26428 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26429 return 0;
26430 }
26431
26432 -static struct pci_raw_ops pci_mmcfg = {
26433 +static const struct pci_raw_ops pci_mmcfg = {
26434 .read = pci_mmcfg_read,
26435 .write = pci_mmcfg_write,
26436 };
26437 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26438 index 8eb295e..86bd657 100644
26439 --- a/arch/x86/pci/numaq_32.c
26440 +++ b/arch/x86/pci/numaq_32.c
26441 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26442
26443 #undef PCI_CONF1_MQ_ADDRESS
26444
26445 -static struct pci_raw_ops pci_direct_conf1_mq = {
26446 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26447 .read = pci_conf1_mq_read,
26448 .write = pci_conf1_mq_write
26449 };
26450 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26451 index b889d82..5a58a0a 100644
26452 --- a/arch/x86/pci/olpc.c
26453 +++ b/arch/x86/pci/olpc.c
26454 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26455 return 0;
26456 }
26457
26458 -static struct pci_raw_ops pci_olpc_conf = {
26459 +static const struct pci_raw_ops pci_olpc_conf = {
26460 .read = pci_olpc_read,
26461 .write = pci_olpc_write,
26462 };
26463 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26464 index 1c975cc..b8e16c2 100644
26465 --- a/arch/x86/pci/pcbios.c
26466 +++ b/arch/x86/pci/pcbios.c
26467 @@ -56,50 +56,93 @@ union bios32 {
26468 static struct {
26469 unsigned long address;
26470 unsigned short segment;
26471 -} bios32_indirect = { 0, __KERNEL_CS };
26472 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26473
26474 /*
26475 * Returns the entry point for the given service, NULL on error
26476 */
26477
26478 -static unsigned long bios32_service(unsigned long service)
26479 +static unsigned long __devinit bios32_service(unsigned long service)
26480 {
26481 unsigned char return_code; /* %al */
26482 unsigned long address; /* %ebx */
26483 unsigned long length; /* %ecx */
26484 unsigned long entry; /* %edx */
26485 unsigned long flags;
26486 + struct desc_struct d, *gdt;
26487
26488 local_irq_save(flags);
26489 - __asm__("lcall *(%%edi); cld"
26490 +
26491 + gdt = get_cpu_gdt_table(smp_processor_id());
26492 +
26493 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26494 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26495 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26496 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26497 +
26498 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26499 : "=a" (return_code),
26500 "=b" (address),
26501 "=c" (length),
26502 "=d" (entry)
26503 : "0" (service),
26504 "1" (0),
26505 - "D" (&bios32_indirect));
26506 + "D" (&bios32_indirect),
26507 + "r"(__PCIBIOS_DS)
26508 + : "memory");
26509 +
26510 + pax_open_kernel();
26511 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26512 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26513 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26514 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26515 + pax_close_kernel();
26516 +
26517 local_irq_restore(flags);
26518
26519 switch (return_code) {
26520 - case 0:
26521 - return address + entry;
26522 - case 0x80: /* Not present */
26523 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26524 - return 0;
26525 - default: /* Shouldn't happen */
26526 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26527 - service, return_code);
26528 + case 0: {
26529 + int cpu;
26530 + unsigned char flags;
26531 +
26532 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26533 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26534 + printk(KERN_WARNING "bios32_service: not valid\n");
26535 return 0;
26536 + }
26537 + address = address + PAGE_OFFSET;
26538 + length += 16UL; /* some BIOSs underreport this... */
26539 + flags = 4;
26540 + if (length >= 64*1024*1024) {
26541 + length >>= PAGE_SHIFT;
26542 + flags |= 8;
26543 + }
26544 +
26545 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26546 + gdt = get_cpu_gdt_table(cpu);
26547 + pack_descriptor(&d, address, length, 0x9b, flags);
26548 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26549 + pack_descriptor(&d, address, length, 0x93, flags);
26550 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26551 + }
26552 + return entry;
26553 + }
26554 + case 0x80: /* Not present */
26555 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26556 + return 0;
26557 + default: /* Shouldn't happen */
26558 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26559 + service, return_code);
26560 + return 0;
26561 }
26562 }
26563
26564 static struct {
26565 unsigned long address;
26566 unsigned short segment;
26567 -} pci_indirect = { 0, __KERNEL_CS };
26568 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26569
26570 -static int pci_bios_present;
26571 +static int pci_bios_present __read_only;
26572
26573 static int __devinit check_pcibios(void)
26574 {
26575 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26576 unsigned long flags, pcibios_entry;
26577
26578 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26579 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26580 + pci_indirect.address = pcibios_entry;
26581
26582 local_irq_save(flags);
26583 - __asm__(
26584 - "lcall *(%%edi); cld\n\t"
26585 + __asm__("movw %w6, %%ds\n\t"
26586 + "lcall *%%ss:(%%edi); cld\n\t"
26587 + "push %%ss\n\t"
26588 + "pop %%ds\n\t"
26589 "jc 1f\n\t"
26590 "xor %%ah, %%ah\n"
26591 "1:"
26592 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26593 "=b" (ebx),
26594 "=c" (ecx)
26595 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26596 - "D" (&pci_indirect)
26597 + "D" (&pci_indirect),
26598 + "r" (__PCIBIOS_DS)
26599 : "memory");
26600 local_irq_restore(flags);
26601
26602 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26603
26604 switch (len) {
26605 case 1:
26606 - __asm__("lcall *(%%esi); cld\n\t"
26607 + __asm__("movw %w6, %%ds\n\t"
26608 + "lcall *%%ss:(%%esi); cld\n\t"
26609 + "push %%ss\n\t"
26610 + "pop %%ds\n\t"
26611 "jc 1f\n\t"
26612 "xor %%ah, %%ah\n"
26613 "1:"
26614 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26615 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26616 "b" (bx),
26617 "D" ((long)reg),
26618 - "S" (&pci_indirect));
26619 + "S" (&pci_indirect),
26620 + "r" (__PCIBIOS_DS));
26621 /*
26622 * Zero-extend the result beyond 8 bits, do not trust the
26623 * BIOS having done it:
26624 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26625 *value &= 0xff;
26626 break;
26627 case 2:
26628 - __asm__("lcall *(%%esi); cld\n\t"
26629 + __asm__("movw %w6, %%ds\n\t"
26630 + "lcall *%%ss:(%%esi); cld\n\t"
26631 + "push %%ss\n\t"
26632 + "pop %%ds\n\t"
26633 "jc 1f\n\t"
26634 "xor %%ah, %%ah\n"
26635 "1:"
26636 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26637 : "1" (PCIBIOS_READ_CONFIG_WORD),
26638 "b" (bx),
26639 "D" ((long)reg),
26640 - "S" (&pci_indirect));
26641 + "S" (&pci_indirect),
26642 + "r" (__PCIBIOS_DS));
26643 /*
26644 * Zero-extend the result beyond 16 bits, do not trust the
26645 * BIOS having done it:
26646 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26647 *value &= 0xffff;
26648 break;
26649 case 4:
26650 - __asm__("lcall *(%%esi); cld\n\t"
26651 + __asm__("movw %w6, %%ds\n\t"
26652 + "lcall *%%ss:(%%esi); cld\n\t"
26653 + "push %%ss\n\t"
26654 + "pop %%ds\n\t"
26655 "jc 1f\n\t"
26656 "xor %%ah, %%ah\n"
26657 "1:"
26658 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26659 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26660 "b" (bx),
26661 "D" ((long)reg),
26662 - "S" (&pci_indirect));
26663 + "S" (&pci_indirect),
26664 + "r" (__PCIBIOS_DS));
26665 break;
26666 }
26667
26668 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26669
26670 switch (len) {
26671 case 1:
26672 - __asm__("lcall *(%%esi); cld\n\t"
26673 + __asm__("movw %w6, %%ds\n\t"
26674 + "lcall *%%ss:(%%esi); cld\n\t"
26675 + "push %%ss\n\t"
26676 + "pop %%ds\n\t"
26677 "jc 1f\n\t"
26678 "xor %%ah, %%ah\n"
26679 "1:"
26680 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26681 "c" (value),
26682 "b" (bx),
26683 "D" ((long)reg),
26684 - "S" (&pci_indirect));
26685 + "S" (&pci_indirect),
26686 + "r" (__PCIBIOS_DS));
26687 break;
26688 case 2:
26689 - __asm__("lcall *(%%esi); cld\n\t"
26690 + __asm__("movw %w6, %%ds\n\t"
26691 + "lcall *%%ss:(%%esi); cld\n\t"
26692 + "push %%ss\n\t"
26693 + "pop %%ds\n\t"
26694 "jc 1f\n\t"
26695 "xor %%ah, %%ah\n"
26696 "1:"
26697 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26698 "c" (value),
26699 "b" (bx),
26700 "D" ((long)reg),
26701 - "S" (&pci_indirect));
26702 + "S" (&pci_indirect),
26703 + "r" (__PCIBIOS_DS));
26704 break;
26705 case 4:
26706 - __asm__("lcall *(%%esi); cld\n\t"
26707 + __asm__("movw %w6, %%ds\n\t"
26708 + "lcall *%%ss:(%%esi); cld\n\t"
26709 + "push %%ss\n\t"
26710 + "pop %%ds\n\t"
26711 "jc 1f\n\t"
26712 "xor %%ah, %%ah\n"
26713 "1:"
26714 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26715 "c" (value),
26716 "b" (bx),
26717 "D" ((long)reg),
26718 - "S" (&pci_indirect));
26719 + "S" (&pci_indirect),
26720 + "r" (__PCIBIOS_DS));
26721 break;
26722 }
26723
26724 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26725 * Function table for BIOS32 access
26726 */
26727
26728 -static struct pci_raw_ops pci_bios_access = {
26729 +static const struct pci_raw_ops pci_bios_access = {
26730 .read = pci_bios_read,
26731 .write = pci_bios_write
26732 };
26733 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26734 * Try to find PCI BIOS.
26735 */
26736
26737 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26738 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26739 {
26740 union bios32 *check;
26741 unsigned char sum;
26742 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26743
26744 DBG("PCI: Fetching IRQ routing table... ");
26745 __asm__("push %%es\n\t"
26746 + "movw %w8, %%ds\n\t"
26747 "push %%ds\n\t"
26748 "pop %%es\n\t"
26749 - "lcall *(%%esi); cld\n\t"
26750 + "lcall *%%ss:(%%esi); cld\n\t"
26751 "pop %%es\n\t"
26752 + "push %%ss\n\t"
26753 + "pop %%ds\n"
26754 "jc 1f\n\t"
26755 "xor %%ah, %%ah\n"
26756 "1:"
26757 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26758 "1" (0),
26759 "D" ((long) &opt),
26760 "S" (&pci_indirect),
26761 - "m" (opt)
26762 + "m" (opt),
26763 + "r" (__PCIBIOS_DS)
26764 : "memory");
26765 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26766 if (ret & 0xff00)
26767 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26768 {
26769 int ret;
26770
26771 - __asm__("lcall *(%%esi); cld\n\t"
26772 + __asm__("movw %w5, %%ds\n\t"
26773 + "lcall *%%ss:(%%esi); cld\n\t"
26774 + "push %%ss\n\t"
26775 + "pop %%ds\n"
26776 "jc 1f\n\t"
26777 "xor %%ah, %%ah\n"
26778 "1:"
26779 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26780 : "0" (PCIBIOS_SET_PCI_HW_INT),
26781 "b" ((dev->bus->number << 8) | dev->devfn),
26782 "c" ((irq << 8) | (pin + 10)),
26783 - "S" (&pci_indirect));
26784 + "S" (&pci_indirect),
26785 + "r" (__PCIBIOS_DS));
26786 return !(ret & 0xff00);
26787 }
26788 EXPORT_SYMBOL(pcibios_set_irq_routing);
26789 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26790 index fa0f651..9d8f3d9 100644
26791 --- a/arch/x86/power/cpu.c
26792 +++ b/arch/x86/power/cpu.c
26793 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26794 static void fix_processor_context(void)
26795 {
26796 int cpu = smp_processor_id();
26797 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26798 + struct tss_struct *t = init_tss + cpu;
26799
26800 set_tss_desc(cpu, t); /*
26801 * This just modifies memory; should not be
26802 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26803 */
26804
26805 #ifdef CONFIG_X86_64
26806 + pax_open_kernel();
26807 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26808 + pax_close_kernel();
26809
26810 syscall_init(); /* This sets MSR_*STAR and related */
26811 #endif
26812 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813 index dd78ef6..f9d928d 100644
26814 --- a/arch/x86/vdso/Makefile
26815 +++ b/arch/x86/vdso/Makefile
26816 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26817 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26818 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26819
26820 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26826 index ee55754..0013b2e 100644
26827 --- a/arch/x86/vdso/vclock_gettime.c
26828 +++ b/arch/x86/vdso/vclock_gettime.c
26829 @@ -22,24 +22,48 @@
26830 #include <asm/hpet.h>
26831 #include <asm/unistd.h>
26832 #include <asm/io.h>
26833 +#include <asm/fixmap.h>
26834 #include "vextern.h"
26835
26836 #define gtod vdso_vsyscall_gtod_data
26837
26838 +notrace noinline long __vdso_fallback_time(long *t)
26839 +{
26840 + long secs;
26841 + asm volatile("syscall"
26842 + : "=a" (secs)
26843 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26844 + return secs;
26845 +}
26846 +
26847 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26848 {
26849 long ret;
26850 asm("syscall" : "=a" (ret) :
26851 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26852 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26853 return ret;
26854 }
26855
26856 +notrace static inline cycle_t __vdso_vread_hpet(void)
26857 +{
26858 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26859 +}
26860 +
26861 +notrace static inline cycle_t __vdso_vread_tsc(void)
26862 +{
26863 + cycle_t ret = (cycle_t)vget_cycles();
26864 +
26865 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26866 +}
26867 +
26868 notrace static inline long vgetns(void)
26869 {
26870 long v;
26871 - cycles_t (*vread)(void);
26872 - vread = gtod->clock.vread;
26873 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26874 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26875 + v = __vdso_vread_tsc();
26876 + else
26877 + v = __vdso_vread_hpet();
26878 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26879 return (v * gtod->clock.mult) >> gtod->clock.shift;
26880 }
26881
26882 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26883
26884 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26885 {
26886 - if (likely(gtod->sysctl_enabled))
26887 + if (likely(gtod->sysctl_enabled &&
26888 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26889 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26890 switch (clock) {
26891 case CLOCK_REALTIME:
26892 if (likely(gtod->clock.vread))
26893 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26894 int clock_gettime(clockid_t, struct timespec *)
26895 __attribute__((weak, alias("__vdso_clock_gettime")));
26896
26897 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26898 +{
26899 + long ret;
26900 + asm("syscall" : "=a" (ret) :
26901 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26902 + return ret;
26903 +}
26904 +
26905 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26906 {
26907 - long ret;
26908 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26909 + if (likely(gtod->sysctl_enabled &&
26910 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26911 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26912 + {
26913 if (likely(tv != NULL)) {
26914 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26915 offsetof(struct timespec, tv_nsec) ||
26916 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26917 }
26918 return 0;
26919 }
26920 - asm("syscall" : "=a" (ret) :
26921 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26922 - return ret;
26923 + return __vdso_fallback_gettimeofday(tv, tz);
26924 }
26925 int gettimeofday(struct timeval *, struct timezone *)
26926 __attribute__((weak, alias("__vdso_gettimeofday")));
26927 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26928 index 4e5dd3b..00ba15e 100644
26929 --- a/arch/x86/vdso/vdso.lds.S
26930 +++ b/arch/x86/vdso/vdso.lds.S
26931 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26932 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26933 #include "vextern.h"
26934 #undef VEXTERN
26935 +
26936 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26937 +VEXTERN(fallback_gettimeofday)
26938 +VEXTERN(fallback_time)
26939 +VEXTERN(getcpu)
26940 +#undef VEXTERN
26941 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26942 index 58bc00f..d53fb48 100644
26943 --- a/arch/x86/vdso/vdso32-setup.c
26944 +++ b/arch/x86/vdso/vdso32-setup.c
26945 @@ -25,6 +25,7 @@
26946 #include <asm/tlbflush.h>
26947 #include <asm/vdso.h>
26948 #include <asm/proto.h>
26949 +#include <asm/mman.h>
26950
26951 enum {
26952 VDSO_DISABLED = 0,
26953 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26954 void enable_sep_cpu(void)
26955 {
26956 int cpu = get_cpu();
26957 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26958 + struct tss_struct *tss = init_tss + cpu;
26959
26960 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26961 put_cpu();
26962 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26963 gate_vma.vm_start = FIXADDR_USER_START;
26964 gate_vma.vm_end = FIXADDR_USER_END;
26965 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26966 - gate_vma.vm_page_prot = __P101;
26967 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26968 /*
26969 * Make sure the vDSO gets into every core dump.
26970 * Dumping its contents makes post-mortem fully interpretable later
26971 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26972 if (compat)
26973 addr = VDSO_HIGH_BASE;
26974 else {
26975 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26976 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26977 if (IS_ERR_VALUE(addr)) {
26978 ret = addr;
26979 goto up_fail;
26980 }
26981 }
26982
26983 - current->mm->context.vdso = (void *)addr;
26984 + current->mm->context.vdso = addr;
26985
26986 if (compat_uses_vma || !compat) {
26987 /*
26988 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26989 }
26990
26991 current_thread_info()->sysenter_return =
26992 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26993 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26994
26995 up_fail:
26996 if (ret)
26997 - current->mm->context.vdso = NULL;
26998 + current->mm->context.vdso = 0;
26999
27000 up_write(&mm->mmap_sem);
27001
27002 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27003
27004 const char *arch_vma_name(struct vm_area_struct *vma)
27005 {
27006 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27007 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27008 return "[vdso]";
27009 +
27010 +#ifdef CONFIG_PAX_SEGMEXEC
27011 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27012 + return "[vdso]";
27013 +#endif
27014 +
27015 return NULL;
27016 }
27017
27018 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27019 struct mm_struct *mm = tsk->mm;
27020
27021 /* Check to see if this task was created in compat vdso mode */
27022 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27023 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27024 return &gate_vma;
27025 return NULL;
27026 }
27027 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27028 index 1683ba2..48d07f3 100644
27029 --- a/arch/x86/vdso/vextern.h
27030 +++ b/arch/x86/vdso/vextern.h
27031 @@ -11,6 +11,5 @@
27032 put into vextern.h and be referenced as a pointer with vdso prefix.
27033 The main kernel later fills in the values. */
27034
27035 -VEXTERN(jiffies)
27036 VEXTERN(vgetcpu_mode)
27037 VEXTERN(vsyscall_gtod_data)
27038 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27039 index 21e1aeb..2c0b3c4 100644
27040 --- a/arch/x86/vdso/vma.c
27041 +++ b/arch/x86/vdso/vma.c
27042 @@ -17,8 +17,6 @@
27043 #include "vextern.h" /* Just for VMAGIC. */
27044 #undef VEXTERN
27045
27046 -unsigned int __read_mostly vdso_enabled = 1;
27047 -
27048 extern char vdso_start[], vdso_end[];
27049 extern unsigned short vdso_sync_cpuid;
27050
27051 @@ -27,10 +25,8 @@ static unsigned vdso_size;
27052
27053 static inline void *var_ref(void *p, char *name)
27054 {
27055 - if (*(void **)p != (void *)VMAGIC) {
27056 - printk("VDSO: variable %s broken\n", name);
27057 - vdso_enabled = 0;
27058 - }
27059 + if (*(void **)p != (void *)VMAGIC)
27060 + panic("VDSO: variable %s broken\n", name);
27061 return p;
27062 }
27063
27064 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27065 if (!vbase)
27066 goto oom;
27067
27068 - if (memcmp(vbase, "\177ELF", 4)) {
27069 - printk("VDSO: I'm broken; not ELF\n");
27070 - vdso_enabled = 0;
27071 - }
27072 + if (memcmp(vbase, ELFMAG, SELFMAG))
27073 + panic("VDSO: I'm broken; not ELF\n");
27074
27075 #define VEXTERN(x) \
27076 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27077 #include "vextern.h"
27078 #undef VEXTERN
27079 + vunmap(vbase);
27080 return 0;
27081
27082 oom:
27083 - printk("Cannot allocate vdso\n");
27084 - vdso_enabled = 0;
27085 - return -ENOMEM;
27086 + panic("Cannot allocate vdso\n");
27087 }
27088 __initcall(init_vdso_vars);
27089
27090 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27091 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27092 {
27093 struct mm_struct *mm = current->mm;
27094 - unsigned long addr;
27095 + unsigned long addr = 0;
27096 int ret;
27097
27098 - if (!vdso_enabled)
27099 - return 0;
27100 -
27101 down_write(&mm->mmap_sem);
27102 +
27103 +#ifdef CONFIG_PAX_RANDMMAP
27104 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27105 +#endif
27106 +
27107 addr = vdso_addr(mm->start_stack, vdso_size);
27108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27109 if (IS_ERR_VALUE(addr)) {
27110 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 goto up_fail;
27112 }
27113
27114 - current->mm->context.vdso = (void *)addr;
27115 + current->mm->context.vdso = addr;
27116
27117 ret = install_special_mapping(mm, addr, vdso_size,
27118 VM_READ|VM_EXEC|
27119 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27120 VM_ALWAYSDUMP,
27121 vdso_pages);
27122 if (ret) {
27123 - current->mm->context.vdso = NULL;
27124 + current->mm->context.vdso = 0;
27125 goto up_fail;
27126 }
27127
27128 @@ -132,10 +127,3 @@ up_fail:
27129 up_write(&mm->mmap_sem);
27130 return ret;
27131 }
27132 -
27133 -static __init int vdso_setup(char *s)
27134 -{
27135 - vdso_enabled = simple_strtoul(s, NULL, 0);
27136 - return 0;
27137 -}
27138 -__setup("vdso=", vdso_setup);
27139 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27140 index 0087b00..eecb34f 100644
27141 --- a/arch/x86/xen/enlighten.c
27142 +++ b/arch/x86/xen/enlighten.c
27143 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27144
27145 struct shared_info xen_dummy_shared_info;
27146
27147 -void *xen_initial_gdt;
27148 -
27149 /*
27150 * Point at some empty memory to start with. We map the real shared_info
27151 * page as soon as fixmap is up and running.
27152 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27153
27154 preempt_disable();
27155
27156 - start = __get_cpu_var(idt_desc).address;
27157 + start = (unsigned long)__get_cpu_var(idt_desc).address;
27158 end = start + __get_cpu_var(idt_desc).size + 1;
27159
27160 xen_mc_flush();
27161 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27162 #endif
27163 };
27164
27165 -static void xen_reboot(int reason)
27166 +static __noreturn void xen_reboot(int reason)
27167 {
27168 struct sched_shutdown r = { .reason = reason };
27169
27170 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27171 BUG();
27172 }
27173
27174 -static void xen_restart(char *msg)
27175 +static __noreturn void xen_restart(char *msg)
27176 {
27177 xen_reboot(SHUTDOWN_reboot);
27178 }
27179
27180 -static void xen_emergency_restart(void)
27181 +static __noreturn void xen_emergency_restart(void)
27182 {
27183 xen_reboot(SHUTDOWN_reboot);
27184 }
27185
27186 -static void xen_machine_halt(void)
27187 +static __noreturn void xen_machine_halt(void)
27188 {
27189 xen_reboot(SHUTDOWN_poweroff);
27190 }
27191 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27192 */
27193 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27194
27195 -#ifdef CONFIG_X86_64
27196 /* Work out if we support NX */
27197 - check_efer();
27198 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27199 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27200 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27201 + unsigned l, h;
27202 +
27203 +#ifdef CONFIG_X86_PAE
27204 + nx_enabled = 1;
27205 +#endif
27206 + __supported_pte_mask |= _PAGE_NX;
27207 + rdmsr(MSR_EFER, l, h);
27208 + l |= EFER_NX;
27209 + wrmsr(MSR_EFER, l, h);
27210 + }
27211 #endif
27212
27213 xen_setup_features();
27214 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27215
27216 machine_ops = xen_machine_ops;
27217
27218 - /*
27219 - * The only reliable way to retain the initial address of the
27220 - * percpu gdt_page is to remember it here, so we can go and
27221 - * mark it RW later, when the initial percpu area is freed.
27222 - */
27223 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27224 -
27225 xen_smp_init();
27226
27227 pgd = (pgd_t *)xen_start_info->pt_base;
27228 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27229 index 3f90a2c..2c2ad84 100644
27230 --- a/arch/x86/xen/mmu.c
27231 +++ b/arch/x86/xen/mmu.c
27232 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27233 convert_pfn_mfn(init_level4_pgt);
27234 convert_pfn_mfn(level3_ident_pgt);
27235 convert_pfn_mfn(level3_kernel_pgt);
27236 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27237 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27238 + convert_pfn_mfn(level3_vmemmap_pgt);
27239
27240 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27241 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27242 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27243 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27244 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27245 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27246 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27247 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27248 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27249 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27250 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27251 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27252 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27253
27254 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27255 pv_mmu_ops.set_pud = xen_set_pud;
27256 #if PAGETABLE_LEVELS == 4
27257 pv_mmu_ops.set_pgd = xen_set_pgd;
27258 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27259 #endif
27260
27261 /* This will work as long as patching hasn't happened yet
27262 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27263 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27264 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27265 .set_pgd = xen_set_pgd_hyper,
27266 + .set_pgd_batched = xen_set_pgd_hyper,
27267
27268 .alloc_pud = xen_alloc_pmd_init,
27269 .release_pud = xen_release_pmd_init,
27270 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27271 index a96204a..fca9b8e 100644
27272 --- a/arch/x86/xen/smp.c
27273 +++ b/arch/x86/xen/smp.c
27274 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27275 {
27276 BUG_ON(smp_processor_id() != 0);
27277 native_smp_prepare_boot_cpu();
27278 -
27279 - /* We've switched to the "real" per-cpu gdt, so make sure the
27280 - old memory can be recycled */
27281 - make_lowmem_page_readwrite(xen_initial_gdt);
27282 -
27283 xen_setup_vcpu_info_placement();
27284 }
27285
27286 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27287 gdt = get_cpu_gdt_table(cpu);
27288
27289 ctxt->flags = VGCF_IN_KERNEL;
27290 - ctxt->user_regs.ds = __USER_DS;
27291 - ctxt->user_regs.es = __USER_DS;
27292 + ctxt->user_regs.ds = __KERNEL_DS;
27293 + ctxt->user_regs.es = __KERNEL_DS;
27294 ctxt->user_regs.ss = __KERNEL_DS;
27295 #ifdef CONFIG_X86_32
27296 ctxt->user_regs.fs = __KERNEL_PERCPU;
27297 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27298 + savesegment(gs, ctxt->user_regs.gs);
27299 #else
27300 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27301 #endif
27302 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27303 int rc;
27304
27305 per_cpu(current_task, cpu) = idle;
27306 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27307 #ifdef CONFIG_X86_32
27308 irq_ctx_init(cpu);
27309 #else
27310 clear_tsk_thread_flag(idle, TIF_FORK);
27311 - per_cpu(kernel_stack, cpu) =
27312 - (unsigned long)task_stack_page(idle) -
27313 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27314 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27315 #endif
27316 xen_setup_runstate_info(cpu);
27317 xen_setup_timer(cpu);
27318 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27319 index 9a95a9c..4f39e774 100644
27320 --- a/arch/x86/xen/xen-asm_32.S
27321 +++ b/arch/x86/xen/xen-asm_32.S
27322 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27323 ESP_OFFSET=4 # bytes pushed onto stack
27324
27325 /*
27326 - * Store vcpu_info pointer for easy access. Do it this way to
27327 - * avoid having to reload %fs
27328 + * Store vcpu_info pointer for easy access.
27329 */
27330 #ifdef CONFIG_SMP
27331 - GET_THREAD_INFO(%eax)
27332 - movl TI_cpu(%eax), %eax
27333 - movl __per_cpu_offset(,%eax,4), %eax
27334 - mov per_cpu__xen_vcpu(%eax), %eax
27335 + push %fs
27336 + mov $(__KERNEL_PERCPU), %eax
27337 + mov %eax, %fs
27338 + mov PER_CPU_VAR(xen_vcpu), %eax
27339 + pop %fs
27340 #else
27341 movl per_cpu__xen_vcpu, %eax
27342 #endif
27343 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27344 index 1a5ff24..a187d40 100644
27345 --- a/arch/x86/xen/xen-head.S
27346 +++ b/arch/x86/xen/xen-head.S
27347 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27348 #ifdef CONFIG_X86_32
27349 mov %esi,xen_start_info
27350 mov $init_thread_union+THREAD_SIZE,%esp
27351 +#ifdef CONFIG_SMP
27352 + movl $cpu_gdt_table,%edi
27353 + movl $__per_cpu_load,%eax
27354 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27355 + rorl $16,%eax
27356 + movb %al,__KERNEL_PERCPU + 4(%edi)
27357 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27358 + movl $__per_cpu_end - 1,%eax
27359 + subl $__per_cpu_start,%eax
27360 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27361 +#endif
27362 #else
27363 mov %rsi,xen_start_info
27364 mov $init_thread_union+THREAD_SIZE,%rsp
27365 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27366 index f9153a3..51eab3d 100644
27367 --- a/arch/x86/xen/xen-ops.h
27368 +++ b/arch/x86/xen/xen-ops.h
27369 @@ -10,8 +10,6 @@
27370 extern const char xen_hypervisor_callback[];
27371 extern const char xen_failsafe_callback[];
27372
27373 -extern void *xen_initial_gdt;
27374 -
27375 struct trap_info;
27376 void xen_copy_trap_info(struct trap_info *traps);
27377
27378 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27379 index 15c6308..96e83c2 100644
27380 --- a/block/blk-integrity.c
27381 +++ b/block/blk-integrity.c
27382 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27383 NULL,
27384 };
27385
27386 -static struct sysfs_ops integrity_ops = {
27387 +static const struct sysfs_ops integrity_ops = {
27388 .show = &integrity_attr_show,
27389 .store = &integrity_attr_store,
27390 };
27391 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
27392 index d4ed600..cbdabb0 100644
27393 --- a/block/blk-ioc.c
27394 +++ b/block/blk-ioc.c
27395 @@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
27396 }
27397
27398 /* Called by the exitting task */
27399 -void exit_io_context(void)
27400 +void exit_io_context(struct task_struct *task)
27401 {
27402 struct io_context *ioc;
27403
27404 - task_lock(current);
27405 - ioc = current->io_context;
27406 - current->io_context = NULL;
27407 - task_unlock(current);
27408 + task_lock(task);
27409 + ioc = task->io_context;
27410 + task->io_context = NULL;
27411 + task_unlock(task);
27412
27413 if (atomic_dec_and_test(&ioc->nr_tasks)) {
27414 if (ioc->aic && ioc->aic->exit)
27415 ioc->aic->exit(ioc->aic);
27416 cfq_exit(ioc);
27417
27418 - put_io_context(ioc);
27419 }
27420 + put_io_context(ioc);
27421 }
27422
27423 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
27424 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27425 index ca56420..f2fc409 100644
27426 --- a/block/blk-iopoll.c
27427 +++ b/block/blk-iopoll.c
27428 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27429 }
27430 EXPORT_SYMBOL(blk_iopoll_complete);
27431
27432 -static void blk_iopoll_softirq(struct softirq_action *h)
27433 +static void blk_iopoll_softirq(void)
27434 {
27435 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27436 int rearm = 0, budget = blk_iopoll_budget;
27437 diff --git a/block/blk-map.c b/block/blk-map.c
27438 index 30a7e51..0aeec6a 100644
27439 --- a/block/blk-map.c
27440 +++ b/block/blk-map.c
27441 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27442 * direct dma. else, set up kernel bounce buffers
27443 */
27444 uaddr = (unsigned long) ubuf;
27445 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27446 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27447 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27448 else
27449 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27450 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27451 for (i = 0; i < iov_count; i++) {
27452 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27453
27454 + if (!iov[i].iov_len)
27455 + return -EINVAL;
27456 +
27457 if (uaddr & queue_dma_alignment(q)) {
27458 unaligned = 1;
27459 break;
27460 }
27461 - if (!iov[i].iov_len)
27462 - return -EINVAL;
27463 }
27464
27465 if (unaligned || (q->dma_pad_mask & len) || map_data)
27466 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27467 if (!len || !kbuf)
27468 return -EINVAL;
27469
27470 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27471 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27472 if (do_copy)
27473 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27474 else
27475 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27476 index ee9c216..58d410a 100644
27477 --- a/block/blk-softirq.c
27478 +++ b/block/blk-softirq.c
27479 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27480 * Softirq action handler - move entries to local list and loop over them
27481 * while passing them to the queue registered handler.
27482 */
27483 -static void blk_done_softirq(struct softirq_action *h)
27484 +static void blk_done_softirq(void)
27485 {
27486 struct list_head *cpu_list, local_list;
27487
27488 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27489 index bb9c5ea..5330d48 100644
27490 --- a/block/blk-sysfs.c
27491 +++ b/block/blk-sysfs.c
27492 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27493 kmem_cache_free(blk_requestq_cachep, q);
27494 }
27495
27496 -static struct sysfs_ops queue_sysfs_ops = {
27497 +static const struct sysfs_ops queue_sysfs_ops = {
27498 .show = queue_attr_show,
27499 .store = queue_attr_store,
27500 };
27501 diff --git a/block/bsg.c b/block/bsg.c
27502 index 7154a7a..08ac2f0 100644
27503 --- a/block/bsg.c
27504 +++ b/block/bsg.c
27505 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27506 struct sg_io_v4 *hdr, struct bsg_device *bd,
27507 fmode_t has_write_perm)
27508 {
27509 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27510 + unsigned char *cmdptr;
27511 +
27512 if (hdr->request_len > BLK_MAX_CDB) {
27513 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27514 if (!rq->cmd)
27515 return -ENOMEM;
27516 - }
27517 + cmdptr = rq->cmd;
27518 + } else
27519 + cmdptr = tmpcmd;
27520
27521 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27522 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27523 hdr->request_len))
27524 return -EFAULT;
27525
27526 + if (cmdptr != rq->cmd)
27527 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27528 +
27529 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27530 if (blk_verify_command(rq->cmd, has_write_perm))
27531 return -EPERM;
27532 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27533 rq->next_rq = next_rq;
27534 next_rq->cmd_type = rq->cmd_type;
27535
27536 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27537 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27538 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27539 hdr->din_xfer_len, GFP_KERNEL);
27540 if (ret)
27541 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27542
27543 if (hdr->dout_xfer_len) {
27544 dxfer_len = hdr->dout_xfer_len;
27545 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27546 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27547 } else if (hdr->din_xfer_len) {
27548 dxfer_len = hdr->din_xfer_len;
27549 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27550 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27551 } else
27552 dxfer_len = 0;
27553
27554 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27555 int len = min_t(unsigned int, hdr->max_response_len,
27556 rq->sense_len);
27557
27558 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27559 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27560 rq->sense, len);
27561 if (!ret)
27562 hdr->response_len = len;
27563 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27564 index 9bd086c..ca1fc22 100644
27565 --- a/block/compat_ioctl.c
27566 +++ b/block/compat_ioctl.c
27567 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27568 err |= __get_user(f->spec1, &uf->spec1);
27569 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27570 err |= __get_user(name, &uf->name);
27571 - f->name = compat_ptr(name);
27572 + f->name = (void __force_kernel *)compat_ptr(name);
27573 if (err) {
27574 err = -EFAULT;
27575 goto out;
27576 diff --git a/block/elevator.c b/block/elevator.c
27577 index a847046..75a1746 100644
27578 --- a/block/elevator.c
27579 +++ b/block/elevator.c
27580 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27581 return error;
27582 }
27583
27584 -static struct sysfs_ops elv_sysfs_ops = {
27585 +static const struct sysfs_ops elv_sysfs_ops = {
27586 .show = elv_attr_show,
27587 .store = elv_attr_store,
27588 };
27589 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27590 index 2be0a97..bded3fd 100644
27591 --- a/block/scsi_ioctl.c
27592 +++ b/block/scsi_ioctl.c
27593 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27594 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27595 struct sg_io_hdr *hdr, fmode_t mode)
27596 {
27597 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27598 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27599 + unsigned char *cmdptr;
27600 +
27601 + if (rq->cmd != rq->__cmd)
27602 + cmdptr = rq->cmd;
27603 + else
27604 + cmdptr = tmpcmd;
27605 +
27606 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27607 return -EFAULT;
27608 +
27609 + if (cmdptr != rq->cmd)
27610 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27611 +
27612 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27613 return -EPERM;
27614
27615 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27616 int err;
27617 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27618 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27619 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27620 + unsigned char *cmdptr;
27621
27622 if (!sic)
27623 return -EINVAL;
27624 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27625 */
27626 err = -EFAULT;
27627 rq->cmd_len = cmdlen;
27628 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27629 +
27630 + if (rq->cmd != rq->__cmd)
27631 + cmdptr = rq->cmd;
27632 + else
27633 + cmdptr = tmpcmd;
27634 +
27635 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27636 goto error;
27637
27638 + if (rq->cmd != cmdptr)
27639 + memcpy(rq->cmd, cmdptr, cmdlen);
27640 +
27641 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27642 goto error;
27643
27644 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27645 index 3533582..f143117 100644
27646 --- a/crypto/cryptd.c
27647 +++ b/crypto/cryptd.c
27648 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27649
27650 struct cryptd_blkcipher_request_ctx {
27651 crypto_completion_t complete;
27652 -};
27653 +} __no_const;
27654
27655 struct cryptd_hash_ctx {
27656 struct crypto_shash *child;
27657 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27658 index a90d260..7a9765e 100644
27659 --- a/crypto/gf128mul.c
27660 +++ b/crypto/gf128mul.c
27661 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27662 for (i = 0; i < 7; ++i)
27663 gf128mul_x_lle(&p[i + 1], &p[i]);
27664
27665 - memset(r, 0, sizeof(r));
27666 + memset(r, 0, sizeof(*r));
27667 for (i = 0;;) {
27668 u8 ch = ((u8 *)b)[15 - i];
27669
27670 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27671 for (i = 0; i < 7; ++i)
27672 gf128mul_x_bbe(&p[i + 1], &p[i]);
27673
27674 - memset(r, 0, sizeof(r));
27675 + memset(r, 0, sizeof(*r));
27676 for (i = 0;;) {
27677 u8 ch = ((u8 *)b)[i];
27678
27679 diff --git a/crypto/serpent.c b/crypto/serpent.c
27680 index b651a55..023297d 100644
27681 --- a/crypto/serpent.c
27682 +++ b/crypto/serpent.c
27683 @@ -21,6 +21,7 @@
27684 #include <asm/byteorder.h>
27685 #include <linux/crypto.h>
27686 #include <linux/types.h>
27687 +#include <linux/sched.h>
27688
27689 /* Key is padded to the maximum of 256 bits before round key generation.
27690 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27691 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27692 u32 r0,r1,r2,r3,r4;
27693 int i;
27694
27695 + pax_track_stack();
27696 +
27697 /* Copy key, add padding */
27698
27699 for (i = 0; i < keylen; ++i)
27700 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27701 index 0d2cdb8..d8de48d 100644
27702 --- a/drivers/acpi/acpi_pad.c
27703 +++ b/drivers/acpi/acpi_pad.c
27704 @@ -30,7 +30,7 @@
27705 #include <acpi/acpi_bus.h>
27706 #include <acpi/acpi_drivers.h>
27707
27708 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27709 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27710 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27711 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27712 static DEFINE_MUTEX(isolated_cpus_lock);
27713 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27714 index 3f4602b..2e41d36 100644
27715 --- a/drivers/acpi/battery.c
27716 +++ b/drivers/acpi/battery.c
27717 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27718 }
27719
27720 static struct battery_file {
27721 - struct file_operations ops;
27722 + const struct file_operations ops;
27723 mode_t mode;
27724 const char *name;
27725 } acpi_battery_file[] = {
27726 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27727 index 7338b6a..82f0257 100644
27728 --- a/drivers/acpi/dock.c
27729 +++ b/drivers/acpi/dock.c
27730 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27731 struct list_head list;
27732 struct list_head hotplug_list;
27733 acpi_handle handle;
27734 - struct acpi_dock_ops *ops;
27735 + const struct acpi_dock_ops *ops;
27736 void *context;
27737 };
27738
27739 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27740 * the dock driver after _DCK is executed.
27741 */
27742 int
27743 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27744 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27745 void *context)
27746 {
27747 struct dock_dependent_device *dd;
27748 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27749 index 7c1c59e..2993595 100644
27750 --- a/drivers/acpi/osl.c
27751 +++ b/drivers/acpi/osl.c
27752 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27753 void __iomem *virt_addr;
27754
27755 virt_addr = ioremap(phys_addr, width);
27756 + if (!virt_addr)
27757 + return AE_NO_MEMORY;
27758 if (!value)
27759 value = &dummy;
27760
27761 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27762 void __iomem *virt_addr;
27763
27764 virt_addr = ioremap(phys_addr, width);
27765 + if (!virt_addr)
27766 + return AE_NO_MEMORY;
27767
27768 switch (width) {
27769 case 8:
27770 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27771 index c216062..eec10d2 100644
27772 --- a/drivers/acpi/power_meter.c
27773 +++ b/drivers/acpi/power_meter.c
27774 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27775 return res;
27776
27777 temp /= 1000;
27778 - if (temp < 0)
27779 - return -EINVAL;
27780
27781 mutex_lock(&resource->lock);
27782 resource->trip[attr->index - 7] = temp;
27783 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27784 index d0d25e2..961643d 100644
27785 --- a/drivers/acpi/proc.c
27786 +++ b/drivers/acpi/proc.c
27787 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27788 size_t count, loff_t * ppos)
27789 {
27790 struct list_head *node, *next;
27791 - char strbuf[5];
27792 - char str[5] = "";
27793 - unsigned int len = count;
27794 + char strbuf[5] = {0};
27795 struct acpi_device *found_dev = NULL;
27796
27797 - if (len > 4)
27798 - len = 4;
27799 - if (len < 0)
27800 - return -EFAULT;
27801 + if (count > 4)
27802 + count = 4;
27803
27804 - if (copy_from_user(strbuf, buffer, len))
27805 + if (copy_from_user(strbuf, buffer, count))
27806 return -EFAULT;
27807 - strbuf[len] = '\0';
27808 - sscanf(strbuf, "%s", str);
27809 + strbuf[count] = '\0';
27810
27811 mutex_lock(&acpi_device_lock);
27812 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27813 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27814 if (!dev->wakeup.flags.valid)
27815 continue;
27816
27817 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27818 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27819 dev->wakeup.state.enabled =
27820 dev->wakeup.state.enabled ? 0 : 1;
27821 found_dev = dev;
27822 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27823 index 7102474..de8ad22 100644
27824 --- a/drivers/acpi/processor_core.c
27825 +++ b/drivers/acpi/processor_core.c
27826 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27827 return 0;
27828 }
27829
27830 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27831 + BUG_ON(pr->id >= nr_cpu_ids);
27832
27833 /*
27834 * Buggy BIOS check
27835 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27836 index d933980..5761f13 100644
27837 --- a/drivers/acpi/sbshc.c
27838 +++ b/drivers/acpi/sbshc.c
27839 @@ -17,7 +17,7 @@
27840
27841 #define PREFIX "ACPI: "
27842
27843 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27844 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27845 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27846
27847 struct acpi_smb_hc {
27848 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27849 index 0458094..6978e7b 100644
27850 --- a/drivers/acpi/sleep.c
27851 +++ b/drivers/acpi/sleep.c
27852 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27853 }
27854 }
27855
27856 -static struct platform_suspend_ops acpi_suspend_ops = {
27857 +static const struct platform_suspend_ops acpi_suspend_ops = {
27858 .valid = acpi_suspend_state_valid,
27859 .begin = acpi_suspend_begin,
27860 .prepare_late = acpi_pm_prepare,
27861 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27862 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27863 * been requested.
27864 */
27865 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27866 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27867 .valid = acpi_suspend_state_valid,
27868 .begin = acpi_suspend_begin_old,
27869 .prepare_late = acpi_pm_disable_gpes,
27870 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27871 acpi_enable_all_runtime_gpes();
27872 }
27873
27874 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27875 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27876 .begin = acpi_hibernation_begin,
27877 .end = acpi_pm_end,
27878 .pre_snapshot = acpi_hibernation_pre_snapshot,
27879 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27880 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27881 * been requested.
27882 */
27883 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27884 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27885 .begin = acpi_hibernation_begin_old,
27886 .end = acpi_pm_end,
27887 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27888 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27889 index 05dff63..b662ab7 100644
27890 --- a/drivers/acpi/video.c
27891 +++ b/drivers/acpi/video.c
27892 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27893 vd->brightness->levels[request_level]);
27894 }
27895
27896 -static struct backlight_ops acpi_backlight_ops = {
27897 +static const struct backlight_ops acpi_backlight_ops = {
27898 .get_brightness = acpi_video_get_brightness,
27899 .update_status = acpi_video_set_brightness,
27900 };
27901 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27902 index 6787aab..23ffb0e 100644
27903 --- a/drivers/ata/ahci.c
27904 +++ b/drivers/ata/ahci.c
27905 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27906 .sdev_attrs = ahci_sdev_attrs,
27907 };
27908
27909 -static struct ata_port_operations ahci_ops = {
27910 +static const struct ata_port_operations ahci_ops = {
27911 .inherits = &sata_pmp_port_ops,
27912
27913 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27914 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27915 .port_stop = ahci_port_stop,
27916 };
27917
27918 -static struct ata_port_operations ahci_vt8251_ops = {
27919 +static const struct ata_port_operations ahci_vt8251_ops = {
27920 .inherits = &ahci_ops,
27921 .hardreset = ahci_vt8251_hardreset,
27922 };
27923
27924 -static struct ata_port_operations ahci_p5wdh_ops = {
27925 +static const struct ata_port_operations ahci_p5wdh_ops = {
27926 .inherits = &ahci_ops,
27927 .hardreset = ahci_p5wdh_hardreset,
27928 };
27929
27930 -static struct ata_port_operations ahci_sb600_ops = {
27931 +static const struct ata_port_operations ahci_sb600_ops = {
27932 .inherits = &ahci_ops,
27933 .softreset = ahci_sb600_softreset,
27934 .pmp_softreset = ahci_sb600_softreset,
27935 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27936 index 99e7196..4968c77 100644
27937 --- a/drivers/ata/ata_generic.c
27938 +++ b/drivers/ata/ata_generic.c
27939 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27940 ATA_BMDMA_SHT(DRV_NAME),
27941 };
27942
27943 -static struct ata_port_operations generic_port_ops = {
27944 +static const struct ata_port_operations generic_port_ops = {
27945 .inherits = &ata_bmdma_port_ops,
27946 .cable_detect = ata_cable_unknown,
27947 .set_mode = generic_set_mode,
27948 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27949 index c33591d..000c121 100644
27950 --- a/drivers/ata/ata_piix.c
27951 +++ b/drivers/ata/ata_piix.c
27952 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27953 ATA_BMDMA_SHT(DRV_NAME),
27954 };
27955
27956 -static struct ata_port_operations piix_pata_ops = {
27957 +static const struct ata_port_operations piix_pata_ops = {
27958 .inherits = &ata_bmdma32_port_ops,
27959 .cable_detect = ata_cable_40wire,
27960 .set_piomode = piix_set_piomode,
27961 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27962 .prereset = piix_pata_prereset,
27963 };
27964
27965 -static struct ata_port_operations piix_vmw_ops = {
27966 +static const struct ata_port_operations piix_vmw_ops = {
27967 .inherits = &piix_pata_ops,
27968 .bmdma_status = piix_vmw_bmdma_status,
27969 };
27970
27971 -static struct ata_port_operations ich_pata_ops = {
27972 +static const struct ata_port_operations ich_pata_ops = {
27973 .inherits = &piix_pata_ops,
27974 .cable_detect = ich_pata_cable_detect,
27975 .set_dmamode = ich_set_dmamode,
27976 };
27977
27978 -static struct ata_port_operations piix_sata_ops = {
27979 +static const struct ata_port_operations piix_sata_ops = {
27980 .inherits = &ata_bmdma_port_ops,
27981 };
27982
27983 -static struct ata_port_operations piix_sidpr_sata_ops = {
27984 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27985 .inherits = &piix_sata_ops,
27986 .hardreset = sata_std_hardreset,
27987 .scr_read = piix_sidpr_scr_read,
27988 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27989 index b0882cd..c295d65 100644
27990 --- a/drivers/ata/libata-acpi.c
27991 +++ b/drivers/ata/libata-acpi.c
27992 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27993 ata_acpi_uevent(dev->link->ap, dev, event);
27994 }
27995
27996 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27997 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27998 .handler = ata_acpi_dev_notify_dock,
27999 .uevent = ata_acpi_dev_uevent,
28000 };
28001
28002 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28003 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28004 .handler = ata_acpi_ap_notify_dock,
28005 .uevent = ata_acpi_ap_uevent,
28006 };
28007 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
28008 index d4f7f99..94f603e 100644
28009 --- a/drivers/ata/libata-core.c
28010 +++ b/drivers/ata/libata-core.c
28011 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
28012 struct ata_port *ap;
28013 unsigned int tag;
28014
28015 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28016 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28017 ap = qc->ap;
28018
28019 qc->flags = 0;
28020 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
28021 struct ata_port *ap;
28022 struct ata_link *link;
28023
28024 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28025 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28026 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
28027 ap = qc->ap;
28028 link = qc->dev->link;
28029 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
28030 * LOCKING:
28031 * None.
28032 */
28033 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
28034 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28035 {
28036 static DEFINE_SPINLOCK(lock);
28037 const struct ata_port_operations *cur;
28038 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28039 return;
28040
28041 spin_lock(&lock);
28042 + pax_open_kernel();
28043
28044 for (cur = ops->inherits; cur; cur = cur->inherits) {
28045 void **inherit = (void **)cur;
28046 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28047 if (IS_ERR(*pp))
28048 *pp = NULL;
28049
28050 - ops->inherits = NULL;
28051 + *(struct ata_port_operations **)&ops->inherits = NULL;
28052
28053 + pax_close_kernel();
28054 spin_unlock(&lock);
28055 }
28056
28057 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28058 */
28059 /* KILLME - the only user left is ipr */
28060 void ata_host_init(struct ata_host *host, struct device *dev,
28061 - unsigned long flags, struct ata_port_operations *ops)
28062 + unsigned long flags, const struct ata_port_operations *ops)
28063 {
28064 spin_lock_init(&host->lock);
28065 host->dev = dev;
28066 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28067 /* truly dummy */
28068 }
28069
28070 -struct ata_port_operations ata_dummy_port_ops = {
28071 +const struct ata_port_operations ata_dummy_port_ops = {
28072 .qc_prep = ata_noop_qc_prep,
28073 .qc_issue = ata_dummy_qc_issue,
28074 .error_handler = ata_dummy_error_handler,
28075 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28076 index e5bdb9b..45a8e72 100644
28077 --- a/drivers/ata/libata-eh.c
28078 +++ b/drivers/ata/libata-eh.c
28079 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28080 {
28081 struct ata_link *link;
28082
28083 + pax_track_stack();
28084 +
28085 ata_for_each_link(link, ap, HOST_FIRST)
28086 ata_eh_link_report(link);
28087 }
28088 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28089 */
28090 void ata_std_error_handler(struct ata_port *ap)
28091 {
28092 - struct ata_port_operations *ops = ap->ops;
28093 + const struct ata_port_operations *ops = ap->ops;
28094 ata_reset_fn_t hardreset = ops->hardreset;
28095
28096 /* ignore built-in hardreset if SCR access is not available */
28097 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28098 index 51f0ffb..19ce3e3 100644
28099 --- a/drivers/ata/libata-pmp.c
28100 +++ b/drivers/ata/libata-pmp.c
28101 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28102 */
28103 static int sata_pmp_eh_recover(struct ata_port *ap)
28104 {
28105 - struct ata_port_operations *ops = ap->ops;
28106 + const struct ata_port_operations *ops = ap->ops;
28107 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28108 struct ata_link *pmp_link = &ap->link;
28109 struct ata_device *pmp_dev = pmp_link->device;
28110 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28111 index d8f35fe..288180a 100644
28112 --- a/drivers/ata/pata_acpi.c
28113 +++ b/drivers/ata/pata_acpi.c
28114 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28115 ATA_BMDMA_SHT(DRV_NAME),
28116 };
28117
28118 -static struct ata_port_operations pacpi_ops = {
28119 +static const struct ata_port_operations pacpi_ops = {
28120 .inherits = &ata_bmdma_port_ops,
28121 .qc_issue = pacpi_qc_issue,
28122 .cable_detect = pacpi_cable_detect,
28123 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28124 index 9434114..1f2f364 100644
28125 --- a/drivers/ata/pata_ali.c
28126 +++ b/drivers/ata/pata_ali.c
28127 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28128 * Port operations for PIO only ALi
28129 */
28130
28131 -static struct ata_port_operations ali_early_port_ops = {
28132 +static const struct ata_port_operations ali_early_port_ops = {
28133 .inherits = &ata_sff_port_ops,
28134 .cable_detect = ata_cable_40wire,
28135 .set_piomode = ali_set_piomode,
28136 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28137 * Port operations for DMA capable ALi without cable
28138 * detect
28139 */
28140 -static struct ata_port_operations ali_20_port_ops = {
28141 +static const struct ata_port_operations ali_20_port_ops = {
28142 .inherits = &ali_dma_base_ops,
28143 .cable_detect = ata_cable_40wire,
28144 .mode_filter = ali_20_filter,
28145 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28146 /*
28147 * Port operations for DMA capable ALi with cable detect
28148 */
28149 -static struct ata_port_operations ali_c2_port_ops = {
28150 +static const struct ata_port_operations ali_c2_port_ops = {
28151 .inherits = &ali_dma_base_ops,
28152 .check_atapi_dma = ali_check_atapi_dma,
28153 .cable_detect = ali_c2_cable_detect,
28154 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28155 /*
28156 * Port operations for DMA capable ALi with cable detect
28157 */
28158 -static struct ata_port_operations ali_c4_port_ops = {
28159 +static const struct ata_port_operations ali_c4_port_ops = {
28160 .inherits = &ali_dma_base_ops,
28161 .check_atapi_dma = ali_check_atapi_dma,
28162 .cable_detect = ali_c2_cable_detect,
28163 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28164 /*
28165 * Port operations for DMA capable ALi with cable detect and LBA48
28166 */
28167 -static struct ata_port_operations ali_c5_port_ops = {
28168 +static const struct ata_port_operations ali_c5_port_ops = {
28169 .inherits = &ali_dma_base_ops,
28170 .check_atapi_dma = ali_check_atapi_dma,
28171 .dev_config = ali_warn_atapi_dma,
28172 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28173 index 567f3f7..c8ee0da 100644
28174 --- a/drivers/ata/pata_amd.c
28175 +++ b/drivers/ata/pata_amd.c
28176 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28177 .prereset = amd_pre_reset,
28178 };
28179
28180 -static struct ata_port_operations amd33_port_ops = {
28181 +static const struct ata_port_operations amd33_port_ops = {
28182 .inherits = &amd_base_port_ops,
28183 .cable_detect = ata_cable_40wire,
28184 .set_piomode = amd33_set_piomode,
28185 .set_dmamode = amd33_set_dmamode,
28186 };
28187
28188 -static struct ata_port_operations amd66_port_ops = {
28189 +static const struct ata_port_operations amd66_port_ops = {
28190 .inherits = &amd_base_port_ops,
28191 .cable_detect = ata_cable_unknown,
28192 .set_piomode = amd66_set_piomode,
28193 .set_dmamode = amd66_set_dmamode,
28194 };
28195
28196 -static struct ata_port_operations amd100_port_ops = {
28197 +static const struct ata_port_operations amd100_port_ops = {
28198 .inherits = &amd_base_port_ops,
28199 .cable_detect = ata_cable_unknown,
28200 .set_piomode = amd100_set_piomode,
28201 .set_dmamode = amd100_set_dmamode,
28202 };
28203
28204 -static struct ata_port_operations amd133_port_ops = {
28205 +static const struct ata_port_operations amd133_port_ops = {
28206 .inherits = &amd_base_port_ops,
28207 .cable_detect = amd_cable_detect,
28208 .set_piomode = amd133_set_piomode,
28209 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28210 .host_stop = nv_host_stop,
28211 };
28212
28213 -static struct ata_port_operations nv100_port_ops = {
28214 +static const struct ata_port_operations nv100_port_ops = {
28215 .inherits = &nv_base_port_ops,
28216 .set_piomode = nv100_set_piomode,
28217 .set_dmamode = nv100_set_dmamode,
28218 };
28219
28220 -static struct ata_port_operations nv133_port_ops = {
28221 +static const struct ata_port_operations nv133_port_ops = {
28222 .inherits = &nv_base_port_ops,
28223 .set_piomode = nv133_set_piomode,
28224 .set_dmamode = nv133_set_dmamode,
28225 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28226 index d332cfd..4b7eaae 100644
28227 --- a/drivers/ata/pata_artop.c
28228 +++ b/drivers/ata/pata_artop.c
28229 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28230 ATA_BMDMA_SHT(DRV_NAME),
28231 };
28232
28233 -static struct ata_port_operations artop6210_ops = {
28234 +static const struct ata_port_operations artop6210_ops = {
28235 .inherits = &ata_bmdma_port_ops,
28236 .cable_detect = ata_cable_40wire,
28237 .set_piomode = artop6210_set_piomode,
28238 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28239 .qc_defer = artop6210_qc_defer,
28240 };
28241
28242 -static struct ata_port_operations artop6260_ops = {
28243 +static const struct ata_port_operations artop6260_ops = {
28244 .inherits = &ata_bmdma_port_ops,
28245 .cable_detect = artop6260_cable_detect,
28246 .set_piomode = artop6260_set_piomode,
28247 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28248 index 5c129f9..7bb7ccb 100644
28249 --- a/drivers/ata/pata_at32.c
28250 +++ b/drivers/ata/pata_at32.c
28251 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28252 ATA_PIO_SHT(DRV_NAME),
28253 };
28254
28255 -static struct ata_port_operations at32_port_ops = {
28256 +static const struct ata_port_operations at32_port_ops = {
28257 .inherits = &ata_sff_port_ops,
28258 .cable_detect = ata_cable_40wire,
28259 .set_piomode = pata_at32_set_piomode,
28260 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28261 index 41c94b1..829006d 100644
28262 --- a/drivers/ata/pata_at91.c
28263 +++ b/drivers/ata/pata_at91.c
28264 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28265 ATA_PIO_SHT(DRV_NAME),
28266 };
28267
28268 -static struct ata_port_operations pata_at91_port_ops = {
28269 +static const struct ata_port_operations pata_at91_port_ops = {
28270 .inherits = &ata_sff_port_ops,
28271
28272 .sff_data_xfer = pata_at91_data_xfer_noirq,
28273 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28274 index ae4454d..d391eb4 100644
28275 --- a/drivers/ata/pata_atiixp.c
28276 +++ b/drivers/ata/pata_atiixp.c
28277 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28278 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28279 };
28280
28281 -static struct ata_port_operations atiixp_port_ops = {
28282 +static const struct ata_port_operations atiixp_port_ops = {
28283 .inherits = &ata_bmdma_port_ops,
28284
28285 .qc_prep = ata_sff_dumb_qc_prep,
28286 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28287 index 6fe7ded..2a425dc 100644
28288 --- a/drivers/ata/pata_atp867x.c
28289 +++ b/drivers/ata/pata_atp867x.c
28290 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28291 ATA_BMDMA_SHT(DRV_NAME),
28292 };
28293
28294 -static struct ata_port_operations atp867x_ops = {
28295 +static const struct ata_port_operations atp867x_ops = {
28296 .inherits = &ata_bmdma_port_ops,
28297 .cable_detect = atp867x_cable_detect,
28298 .set_piomode = atp867x_set_piomode,
28299 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28300 index c4b47a3..b27a367 100644
28301 --- a/drivers/ata/pata_bf54x.c
28302 +++ b/drivers/ata/pata_bf54x.c
28303 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28304 .dma_boundary = ATA_DMA_BOUNDARY,
28305 };
28306
28307 -static struct ata_port_operations bfin_pata_ops = {
28308 +static const struct ata_port_operations bfin_pata_ops = {
28309 .inherits = &ata_sff_port_ops,
28310
28311 .set_piomode = bfin_set_piomode,
28312 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28313 index 5acf9fa..84248be 100644
28314 --- a/drivers/ata/pata_cmd640.c
28315 +++ b/drivers/ata/pata_cmd640.c
28316 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28317 ATA_BMDMA_SHT(DRV_NAME),
28318 };
28319
28320 -static struct ata_port_operations cmd640_port_ops = {
28321 +static const struct ata_port_operations cmd640_port_ops = {
28322 .inherits = &ata_bmdma_port_ops,
28323 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28324 .sff_data_xfer = ata_sff_data_xfer_noirq,
28325 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28326 index ccd2694..c869c3d 100644
28327 --- a/drivers/ata/pata_cmd64x.c
28328 +++ b/drivers/ata/pata_cmd64x.c
28329 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28330 .set_dmamode = cmd64x_set_dmamode,
28331 };
28332
28333 -static struct ata_port_operations cmd64x_port_ops = {
28334 +static const struct ata_port_operations cmd64x_port_ops = {
28335 .inherits = &cmd64x_base_ops,
28336 .cable_detect = ata_cable_40wire,
28337 };
28338
28339 -static struct ata_port_operations cmd646r1_port_ops = {
28340 +static const struct ata_port_operations cmd646r1_port_ops = {
28341 .inherits = &cmd64x_base_ops,
28342 .bmdma_stop = cmd646r1_bmdma_stop,
28343 .cable_detect = ata_cable_40wire,
28344 };
28345
28346 -static struct ata_port_operations cmd648_port_ops = {
28347 +static const struct ata_port_operations cmd648_port_ops = {
28348 .inherits = &cmd64x_base_ops,
28349 .bmdma_stop = cmd648_bmdma_stop,
28350 .cable_detect = cmd648_cable_detect,
28351 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28352 index 0df83cf..d7595b0 100644
28353 --- a/drivers/ata/pata_cs5520.c
28354 +++ b/drivers/ata/pata_cs5520.c
28355 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28356 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28357 };
28358
28359 -static struct ata_port_operations cs5520_port_ops = {
28360 +static const struct ata_port_operations cs5520_port_ops = {
28361 .inherits = &ata_bmdma_port_ops,
28362 .qc_prep = ata_sff_dumb_qc_prep,
28363 .cable_detect = ata_cable_40wire,
28364 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28365 index c974b05..6d26b11 100644
28366 --- a/drivers/ata/pata_cs5530.c
28367 +++ b/drivers/ata/pata_cs5530.c
28368 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28369 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28370 };
28371
28372 -static struct ata_port_operations cs5530_port_ops = {
28373 +static const struct ata_port_operations cs5530_port_ops = {
28374 .inherits = &ata_bmdma_port_ops,
28375
28376 .qc_prep = ata_sff_dumb_qc_prep,
28377 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28378 index 403f561..aacd26b 100644
28379 --- a/drivers/ata/pata_cs5535.c
28380 +++ b/drivers/ata/pata_cs5535.c
28381 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28382 ATA_BMDMA_SHT(DRV_NAME),
28383 };
28384
28385 -static struct ata_port_operations cs5535_port_ops = {
28386 +static const struct ata_port_operations cs5535_port_ops = {
28387 .inherits = &ata_bmdma_port_ops,
28388 .cable_detect = cs5535_cable_detect,
28389 .set_piomode = cs5535_set_piomode,
28390 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28391 index 6da4cb4..de24a25 100644
28392 --- a/drivers/ata/pata_cs5536.c
28393 +++ b/drivers/ata/pata_cs5536.c
28394 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28395 ATA_BMDMA_SHT(DRV_NAME),
28396 };
28397
28398 -static struct ata_port_operations cs5536_port_ops = {
28399 +static const struct ata_port_operations cs5536_port_ops = {
28400 .inherits = &ata_bmdma_port_ops,
28401 .cable_detect = cs5536_cable_detect,
28402 .set_piomode = cs5536_set_piomode,
28403 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28404 index 8fb040b..b16a9c9 100644
28405 --- a/drivers/ata/pata_cypress.c
28406 +++ b/drivers/ata/pata_cypress.c
28407 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28408 ATA_BMDMA_SHT(DRV_NAME),
28409 };
28410
28411 -static struct ata_port_operations cy82c693_port_ops = {
28412 +static const struct ata_port_operations cy82c693_port_ops = {
28413 .inherits = &ata_bmdma_port_ops,
28414 .cable_detect = ata_cable_40wire,
28415 .set_piomode = cy82c693_set_piomode,
28416 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28417 index 2a6412f..555ee11 100644
28418 --- a/drivers/ata/pata_efar.c
28419 +++ b/drivers/ata/pata_efar.c
28420 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28421 ATA_BMDMA_SHT(DRV_NAME),
28422 };
28423
28424 -static struct ata_port_operations efar_ops = {
28425 +static const struct ata_port_operations efar_ops = {
28426 .inherits = &ata_bmdma_port_ops,
28427 .cable_detect = efar_cable_detect,
28428 .set_piomode = efar_set_piomode,
28429 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28430 index b9d8836..0b92030 100644
28431 --- a/drivers/ata/pata_hpt366.c
28432 +++ b/drivers/ata/pata_hpt366.c
28433 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28434 * Configuration for HPT366/68
28435 */
28436
28437 -static struct ata_port_operations hpt366_port_ops = {
28438 +static const struct ata_port_operations hpt366_port_ops = {
28439 .inherits = &ata_bmdma_port_ops,
28440 .cable_detect = hpt36x_cable_detect,
28441 .mode_filter = hpt366_filter,
28442 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28443 index 5af7f19..00c4980 100644
28444 --- a/drivers/ata/pata_hpt37x.c
28445 +++ b/drivers/ata/pata_hpt37x.c
28446 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28447 * Configuration for HPT370
28448 */
28449
28450 -static struct ata_port_operations hpt370_port_ops = {
28451 +static const struct ata_port_operations hpt370_port_ops = {
28452 .inherits = &ata_bmdma_port_ops,
28453
28454 .bmdma_stop = hpt370_bmdma_stop,
28455 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28456 * Configuration for HPT370A. Close to 370 but less filters
28457 */
28458
28459 -static struct ata_port_operations hpt370a_port_ops = {
28460 +static const struct ata_port_operations hpt370a_port_ops = {
28461 .inherits = &hpt370_port_ops,
28462 .mode_filter = hpt370a_filter,
28463 };
28464 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28465 * and DMA mode setting functionality.
28466 */
28467
28468 -static struct ata_port_operations hpt372_port_ops = {
28469 +static const struct ata_port_operations hpt372_port_ops = {
28470 .inherits = &ata_bmdma_port_ops,
28471
28472 .bmdma_stop = hpt37x_bmdma_stop,
28473 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28474 * but we have a different cable detection procedure for function 1.
28475 */
28476
28477 -static struct ata_port_operations hpt374_fn1_port_ops = {
28478 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28479 .inherits = &hpt372_port_ops,
28480 .prereset = hpt374_fn1_pre_reset,
28481 };
28482 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28483 index 100f227..2e39382 100644
28484 --- a/drivers/ata/pata_hpt3x2n.c
28485 +++ b/drivers/ata/pata_hpt3x2n.c
28486 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28487 * Configuration for HPT3x2n.
28488 */
28489
28490 -static struct ata_port_operations hpt3x2n_port_ops = {
28491 +static const struct ata_port_operations hpt3x2n_port_ops = {
28492 .inherits = &ata_bmdma_port_ops,
28493
28494 .bmdma_stop = hpt3x2n_bmdma_stop,
28495 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28496 index 7e31025..6fca8f4 100644
28497 --- a/drivers/ata/pata_hpt3x3.c
28498 +++ b/drivers/ata/pata_hpt3x3.c
28499 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28500 ATA_BMDMA_SHT(DRV_NAME),
28501 };
28502
28503 -static struct ata_port_operations hpt3x3_port_ops = {
28504 +static const struct ata_port_operations hpt3x3_port_ops = {
28505 .inherits = &ata_bmdma_port_ops,
28506 .cable_detect = ata_cable_40wire,
28507 .set_piomode = hpt3x3_set_piomode,
28508 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28509 index b663b7f..9a26c2a 100644
28510 --- a/drivers/ata/pata_icside.c
28511 +++ b/drivers/ata/pata_icside.c
28512 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28513 }
28514 }
28515
28516 -static struct ata_port_operations pata_icside_port_ops = {
28517 +static const struct ata_port_operations pata_icside_port_ops = {
28518 .inherits = &ata_sff_port_ops,
28519 /* no need to build any PRD tables for DMA */
28520 .qc_prep = ata_noop_qc_prep,
28521 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28522 index 4bceb88..457dfb6 100644
28523 --- a/drivers/ata/pata_isapnp.c
28524 +++ b/drivers/ata/pata_isapnp.c
28525 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28526 ATA_PIO_SHT(DRV_NAME),
28527 };
28528
28529 -static struct ata_port_operations isapnp_port_ops = {
28530 +static const struct ata_port_operations isapnp_port_ops = {
28531 .inherits = &ata_sff_port_ops,
28532 .cable_detect = ata_cable_40wire,
28533 };
28534
28535 -static struct ata_port_operations isapnp_noalt_port_ops = {
28536 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28537 .inherits = &ata_sff_port_ops,
28538 .cable_detect = ata_cable_40wire,
28539 /* No altstatus so we don't want to use the lost interrupt poll */
28540 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28541 index f156da8..24976e2 100644
28542 --- a/drivers/ata/pata_it8213.c
28543 +++ b/drivers/ata/pata_it8213.c
28544 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28545 };
28546
28547
28548 -static struct ata_port_operations it8213_ops = {
28549 +static const struct ata_port_operations it8213_ops = {
28550 .inherits = &ata_bmdma_port_ops,
28551 .cable_detect = it8213_cable_detect,
28552 .set_piomode = it8213_set_piomode,
28553 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28554 index 188bc2f..ca9e785 100644
28555 --- a/drivers/ata/pata_it821x.c
28556 +++ b/drivers/ata/pata_it821x.c
28557 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28558 ATA_BMDMA_SHT(DRV_NAME),
28559 };
28560
28561 -static struct ata_port_operations it821x_smart_port_ops = {
28562 +static const struct ata_port_operations it821x_smart_port_ops = {
28563 .inherits = &ata_bmdma_port_ops,
28564
28565 .check_atapi_dma= it821x_check_atapi_dma,
28566 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28567 .port_start = it821x_port_start,
28568 };
28569
28570 -static struct ata_port_operations it821x_passthru_port_ops = {
28571 +static const struct ata_port_operations it821x_passthru_port_ops = {
28572 .inherits = &ata_bmdma_port_ops,
28573
28574 .check_atapi_dma= it821x_check_atapi_dma,
28575 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28576 .port_start = it821x_port_start,
28577 };
28578
28579 -static struct ata_port_operations it821x_rdc_port_ops = {
28580 +static const struct ata_port_operations it821x_rdc_port_ops = {
28581 .inherits = &ata_bmdma_port_ops,
28582
28583 .check_atapi_dma= it821x_check_atapi_dma,
28584 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28585 index ba54b08..4b952b7 100644
28586 --- a/drivers/ata/pata_ixp4xx_cf.c
28587 +++ b/drivers/ata/pata_ixp4xx_cf.c
28588 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28589 ATA_PIO_SHT(DRV_NAME),
28590 };
28591
28592 -static struct ata_port_operations ixp4xx_port_ops = {
28593 +static const struct ata_port_operations ixp4xx_port_ops = {
28594 .inherits = &ata_sff_port_ops,
28595 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28596 .cable_detect = ata_cable_40wire,
28597 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28598 index 3a1474a..434b0ff 100644
28599 --- a/drivers/ata/pata_jmicron.c
28600 +++ b/drivers/ata/pata_jmicron.c
28601 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28602 ATA_BMDMA_SHT(DRV_NAME),
28603 };
28604
28605 -static struct ata_port_operations jmicron_ops = {
28606 +static const struct ata_port_operations jmicron_ops = {
28607 .inherits = &ata_bmdma_port_ops,
28608 .prereset = jmicron_pre_reset,
28609 };
28610 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28611 index 6932e56..220e71d 100644
28612 --- a/drivers/ata/pata_legacy.c
28613 +++ b/drivers/ata/pata_legacy.c
28614 @@ -106,7 +106,7 @@ struct legacy_probe {
28615
28616 struct legacy_controller {
28617 const char *name;
28618 - struct ata_port_operations *ops;
28619 + const struct ata_port_operations *ops;
28620 unsigned int pio_mask;
28621 unsigned int flags;
28622 unsigned int pflags;
28623 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28624 * pio_mask as well.
28625 */
28626
28627 -static struct ata_port_operations simple_port_ops = {
28628 +static const struct ata_port_operations simple_port_ops = {
28629 .inherits = &legacy_base_port_ops,
28630 .sff_data_xfer = ata_sff_data_xfer_noirq,
28631 };
28632
28633 -static struct ata_port_operations legacy_port_ops = {
28634 +static const struct ata_port_operations legacy_port_ops = {
28635 .inherits = &legacy_base_port_ops,
28636 .sff_data_xfer = ata_sff_data_xfer_noirq,
28637 .set_mode = legacy_set_mode,
28638 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28639 return buflen;
28640 }
28641
28642 -static struct ata_port_operations pdc20230_port_ops = {
28643 +static const struct ata_port_operations pdc20230_port_ops = {
28644 .inherits = &legacy_base_port_ops,
28645 .set_piomode = pdc20230_set_piomode,
28646 .sff_data_xfer = pdc_data_xfer_vlb,
28647 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28648 ioread8(ap->ioaddr.status_addr);
28649 }
28650
28651 -static struct ata_port_operations ht6560a_port_ops = {
28652 +static const struct ata_port_operations ht6560a_port_ops = {
28653 .inherits = &legacy_base_port_ops,
28654 .set_piomode = ht6560a_set_piomode,
28655 };
28656 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28657 ioread8(ap->ioaddr.status_addr);
28658 }
28659
28660 -static struct ata_port_operations ht6560b_port_ops = {
28661 +static const struct ata_port_operations ht6560b_port_ops = {
28662 .inherits = &legacy_base_port_ops,
28663 .set_piomode = ht6560b_set_piomode,
28664 };
28665 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28666 }
28667
28668
28669 -static struct ata_port_operations opti82c611a_port_ops = {
28670 +static const struct ata_port_operations opti82c611a_port_ops = {
28671 .inherits = &legacy_base_port_ops,
28672 .set_piomode = opti82c611a_set_piomode,
28673 };
28674 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28675 return ata_sff_qc_issue(qc);
28676 }
28677
28678 -static struct ata_port_operations opti82c46x_port_ops = {
28679 +static const struct ata_port_operations opti82c46x_port_ops = {
28680 .inherits = &legacy_base_port_ops,
28681 .set_piomode = opti82c46x_set_piomode,
28682 .qc_issue = opti82c46x_qc_issue,
28683 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28684 return 0;
28685 }
28686
28687 -static struct ata_port_operations qdi6500_port_ops = {
28688 +static const struct ata_port_operations qdi6500_port_ops = {
28689 .inherits = &legacy_base_port_ops,
28690 .set_piomode = qdi6500_set_piomode,
28691 .qc_issue = qdi_qc_issue,
28692 .sff_data_xfer = vlb32_data_xfer,
28693 };
28694
28695 -static struct ata_port_operations qdi6580_port_ops = {
28696 +static const struct ata_port_operations qdi6580_port_ops = {
28697 .inherits = &legacy_base_port_ops,
28698 .set_piomode = qdi6580_set_piomode,
28699 .sff_data_xfer = vlb32_data_xfer,
28700 };
28701
28702 -static struct ata_port_operations qdi6580dp_port_ops = {
28703 +static const struct ata_port_operations qdi6580dp_port_ops = {
28704 .inherits = &legacy_base_port_ops,
28705 .set_piomode = qdi6580dp_set_piomode,
28706 .sff_data_xfer = vlb32_data_xfer,
28707 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28708 return 0;
28709 }
28710
28711 -static struct ata_port_operations winbond_port_ops = {
28712 +static const struct ata_port_operations winbond_port_ops = {
28713 .inherits = &legacy_base_port_ops,
28714 .set_piomode = winbond_set_piomode,
28715 .sff_data_xfer = vlb32_data_xfer,
28716 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28717 int pio_modes = controller->pio_mask;
28718 unsigned long io = probe->port;
28719 u32 mask = (1 << probe->slot);
28720 - struct ata_port_operations *ops = controller->ops;
28721 + const struct ata_port_operations *ops = controller->ops;
28722 struct legacy_data *ld = &legacy_data[probe->slot];
28723 struct ata_host *host = NULL;
28724 struct ata_port *ap;
28725 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28726 index 2096fb7..4d090fc 100644
28727 --- a/drivers/ata/pata_marvell.c
28728 +++ b/drivers/ata/pata_marvell.c
28729 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28730 ATA_BMDMA_SHT(DRV_NAME),
28731 };
28732
28733 -static struct ata_port_operations marvell_ops = {
28734 +static const struct ata_port_operations marvell_ops = {
28735 .inherits = &ata_bmdma_port_ops,
28736 .cable_detect = marvell_cable_detect,
28737 .prereset = marvell_pre_reset,
28738 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28739 index 99d41be..7d56aa8 100644
28740 --- a/drivers/ata/pata_mpc52xx.c
28741 +++ b/drivers/ata/pata_mpc52xx.c
28742 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28743 ATA_PIO_SHT(DRV_NAME),
28744 };
28745
28746 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28747 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28748 .inherits = &ata_bmdma_port_ops,
28749 .sff_dev_select = mpc52xx_ata_dev_select,
28750 .set_piomode = mpc52xx_ata_set_piomode,
28751 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28752 index b21f002..0a27e7f 100644
28753 --- a/drivers/ata/pata_mpiix.c
28754 +++ b/drivers/ata/pata_mpiix.c
28755 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28756 ATA_PIO_SHT(DRV_NAME),
28757 };
28758
28759 -static struct ata_port_operations mpiix_port_ops = {
28760 +static const struct ata_port_operations mpiix_port_ops = {
28761 .inherits = &ata_sff_port_ops,
28762 .qc_issue = mpiix_qc_issue,
28763 .cable_detect = ata_cable_40wire,
28764 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28765 index f0d52f7..89c3be3 100644
28766 --- a/drivers/ata/pata_netcell.c
28767 +++ b/drivers/ata/pata_netcell.c
28768 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28769 ATA_BMDMA_SHT(DRV_NAME),
28770 };
28771
28772 -static struct ata_port_operations netcell_ops = {
28773 +static const struct ata_port_operations netcell_ops = {
28774 .inherits = &ata_bmdma_port_ops,
28775 .cable_detect = ata_cable_80wire,
28776 .read_id = netcell_read_id,
28777 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28778 index dd53a66..a3f4317 100644
28779 --- a/drivers/ata/pata_ninja32.c
28780 +++ b/drivers/ata/pata_ninja32.c
28781 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28782 ATA_BMDMA_SHT(DRV_NAME),
28783 };
28784
28785 -static struct ata_port_operations ninja32_port_ops = {
28786 +static const struct ata_port_operations ninja32_port_ops = {
28787 .inherits = &ata_bmdma_port_ops,
28788 .sff_dev_select = ninja32_dev_select,
28789 .cable_detect = ata_cable_40wire,
28790 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28791 index ca53fac..9aa93ef 100644
28792 --- a/drivers/ata/pata_ns87410.c
28793 +++ b/drivers/ata/pata_ns87410.c
28794 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28795 ATA_PIO_SHT(DRV_NAME),
28796 };
28797
28798 -static struct ata_port_operations ns87410_port_ops = {
28799 +static const struct ata_port_operations ns87410_port_ops = {
28800 .inherits = &ata_sff_port_ops,
28801 .qc_issue = ns87410_qc_issue,
28802 .cable_detect = ata_cable_40wire,
28803 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28804 index 773b159..55f454e 100644
28805 --- a/drivers/ata/pata_ns87415.c
28806 +++ b/drivers/ata/pata_ns87415.c
28807 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28808 }
28809 #endif /* 87560 SuperIO Support */
28810
28811 -static struct ata_port_operations ns87415_pata_ops = {
28812 +static const struct ata_port_operations ns87415_pata_ops = {
28813 .inherits = &ata_bmdma_port_ops,
28814
28815 .check_atapi_dma = ns87415_check_atapi_dma,
28816 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28817 };
28818
28819 #if defined(CONFIG_SUPERIO)
28820 -static struct ata_port_operations ns87560_pata_ops = {
28821 +static const struct ata_port_operations ns87560_pata_ops = {
28822 .inherits = &ns87415_pata_ops,
28823 .sff_tf_read = ns87560_tf_read,
28824 .sff_check_status = ns87560_check_status,
28825 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28826 index d6f6956..639295b 100644
28827 --- a/drivers/ata/pata_octeon_cf.c
28828 +++ b/drivers/ata/pata_octeon_cf.c
28829 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28830 return 0;
28831 }
28832
28833 +/* cannot be const */
28834 static struct ata_port_operations octeon_cf_ops = {
28835 .inherits = &ata_sff_port_ops,
28836 .check_atapi_dma = octeon_cf_check_atapi_dma,
28837 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28838 index 84ac503..adee1cd 100644
28839 --- a/drivers/ata/pata_oldpiix.c
28840 +++ b/drivers/ata/pata_oldpiix.c
28841 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28842 ATA_BMDMA_SHT(DRV_NAME),
28843 };
28844
28845 -static struct ata_port_operations oldpiix_pata_ops = {
28846 +static const struct ata_port_operations oldpiix_pata_ops = {
28847 .inherits = &ata_bmdma_port_ops,
28848 .qc_issue = oldpiix_qc_issue,
28849 .cable_detect = ata_cable_40wire,
28850 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28851 index 99eddda..3a4c0aa 100644
28852 --- a/drivers/ata/pata_opti.c
28853 +++ b/drivers/ata/pata_opti.c
28854 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28855 ATA_PIO_SHT(DRV_NAME),
28856 };
28857
28858 -static struct ata_port_operations opti_port_ops = {
28859 +static const struct ata_port_operations opti_port_ops = {
28860 .inherits = &ata_sff_port_ops,
28861 .cable_detect = ata_cable_40wire,
28862 .set_piomode = opti_set_piomode,
28863 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28864 index 86885a4..8e9968d 100644
28865 --- a/drivers/ata/pata_optidma.c
28866 +++ b/drivers/ata/pata_optidma.c
28867 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28868 ATA_BMDMA_SHT(DRV_NAME),
28869 };
28870
28871 -static struct ata_port_operations optidma_port_ops = {
28872 +static const struct ata_port_operations optidma_port_ops = {
28873 .inherits = &ata_bmdma_port_ops,
28874 .cable_detect = ata_cable_40wire,
28875 .set_piomode = optidma_set_pio_mode,
28876 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28877 .prereset = optidma_pre_reset,
28878 };
28879
28880 -static struct ata_port_operations optiplus_port_ops = {
28881 +static const struct ata_port_operations optiplus_port_ops = {
28882 .inherits = &optidma_port_ops,
28883 .set_piomode = optiplus_set_pio_mode,
28884 .set_dmamode = optiplus_set_dma_mode,
28885 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28886 index 11fb4cc..1a14022 100644
28887 --- a/drivers/ata/pata_palmld.c
28888 +++ b/drivers/ata/pata_palmld.c
28889 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28890 ATA_PIO_SHT(DRV_NAME),
28891 };
28892
28893 -static struct ata_port_operations palmld_port_ops = {
28894 +static const struct ata_port_operations palmld_port_ops = {
28895 .inherits = &ata_sff_port_ops,
28896 .sff_data_xfer = ata_sff_data_xfer_noirq,
28897 .cable_detect = ata_cable_40wire,
28898 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28899 index dc99e26..7f4b1e4 100644
28900 --- a/drivers/ata/pata_pcmcia.c
28901 +++ b/drivers/ata/pata_pcmcia.c
28902 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28903 ATA_PIO_SHT(DRV_NAME),
28904 };
28905
28906 -static struct ata_port_operations pcmcia_port_ops = {
28907 +static const struct ata_port_operations pcmcia_port_ops = {
28908 .inherits = &ata_sff_port_ops,
28909 .sff_data_xfer = ata_sff_data_xfer_noirq,
28910 .cable_detect = ata_cable_40wire,
28911 .set_mode = pcmcia_set_mode,
28912 };
28913
28914 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28915 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28916 .inherits = &ata_sff_port_ops,
28917 .sff_data_xfer = ata_data_xfer_8bit,
28918 .cable_detect = ata_cable_40wire,
28919 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28920 unsigned long io_base, ctl_base;
28921 void __iomem *io_addr, *ctl_addr;
28922 int n_ports = 1;
28923 - struct ata_port_operations *ops = &pcmcia_port_ops;
28924 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28925
28926 info = kzalloc(sizeof(*info), GFP_KERNEL);
28927 if (info == NULL)
28928 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28929 index ca5cad0..3a1f125 100644
28930 --- a/drivers/ata/pata_pdc2027x.c
28931 +++ b/drivers/ata/pata_pdc2027x.c
28932 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28933 ATA_BMDMA_SHT(DRV_NAME),
28934 };
28935
28936 -static struct ata_port_operations pdc2027x_pata100_ops = {
28937 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28938 .inherits = &ata_bmdma_port_ops,
28939 .check_atapi_dma = pdc2027x_check_atapi_dma,
28940 .cable_detect = pdc2027x_cable_detect,
28941 .prereset = pdc2027x_prereset,
28942 };
28943
28944 -static struct ata_port_operations pdc2027x_pata133_ops = {
28945 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28946 .inherits = &pdc2027x_pata100_ops,
28947 .mode_filter = pdc2027x_mode_filter,
28948 .set_piomode = pdc2027x_set_piomode,
28949 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28950 index 2911120..4bf62aa 100644
28951 --- a/drivers/ata/pata_pdc202xx_old.c
28952 +++ b/drivers/ata/pata_pdc202xx_old.c
28953 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28954 ATA_BMDMA_SHT(DRV_NAME),
28955 };
28956
28957 -static struct ata_port_operations pdc2024x_port_ops = {
28958 +static const struct ata_port_operations pdc2024x_port_ops = {
28959 .inherits = &ata_bmdma_port_ops,
28960
28961 .cable_detect = ata_cable_40wire,
28962 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28963 .sff_exec_command = pdc202xx_exec_command,
28964 };
28965
28966 -static struct ata_port_operations pdc2026x_port_ops = {
28967 +static const struct ata_port_operations pdc2026x_port_ops = {
28968 .inherits = &pdc2024x_port_ops,
28969
28970 .check_atapi_dma = pdc2026x_check_atapi_dma,
28971 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28972 index 3f6ebc6..a18c358 100644
28973 --- a/drivers/ata/pata_platform.c
28974 +++ b/drivers/ata/pata_platform.c
28975 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28976 ATA_PIO_SHT(DRV_NAME),
28977 };
28978
28979 -static struct ata_port_operations pata_platform_port_ops = {
28980 +static const struct ata_port_operations pata_platform_port_ops = {
28981 .inherits = &ata_sff_port_ops,
28982 .sff_data_xfer = ata_sff_data_xfer_noirq,
28983 .cable_detect = ata_cable_unknown,
28984 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28985 index 45879dc..165a9f9 100644
28986 --- a/drivers/ata/pata_qdi.c
28987 +++ b/drivers/ata/pata_qdi.c
28988 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28989 ATA_PIO_SHT(DRV_NAME),
28990 };
28991
28992 -static struct ata_port_operations qdi6500_port_ops = {
28993 +static const struct ata_port_operations qdi6500_port_ops = {
28994 .inherits = &ata_sff_port_ops,
28995 .qc_issue = qdi_qc_issue,
28996 .sff_data_xfer = qdi_data_xfer,
28997 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28998 .set_piomode = qdi6500_set_piomode,
28999 };
29000
29001 -static struct ata_port_operations qdi6580_port_ops = {
29002 +static const struct ata_port_operations qdi6580_port_ops = {
29003 .inherits = &qdi6500_port_ops,
29004 .set_piomode = qdi6580_set_piomode,
29005 };
29006 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
29007 index 4401b33..716c5cc 100644
29008 --- a/drivers/ata/pata_radisys.c
29009 +++ b/drivers/ata/pata_radisys.c
29010 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
29011 ATA_BMDMA_SHT(DRV_NAME),
29012 };
29013
29014 -static struct ata_port_operations radisys_pata_ops = {
29015 +static const struct ata_port_operations radisys_pata_ops = {
29016 .inherits = &ata_bmdma_port_ops,
29017 .qc_issue = radisys_qc_issue,
29018 .cable_detect = ata_cable_unknown,
29019 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
29020 index 45f1e10..fab6bca 100644
29021 --- a/drivers/ata/pata_rb532_cf.c
29022 +++ b/drivers/ata/pata_rb532_cf.c
29023 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
29024 return IRQ_HANDLED;
29025 }
29026
29027 -static struct ata_port_operations rb532_pata_port_ops = {
29028 +static const struct ata_port_operations rb532_pata_port_ops = {
29029 .inherits = &ata_sff_port_ops,
29030 .sff_data_xfer = ata_sff_data_xfer32,
29031 };
29032 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29033 index c843a1e..b5853c3 100644
29034 --- a/drivers/ata/pata_rdc.c
29035 +++ b/drivers/ata/pata_rdc.c
29036 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29037 pci_write_config_byte(dev, 0x48, udma_enable);
29038 }
29039
29040 -static struct ata_port_operations rdc_pata_ops = {
29041 +static const struct ata_port_operations rdc_pata_ops = {
29042 .inherits = &ata_bmdma32_port_ops,
29043 .cable_detect = rdc_pata_cable_detect,
29044 .set_piomode = rdc_set_piomode,
29045 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29046 index a5e4dfe..080c8c9 100644
29047 --- a/drivers/ata/pata_rz1000.c
29048 +++ b/drivers/ata/pata_rz1000.c
29049 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29050 ATA_PIO_SHT(DRV_NAME),
29051 };
29052
29053 -static struct ata_port_operations rz1000_port_ops = {
29054 +static const struct ata_port_operations rz1000_port_ops = {
29055 .inherits = &ata_sff_port_ops,
29056 .cable_detect = ata_cable_40wire,
29057 .set_mode = rz1000_set_mode,
29058 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29059 index 3bbed83..e309daf 100644
29060 --- a/drivers/ata/pata_sc1200.c
29061 +++ b/drivers/ata/pata_sc1200.c
29062 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29063 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29064 };
29065
29066 -static struct ata_port_operations sc1200_port_ops = {
29067 +static const struct ata_port_operations sc1200_port_ops = {
29068 .inherits = &ata_bmdma_port_ops,
29069 .qc_prep = ata_sff_dumb_qc_prep,
29070 .qc_issue = sc1200_qc_issue,
29071 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29072 index 4257d6b..4c1d9d5 100644
29073 --- a/drivers/ata/pata_scc.c
29074 +++ b/drivers/ata/pata_scc.c
29075 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29076 ATA_BMDMA_SHT(DRV_NAME),
29077 };
29078
29079 -static struct ata_port_operations scc_pata_ops = {
29080 +static const struct ata_port_operations scc_pata_ops = {
29081 .inherits = &ata_bmdma_port_ops,
29082
29083 .set_piomode = scc_set_piomode,
29084 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29085 index 99cceb4..e2e0a87 100644
29086 --- a/drivers/ata/pata_sch.c
29087 +++ b/drivers/ata/pata_sch.c
29088 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29089 ATA_BMDMA_SHT(DRV_NAME),
29090 };
29091
29092 -static struct ata_port_operations sch_pata_ops = {
29093 +static const struct ata_port_operations sch_pata_ops = {
29094 .inherits = &ata_bmdma_port_ops,
29095 .cable_detect = ata_cable_unknown,
29096 .set_piomode = sch_set_piomode,
29097 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29098 index beaed12..39969f1 100644
29099 --- a/drivers/ata/pata_serverworks.c
29100 +++ b/drivers/ata/pata_serverworks.c
29101 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29102 ATA_BMDMA_SHT(DRV_NAME),
29103 };
29104
29105 -static struct ata_port_operations serverworks_osb4_port_ops = {
29106 +static const struct ata_port_operations serverworks_osb4_port_ops = {
29107 .inherits = &ata_bmdma_port_ops,
29108 .cable_detect = serverworks_cable_detect,
29109 .mode_filter = serverworks_osb4_filter,
29110 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29111 .set_dmamode = serverworks_set_dmamode,
29112 };
29113
29114 -static struct ata_port_operations serverworks_csb_port_ops = {
29115 +static const struct ata_port_operations serverworks_csb_port_ops = {
29116 .inherits = &serverworks_osb4_port_ops,
29117 .mode_filter = serverworks_csb_filter,
29118 };
29119 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29120 index a2ace48..0463b44 100644
29121 --- a/drivers/ata/pata_sil680.c
29122 +++ b/drivers/ata/pata_sil680.c
29123 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29124 ATA_BMDMA_SHT(DRV_NAME),
29125 };
29126
29127 -static struct ata_port_operations sil680_port_ops = {
29128 +static const struct ata_port_operations sil680_port_ops = {
29129 .inherits = &ata_bmdma32_port_ops,
29130 .cable_detect = sil680_cable_detect,
29131 .set_piomode = sil680_set_piomode,
29132 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29133 index 488e77b..b3724d5 100644
29134 --- a/drivers/ata/pata_sis.c
29135 +++ b/drivers/ata/pata_sis.c
29136 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29137 ATA_BMDMA_SHT(DRV_NAME),
29138 };
29139
29140 -static struct ata_port_operations sis_133_for_sata_ops = {
29141 +static const struct ata_port_operations sis_133_for_sata_ops = {
29142 .inherits = &ata_bmdma_port_ops,
29143 .set_piomode = sis_133_set_piomode,
29144 .set_dmamode = sis_133_set_dmamode,
29145 .cable_detect = sis_133_cable_detect,
29146 };
29147
29148 -static struct ata_port_operations sis_base_ops = {
29149 +static const struct ata_port_operations sis_base_ops = {
29150 .inherits = &ata_bmdma_port_ops,
29151 .prereset = sis_pre_reset,
29152 };
29153
29154 -static struct ata_port_operations sis_133_ops = {
29155 +static const struct ata_port_operations sis_133_ops = {
29156 .inherits = &sis_base_ops,
29157 .set_piomode = sis_133_set_piomode,
29158 .set_dmamode = sis_133_set_dmamode,
29159 .cable_detect = sis_133_cable_detect,
29160 };
29161
29162 -static struct ata_port_operations sis_133_early_ops = {
29163 +static const struct ata_port_operations sis_133_early_ops = {
29164 .inherits = &sis_base_ops,
29165 .set_piomode = sis_100_set_piomode,
29166 .set_dmamode = sis_133_early_set_dmamode,
29167 .cable_detect = sis_66_cable_detect,
29168 };
29169
29170 -static struct ata_port_operations sis_100_ops = {
29171 +static const struct ata_port_operations sis_100_ops = {
29172 .inherits = &sis_base_ops,
29173 .set_piomode = sis_100_set_piomode,
29174 .set_dmamode = sis_100_set_dmamode,
29175 .cable_detect = sis_66_cable_detect,
29176 };
29177
29178 -static struct ata_port_operations sis_66_ops = {
29179 +static const struct ata_port_operations sis_66_ops = {
29180 .inherits = &sis_base_ops,
29181 .set_piomode = sis_old_set_piomode,
29182 .set_dmamode = sis_66_set_dmamode,
29183 .cable_detect = sis_66_cable_detect,
29184 };
29185
29186 -static struct ata_port_operations sis_old_ops = {
29187 +static const struct ata_port_operations sis_old_ops = {
29188 .inherits = &sis_base_ops,
29189 .set_piomode = sis_old_set_piomode,
29190 .set_dmamode = sis_old_set_dmamode,
29191 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29192 index 29f733c..43e9ca0 100644
29193 --- a/drivers/ata/pata_sl82c105.c
29194 +++ b/drivers/ata/pata_sl82c105.c
29195 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29196 ATA_BMDMA_SHT(DRV_NAME),
29197 };
29198
29199 -static struct ata_port_operations sl82c105_port_ops = {
29200 +static const struct ata_port_operations sl82c105_port_ops = {
29201 .inherits = &ata_bmdma_port_ops,
29202 .qc_defer = sl82c105_qc_defer,
29203 .bmdma_start = sl82c105_bmdma_start,
29204 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29205 index f1f13ff..df39e99 100644
29206 --- a/drivers/ata/pata_triflex.c
29207 +++ b/drivers/ata/pata_triflex.c
29208 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29209 ATA_BMDMA_SHT(DRV_NAME),
29210 };
29211
29212 -static struct ata_port_operations triflex_port_ops = {
29213 +static const struct ata_port_operations triflex_port_ops = {
29214 .inherits = &ata_bmdma_port_ops,
29215 .bmdma_start = triflex_bmdma_start,
29216 .bmdma_stop = triflex_bmdma_stop,
29217 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29218 index 1d73b8d..98a4b29 100644
29219 --- a/drivers/ata/pata_via.c
29220 +++ b/drivers/ata/pata_via.c
29221 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29222 ATA_BMDMA_SHT(DRV_NAME),
29223 };
29224
29225 -static struct ata_port_operations via_port_ops = {
29226 +static const struct ata_port_operations via_port_ops = {
29227 .inherits = &ata_bmdma_port_ops,
29228 .cable_detect = via_cable_detect,
29229 .set_piomode = via_set_piomode,
29230 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29231 .port_start = via_port_start,
29232 };
29233
29234 -static struct ata_port_operations via_port_ops_noirq = {
29235 +static const struct ata_port_operations via_port_ops_noirq = {
29236 .inherits = &via_port_ops,
29237 .sff_data_xfer = ata_sff_data_xfer_noirq,
29238 };
29239 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29240 index 6d8619b..ad511c4 100644
29241 --- a/drivers/ata/pata_winbond.c
29242 +++ b/drivers/ata/pata_winbond.c
29243 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29244 ATA_PIO_SHT(DRV_NAME),
29245 };
29246
29247 -static struct ata_port_operations winbond_port_ops = {
29248 +static const struct ata_port_operations winbond_port_ops = {
29249 .inherits = &ata_sff_port_ops,
29250 .sff_data_xfer = winbond_data_xfer,
29251 .cable_detect = ata_cable_40wire,
29252 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29253 index 6c65b07..f996ec7 100644
29254 --- a/drivers/ata/pdc_adma.c
29255 +++ b/drivers/ata/pdc_adma.c
29256 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29257 .dma_boundary = ADMA_DMA_BOUNDARY,
29258 };
29259
29260 -static struct ata_port_operations adma_ata_ops = {
29261 +static const struct ata_port_operations adma_ata_ops = {
29262 .inherits = &ata_sff_port_ops,
29263
29264 .lost_interrupt = ATA_OP_NULL,
29265 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29266 index 172b57e..c49bc1e 100644
29267 --- a/drivers/ata/sata_fsl.c
29268 +++ b/drivers/ata/sata_fsl.c
29269 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29270 .dma_boundary = ATA_DMA_BOUNDARY,
29271 };
29272
29273 -static struct ata_port_operations sata_fsl_ops = {
29274 +static const struct ata_port_operations sata_fsl_ops = {
29275 .inherits = &sata_pmp_port_ops,
29276
29277 .qc_defer = ata_std_qc_defer,
29278 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29279 index 4406902..60603ef 100644
29280 --- a/drivers/ata/sata_inic162x.c
29281 +++ b/drivers/ata/sata_inic162x.c
29282 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29283 return 0;
29284 }
29285
29286 -static struct ata_port_operations inic_port_ops = {
29287 +static const struct ata_port_operations inic_port_ops = {
29288 .inherits = &sata_port_ops,
29289
29290 .check_atapi_dma = inic_check_atapi_dma,
29291 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29292 index cf41126..8107be6 100644
29293 --- a/drivers/ata/sata_mv.c
29294 +++ b/drivers/ata/sata_mv.c
29295 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29296 .dma_boundary = MV_DMA_BOUNDARY,
29297 };
29298
29299 -static struct ata_port_operations mv5_ops = {
29300 +static const struct ata_port_operations mv5_ops = {
29301 .inherits = &ata_sff_port_ops,
29302
29303 .lost_interrupt = ATA_OP_NULL,
29304 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29305 .port_stop = mv_port_stop,
29306 };
29307
29308 -static struct ata_port_operations mv6_ops = {
29309 +static const struct ata_port_operations mv6_ops = {
29310 .inherits = &mv5_ops,
29311 .dev_config = mv6_dev_config,
29312 .scr_read = mv_scr_read,
29313 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29314 .bmdma_status = mv_bmdma_status,
29315 };
29316
29317 -static struct ata_port_operations mv_iie_ops = {
29318 +static const struct ata_port_operations mv_iie_ops = {
29319 .inherits = &mv6_ops,
29320 .dev_config = ATA_OP_NULL,
29321 .qc_prep = mv_qc_prep_iie,
29322 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29323 index ae2297c..d5c9c33 100644
29324 --- a/drivers/ata/sata_nv.c
29325 +++ b/drivers/ata/sata_nv.c
29326 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29327 * cases. Define nv_hardreset() which only kicks in for post-boot
29328 * probing and use it for all variants.
29329 */
29330 -static struct ata_port_operations nv_generic_ops = {
29331 +static const struct ata_port_operations nv_generic_ops = {
29332 .inherits = &ata_bmdma_port_ops,
29333 .lost_interrupt = ATA_OP_NULL,
29334 .scr_read = nv_scr_read,
29335 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29336 .hardreset = nv_hardreset,
29337 };
29338
29339 -static struct ata_port_operations nv_nf2_ops = {
29340 +static const struct ata_port_operations nv_nf2_ops = {
29341 .inherits = &nv_generic_ops,
29342 .freeze = nv_nf2_freeze,
29343 .thaw = nv_nf2_thaw,
29344 };
29345
29346 -static struct ata_port_operations nv_ck804_ops = {
29347 +static const struct ata_port_operations nv_ck804_ops = {
29348 .inherits = &nv_generic_ops,
29349 .freeze = nv_ck804_freeze,
29350 .thaw = nv_ck804_thaw,
29351 .host_stop = nv_ck804_host_stop,
29352 };
29353
29354 -static struct ata_port_operations nv_adma_ops = {
29355 +static const struct ata_port_operations nv_adma_ops = {
29356 .inherits = &nv_ck804_ops,
29357
29358 .check_atapi_dma = nv_adma_check_atapi_dma,
29359 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29360 .host_stop = nv_adma_host_stop,
29361 };
29362
29363 -static struct ata_port_operations nv_swncq_ops = {
29364 +static const struct ata_port_operations nv_swncq_ops = {
29365 .inherits = &nv_generic_ops,
29366
29367 .qc_defer = ata_std_qc_defer,
29368 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29369 index 07d8d00..6cc70bb 100644
29370 --- a/drivers/ata/sata_promise.c
29371 +++ b/drivers/ata/sata_promise.c
29372 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29373 .error_handler = pdc_error_handler,
29374 };
29375
29376 -static struct ata_port_operations pdc_sata_ops = {
29377 +static const struct ata_port_operations pdc_sata_ops = {
29378 .inherits = &pdc_common_ops,
29379 .cable_detect = pdc_sata_cable_detect,
29380 .freeze = pdc_sata_freeze,
29381 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29382
29383 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29384 and ->freeze/thaw that ignore the hotplug controls. */
29385 -static struct ata_port_operations pdc_old_sata_ops = {
29386 +static const struct ata_port_operations pdc_old_sata_ops = {
29387 .inherits = &pdc_sata_ops,
29388 .freeze = pdc_freeze,
29389 .thaw = pdc_thaw,
29390 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29391 };
29392
29393 -static struct ata_port_operations pdc_pata_ops = {
29394 +static const struct ata_port_operations pdc_pata_ops = {
29395 .inherits = &pdc_common_ops,
29396 .cable_detect = pdc_pata_cable_detect,
29397 .freeze = pdc_freeze,
29398 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29399 index 326c0cf..36ecebe 100644
29400 --- a/drivers/ata/sata_qstor.c
29401 +++ b/drivers/ata/sata_qstor.c
29402 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29403 .dma_boundary = QS_DMA_BOUNDARY,
29404 };
29405
29406 -static struct ata_port_operations qs_ata_ops = {
29407 +static const struct ata_port_operations qs_ata_ops = {
29408 .inherits = &ata_sff_port_ops,
29409
29410 .check_atapi_dma = qs_check_atapi_dma,
29411 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29412 index 3cb69d5..0871d3c 100644
29413 --- a/drivers/ata/sata_sil.c
29414 +++ b/drivers/ata/sata_sil.c
29415 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29416 .sg_tablesize = ATA_MAX_PRD
29417 };
29418
29419 -static struct ata_port_operations sil_ops = {
29420 +static const struct ata_port_operations sil_ops = {
29421 .inherits = &ata_bmdma32_port_ops,
29422 .dev_config = sil_dev_config,
29423 .set_mode = sil_set_mode,
29424 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29425 index e6946fc..eddb794 100644
29426 --- a/drivers/ata/sata_sil24.c
29427 +++ b/drivers/ata/sata_sil24.c
29428 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29429 .dma_boundary = ATA_DMA_BOUNDARY,
29430 };
29431
29432 -static struct ata_port_operations sil24_ops = {
29433 +static const struct ata_port_operations sil24_ops = {
29434 .inherits = &sata_pmp_port_ops,
29435
29436 .qc_defer = sil24_qc_defer,
29437 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29438 index f8a91bf..9cb06b6 100644
29439 --- a/drivers/ata/sata_sis.c
29440 +++ b/drivers/ata/sata_sis.c
29441 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29442 ATA_BMDMA_SHT(DRV_NAME),
29443 };
29444
29445 -static struct ata_port_operations sis_ops = {
29446 +static const struct ata_port_operations sis_ops = {
29447 .inherits = &ata_bmdma_port_ops,
29448 .scr_read = sis_scr_read,
29449 .scr_write = sis_scr_write,
29450 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29451 index 7257f2d..d04c6f5 100644
29452 --- a/drivers/ata/sata_svw.c
29453 +++ b/drivers/ata/sata_svw.c
29454 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29455 };
29456
29457
29458 -static struct ata_port_operations k2_sata_ops = {
29459 +static const struct ata_port_operations k2_sata_ops = {
29460 .inherits = &ata_bmdma_port_ops,
29461 .sff_tf_load = k2_sata_tf_load,
29462 .sff_tf_read = k2_sata_tf_read,
29463 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29464 index bbcf970..cd0df0d 100644
29465 --- a/drivers/ata/sata_sx4.c
29466 +++ b/drivers/ata/sata_sx4.c
29467 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29468 };
29469
29470 /* TODO: inherit from base port_ops after converting to new EH */
29471 -static struct ata_port_operations pdc_20621_ops = {
29472 +static const struct ata_port_operations pdc_20621_ops = {
29473 .inherits = &ata_sff_port_ops,
29474
29475 .check_atapi_dma = pdc_check_atapi_dma,
29476 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29477 index e5bff47..089d859 100644
29478 --- a/drivers/ata/sata_uli.c
29479 +++ b/drivers/ata/sata_uli.c
29480 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29481 ATA_BMDMA_SHT(DRV_NAME),
29482 };
29483
29484 -static struct ata_port_operations uli_ops = {
29485 +static const struct ata_port_operations uli_ops = {
29486 .inherits = &ata_bmdma_port_ops,
29487 .scr_read = uli_scr_read,
29488 .scr_write = uli_scr_write,
29489 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29490 index f5dcca7..77b94eb 100644
29491 --- a/drivers/ata/sata_via.c
29492 +++ b/drivers/ata/sata_via.c
29493 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29494 ATA_BMDMA_SHT(DRV_NAME),
29495 };
29496
29497 -static struct ata_port_operations svia_base_ops = {
29498 +static const struct ata_port_operations svia_base_ops = {
29499 .inherits = &ata_bmdma_port_ops,
29500 .sff_tf_load = svia_tf_load,
29501 };
29502
29503 -static struct ata_port_operations vt6420_sata_ops = {
29504 +static const struct ata_port_operations vt6420_sata_ops = {
29505 .inherits = &svia_base_ops,
29506 .freeze = svia_noop_freeze,
29507 .prereset = vt6420_prereset,
29508 .bmdma_start = vt6420_bmdma_start,
29509 };
29510
29511 -static struct ata_port_operations vt6421_pata_ops = {
29512 +static const struct ata_port_operations vt6421_pata_ops = {
29513 .inherits = &svia_base_ops,
29514 .cable_detect = vt6421_pata_cable_detect,
29515 .set_piomode = vt6421_set_pio_mode,
29516 .set_dmamode = vt6421_set_dma_mode,
29517 };
29518
29519 -static struct ata_port_operations vt6421_sata_ops = {
29520 +static const struct ata_port_operations vt6421_sata_ops = {
29521 .inherits = &svia_base_ops,
29522 .scr_read = svia_scr_read,
29523 .scr_write = svia_scr_write,
29524 };
29525
29526 -static struct ata_port_operations vt8251_ops = {
29527 +static const struct ata_port_operations vt8251_ops = {
29528 .inherits = &svia_base_ops,
29529 .hardreset = sata_std_hardreset,
29530 .scr_read = vt8251_scr_read,
29531 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29532 index 8b2a278..51e65d3 100644
29533 --- a/drivers/ata/sata_vsc.c
29534 +++ b/drivers/ata/sata_vsc.c
29535 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29536 };
29537
29538
29539 -static struct ata_port_operations vsc_sata_ops = {
29540 +static const struct ata_port_operations vsc_sata_ops = {
29541 .inherits = &ata_bmdma_port_ops,
29542 /* The IRQ handling is not quite standard SFF behaviour so we
29543 cannot use the default lost interrupt handler */
29544 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29545 index 5effec6..7e4019a 100644
29546 --- a/drivers/atm/adummy.c
29547 +++ b/drivers/atm/adummy.c
29548 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29549 vcc->pop(vcc, skb);
29550 else
29551 dev_kfree_skb_any(skb);
29552 - atomic_inc(&vcc->stats->tx);
29553 + atomic_inc_unchecked(&vcc->stats->tx);
29554
29555 return 0;
29556 }
29557 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29558 index 66e1813..26a27c6 100644
29559 --- a/drivers/atm/ambassador.c
29560 +++ b/drivers/atm/ambassador.c
29561 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29562 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29563
29564 // VC layer stats
29565 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29566 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29567
29568 // free the descriptor
29569 kfree (tx_descr);
29570 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29571 dump_skb ("<<<", vc, skb);
29572
29573 // VC layer stats
29574 - atomic_inc(&atm_vcc->stats->rx);
29575 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29576 __net_timestamp(skb);
29577 // end of our responsability
29578 atm_vcc->push (atm_vcc, skb);
29579 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29580 } else {
29581 PRINTK (KERN_INFO, "dropped over-size frame");
29582 // should we count this?
29583 - atomic_inc(&atm_vcc->stats->rx_drop);
29584 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29585 }
29586
29587 } else {
29588 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29589 }
29590
29591 if (check_area (skb->data, skb->len)) {
29592 - atomic_inc(&atm_vcc->stats->tx_err);
29593 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29594 return -ENOMEM; // ?
29595 }
29596
29597 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29598 index 02ad83d..6daffeb 100644
29599 --- a/drivers/atm/atmtcp.c
29600 +++ b/drivers/atm/atmtcp.c
29601 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29602 if (vcc->pop) vcc->pop(vcc,skb);
29603 else dev_kfree_skb(skb);
29604 if (dev_data) return 0;
29605 - atomic_inc(&vcc->stats->tx_err);
29606 + atomic_inc_unchecked(&vcc->stats->tx_err);
29607 return -ENOLINK;
29608 }
29609 size = skb->len+sizeof(struct atmtcp_hdr);
29610 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29611 if (!new_skb) {
29612 if (vcc->pop) vcc->pop(vcc,skb);
29613 else dev_kfree_skb(skb);
29614 - atomic_inc(&vcc->stats->tx_err);
29615 + atomic_inc_unchecked(&vcc->stats->tx_err);
29616 return -ENOBUFS;
29617 }
29618 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29619 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29620 if (vcc->pop) vcc->pop(vcc,skb);
29621 else dev_kfree_skb(skb);
29622 out_vcc->push(out_vcc,new_skb);
29623 - atomic_inc(&vcc->stats->tx);
29624 - atomic_inc(&out_vcc->stats->rx);
29625 + atomic_inc_unchecked(&vcc->stats->tx);
29626 + atomic_inc_unchecked(&out_vcc->stats->rx);
29627 return 0;
29628 }
29629
29630 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29631 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29632 read_unlock(&vcc_sklist_lock);
29633 if (!out_vcc) {
29634 - atomic_inc(&vcc->stats->tx_err);
29635 + atomic_inc_unchecked(&vcc->stats->tx_err);
29636 goto done;
29637 }
29638 skb_pull(skb,sizeof(struct atmtcp_hdr));
29639 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29640 __net_timestamp(new_skb);
29641 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29642 out_vcc->push(out_vcc,new_skb);
29643 - atomic_inc(&vcc->stats->tx);
29644 - atomic_inc(&out_vcc->stats->rx);
29645 + atomic_inc_unchecked(&vcc->stats->tx);
29646 + atomic_inc_unchecked(&out_vcc->stats->rx);
29647 done:
29648 if (vcc->pop) vcc->pop(vcc,skb);
29649 else dev_kfree_skb(skb);
29650 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29651 index 0c30261..3da356e 100644
29652 --- a/drivers/atm/eni.c
29653 +++ b/drivers/atm/eni.c
29654 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29655 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29656 vcc->dev->number);
29657 length = 0;
29658 - atomic_inc(&vcc->stats->rx_err);
29659 + atomic_inc_unchecked(&vcc->stats->rx_err);
29660 }
29661 else {
29662 length = ATM_CELL_SIZE-1; /* no HEC */
29663 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29664 size);
29665 }
29666 eff = length = 0;
29667 - atomic_inc(&vcc->stats->rx_err);
29668 + atomic_inc_unchecked(&vcc->stats->rx_err);
29669 }
29670 else {
29671 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29672 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29673 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29674 vcc->dev->number,vcc->vci,length,size << 2,descr);
29675 length = eff = 0;
29676 - atomic_inc(&vcc->stats->rx_err);
29677 + atomic_inc_unchecked(&vcc->stats->rx_err);
29678 }
29679 }
29680 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29681 @@ -770,7 +770,7 @@ rx_dequeued++;
29682 vcc->push(vcc,skb);
29683 pushed++;
29684 }
29685 - atomic_inc(&vcc->stats->rx);
29686 + atomic_inc_unchecked(&vcc->stats->rx);
29687 }
29688 wake_up(&eni_dev->rx_wait);
29689 }
29690 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29691 PCI_DMA_TODEVICE);
29692 if (vcc->pop) vcc->pop(vcc,skb);
29693 else dev_kfree_skb_irq(skb);
29694 - atomic_inc(&vcc->stats->tx);
29695 + atomic_inc_unchecked(&vcc->stats->tx);
29696 wake_up(&eni_dev->tx_wait);
29697 dma_complete++;
29698 }
29699 @@ -1570,7 +1570,7 @@ tx_complete++;
29700 /*--------------------------------- entries ---------------------------------*/
29701
29702
29703 -static const char *media_name[] __devinitdata = {
29704 +static const char *media_name[] __devinitconst = {
29705 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29706 "UTP", "05?", "06?", "07?", /* 4- 7 */
29707 "TAXI","09?", "10?", "11?", /* 8-11 */
29708 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29709 index cd5049a..a51209f 100644
29710 --- a/drivers/atm/firestream.c
29711 +++ b/drivers/atm/firestream.c
29712 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29713 }
29714 }
29715
29716 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29717 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29718
29719 fs_dprintk (FS_DEBUG_TXMEM, "i");
29720 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29721 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29722 #endif
29723 skb_put (skb, qe->p1 & 0xffff);
29724 ATM_SKB(skb)->vcc = atm_vcc;
29725 - atomic_inc(&atm_vcc->stats->rx);
29726 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29727 __net_timestamp(skb);
29728 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29729 atm_vcc->push (atm_vcc, skb);
29730 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29731 kfree (pe);
29732 }
29733 if (atm_vcc)
29734 - atomic_inc(&atm_vcc->stats->rx_drop);
29735 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29736 break;
29737 case 0x1f: /* Reassembly abort: no buffers. */
29738 /* Silently increment error counter. */
29739 if (atm_vcc)
29740 - atomic_inc(&atm_vcc->stats->rx_drop);
29741 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29742 break;
29743 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29744 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29745 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29746 index f766cc4..a34002e 100644
29747 --- a/drivers/atm/fore200e.c
29748 +++ b/drivers/atm/fore200e.c
29749 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29750 #endif
29751 /* check error condition */
29752 if (*entry->status & STATUS_ERROR)
29753 - atomic_inc(&vcc->stats->tx_err);
29754 + atomic_inc_unchecked(&vcc->stats->tx_err);
29755 else
29756 - atomic_inc(&vcc->stats->tx);
29757 + atomic_inc_unchecked(&vcc->stats->tx);
29758 }
29759 }
29760
29761 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29762 if (skb == NULL) {
29763 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29764
29765 - atomic_inc(&vcc->stats->rx_drop);
29766 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29767 return -ENOMEM;
29768 }
29769
29770 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29771
29772 dev_kfree_skb_any(skb);
29773
29774 - atomic_inc(&vcc->stats->rx_drop);
29775 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29776 return -ENOMEM;
29777 }
29778
29779 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29780
29781 vcc->push(vcc, skb);
29782 - atomic_inc(&vcc->stats->rx);
29783 + atomic_inc_unchecked(&vcc->stats->rx);
29784
29785 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29786
29787 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29788 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29789 fore200e->atm_dev->number,
29790 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29791 - atomic_inc(&vcc->stats->rx_err);
29792 + atomic_inc_unchecked(&vcc->stats->rx_err);
29793 }
29794 }
29795
29796 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29797 goto retry_here;
29798 }
29799
29800 - atomic_inc(&vcc->stats->tx_err);
29801 + atomic_inc_unchecked(&vcc->stats->tx_err);
29802
29803 fore200e->tx_sat++;
29804 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29805 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29806 index 7066703..2b130de 100644
29807 --- a/drivers/atm/he.c
29808 +++ b/drivers/atm/he.c
29809 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29810
29811 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29812 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29813 - atomic_inc(&vcc->stats->rx_drop);
29814 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29815 goto return_host_buffers;
29816 }
29817
29818 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29819 RBRQ_LEN_ERR(he_dev->rbrq_head)
29820 ? "LEN_ERR" : "",
29821 vcc->vpi, vcc->vci);
29822 - atomic_inc(&vcc->stats->rx_err);
29823 + atomic_inc_unchecked(&vcc->stats->rx_err);
29824 goto return_host_buffers;
29825 }
29826
29827 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29828 vcc->push(vcc, skb);
29829 spin_lock(&he_dev->global_lock);
29830
29831 - atomic_inc(&vcc->stats->rx);
29832 + atomic_inc_unchecked(&vcc->stats->rx);
29833
29834 return_host_buffers:
29835 ++pdus_assembled;
29836 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29837 tpd->vcc->pop(tpd->vcc, tpd->skb);
29838 else
29839 dev_kfree_skb_any(tpd->skb);
29840 - atomic_inc(&tpd->vcc->stats->tx_err);
29841 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29842 }
29843 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29844 return;
29845 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29846 vcc->pop(vcc, skb);
29847 else
29848 dev_kfree_skb_any(skb);
29849 - atomic_inc(&vcc->stats->tx_err);
29850 + atomic_inc_unchecked(&vcc->stats->tx_err);
29851 return -EINVAL;
29852 }
29853
29854 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29855 vcc->pop(vcc, skb);
29856 else
29857 dev_kfree_skb_any(skb);
29858 - atomic_inc(&vcc->stats->tx_err);
29859 + atomic_inc_unchecked(&vcc->stats->tx_err);
29860 return -EINVAL;
29861 }
29862 #endif
29863 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29864 vcc->pop(vcc, skb);
29865 else
29866 dev_kfree_skb_any(skb);
29867 - atomic_inc(&vcc->stats->tx_err);
29868 + atomic_inc_unchecked(&vcc->stats->tx_err);
29869 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29870 return -ENOMEM;
29871 }
29872 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29873 vcc->pop(vcc, skb);
29874 else
29875 dev_kfree_skb_any(skb);
29876 - atomic_inc(&vcc->stats->tx_err);
29877 + atomic_inc_unchecked(&vcc->stats->tx_err);
29878 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29879 return -ENOMEM;
29880 }
29881 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29882 __enqueue_tpd(he_dev, tpd, cid);
29883 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29884
29885 - atomic_inc(&vcc->stats->tx);
29886 + atomic_inc_unchecked(&vcc->stats->tx);
29887
29888 return 0;
29889 }
29890 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29891 index 4e49021..01b1512 100644
29892 --- a/drivers/atm/horizon.c
29893 +++ b/drivers/atm/horizon.c
29894 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29895 {
29896 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29897 // VC layer stats
29898 - atomic_inc(&vcc->stats->rx);
29899 + atomic_inc_unchecked(&vcc->stats->rx);
29900 __net_timestamp(skb);
29901 // end of our responsability
29902 vcc->push (vcc, skb);
29903 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29904 dev->tx_iovec = NULL;
29905
29906 // VC layer stats
29907 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29908 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29909
29910 // free the skb
29911 hrz_kfree_skb (skb);
29912 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29913 index e33ae00..9deb4ab 100644
29914 --- a/drivers/atm/idt77252.c
29915 +++ b/drivers/atm/idt77252.c
29916 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29917 else
29918 dev_kfree_skb(skb);
29919
29920 - atomic_inc(&vcc->stats->tx);
29921 + atomic_inc_unchecked(&vcc->stats->tx);
29922 }
29923
29924 atomic_dec(&scq->used);
29925 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29926 if ((sb = dev_alloc_skb(64)) == NULL) {
29927 printk("%s: Can't allocate buffers for aal0.\n",
29928 card->name);
29929 - atomic_add(i, &vcc->stats->rx_drop);
29930 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29931 break;
29932 }
29933 if (!atm_charge(vcc, sb->truesize)) {
29934 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29935 card->name);
29936 - atomic_add(i - 1, &vcc->stats->rx_drop);
29937 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29938 dev_kfree_skb(sb);
29939 break;
29940 }
29941 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29942 ATM_SKB(sb)->vcc = vcc;
29943 __net_timestamp(sb);
29944 vcc->push(vcc, sb);
29945 - atomic_inc(&vcc->stats->rx);
29946 + atomic_inc_unchecked(&vcc->stats->rx);
29947
29948 cell += ATM_CELL_PAYLOAD;
29949 }
29950 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29951 "(CDC: %08x)\n",
29952 card->name, len, rpp->len, readl(SAR_REG_CDC));
29953 recycle_rx_pool_skb(card, rpp);
29954 - atomic_inc(&vcc->stats->rx_err);
29955 + atomic_inc_unchecked(&vcc->stats->rx_err);
29956 return;
29957 }
29958 if (stat & SAR_RSQE_CRC) {
29959 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29960 recycle_rx_pool_skb(card, rpp);
29961 - atomic_inc(&vcc->stats->rx_err);
29962 + atomic_inc_unchecked(&vcc->stats->rx_err);
29963 return;
29964 }
29965 if (skb_queue_len(&rpp->queue) > 1) {
29966 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29967 RXPRINTK("%s: Can't alloc RX skb.\n",
29968 card->name);
29969 recycle_rx_pool_skb(card, rpp);
29970 - atomic_inc(&vcc->stats->rx_err);
29971 + atomic_inc_unchecked(&vcc->stats->rx_err);
29972 return;
29973 }
29974 if (!atm_charge(vcc, skb->truesize)) {
29975 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29976 __net_timestamp(skb);
29977
29978 vcc->push(vcc, skb);
29979 - atomic_inc(&vcc->stats->rx);
29980 + atomic_inc_unchecked(&vcc->stats->rx);
29981
29982 return;
29983 }
29984 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29985 __net_timestamp(skb);
29986
29987 vcc->push(vcc, skb);
29988 - atomic_inc(&vcc->stats->rx);
29989 + atomic_inc_unchecked(&vcc->stats->rx);
29990
29991 if (skb->truesize > SAR_FB_SIZE_3)
29992 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29993 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29994 if (vcc->qos.aal != ATM_AAL0) {
29995 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29996 card->name, vpi, vci);
29997 - atomic_inc(&vcc->stats->rx_drop);
29998 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29999 goto drop;
30000 }
30001
30002 if ((sb = dev_alloc_skb(64)) == NULL) {
30003 printk("%s: Can't allocate buffers for AAL0.\n",
30004 card->name);
30005 - atomic_inc(&vcc->stats->rx_err);
30006 + atomic_inc_unchecked(&vcc->stats->rx_err);
30007 goto drop;
30008 }
30009
30010 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30011 ATM_SKB(sb)->vcc = vcc;
30012 __net_timestamp(sb);
30013 vcc->push(vcc, sb);
30014 - atomic_inc(&vcc->stats->rx);
30015 + atomic_inc_unchecked(&vcc->stats->rx);
30016
30017 drop:
30018 skb_pull(queue, 64);
30019 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30020
30021 if (vc == NULL) {
30022 printk("%s: NULL connection in send().\n", card->name);
30023 - atomic_inc(&vcc->stats->tx_err);
30024 + atomic_inc_unchecked(&vcc->stats->tx_err);
30025 dev_kfree_skb(skb);
30026 return -EINVAL;
30027 }
30028 if (!test_bit(VCF_TX, &vc->flags)) {
30029 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30030 - atomic_inc(&vcc->stats->tx_err);
30031 + atomic_inc_unchecked(&vcc->stats->tx_err);
30032 dev_kfree_skb(skb);
30033 return -EINVAL;
30034 }
30035 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30036 break;
30037 default:
30038 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30039 - atomic_inc(&vcc->stats->tx_err);
30040 + atomic_inc_unchecked(&vcc->stats->tx_err);
30041 dev_kfree_skb(skb);
30042 return -EINVAL;
30043 }
30044
30045 if (skb_shinfo(skb)->nr_frags != 0) {
30046 printk("%s: No scatter-gather yet.\n", card->name);
30047 - atomic_inc(&vcc->stats->tx_err);
30048 + atomic_inc_unchecked(&vcc->stats->tx_err);
30049 dev_kfree_skb(skb);
30050 return -EINVAL;
30051 }
30052 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30053
30054 err = queue_skb(card, vc, skb, oam);
30055 if (err) {
30056 - atomic_inc(&vcc->stats->tx_err);
30057 + atomic_inc_unchecked(&vcc->stats->tx_err);
30058 dev_kfree_skb(skb);
30059 return err;
30060 }
30061 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30062 skb = dev_alloc_skb(64);
30063 if (!skb) {
30064 printk("%s: Out of memory in send_oam().\n", card->name);
30065 - atomic_inc(&vcc->stats->tx_err);
30066 + atomic_inc_unchecked(&vcc->stats->tx_err);
30067 return -ENOMEM;
30068 }
30069 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30070 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30071 index b2c1b37..faa672b 100644
30072 --- a/drivers/atm/iphase.c
30073 +++ b/drivers/atm/iphase.c
30074 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30075 status = (u_short) (buf_desc_ptr->desc_mode);
30076 if (status & (RX_CER | RX_PTE | RX_OFL))
30077 {
30078 - atomic_inc(&vcc->stats->rx_err);
30079 + atomic_inc_unchecked(&vcc->stats->rx_err);
30080 IF_ERR(printk("IA: bad packet, dropping it");)
30081 if (status & RX_CER) {
30082 IF_ERR(printk(" cause: packet CRC error\n");)
30083 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30084 len = dma_addr - buf_addr;
30085 if (len > iadev->rx_buf_sz) {
30086 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30087 - atomic_inc(&vcc->stats->rx_err);
30088 + atomic_inc_unchecked(&vcc->stats->rx_err);
30089 goto out_free_desc;
30090 }
30091
30092 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30093 ia_vcc = INPH_IA_VCC(vcc);
30094 if (ia_vcc == NULL)
30095 {
30096 - atomic_inc(&vcc->stats->rx_err);
30097 + atomic_inc_unchecked(&vcc->stats->rx_err);
30098 dev_kfree_skb_any(skb);
30099 atm_return(vcc, atm_guess_pdu2truesize(len));
30100 goto INCR_DLE;
30101 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30102 if ((length > iadev->rx_buf_sz) || (length >
30103 (skb->len - sizeof(struct cpcs_trailer))))
30104 {
30105 - atomic_inc(&vcc->stats->rx_err);
30106 + atomic_inc_unchecked(&vcc->stats->rx_err);
30107 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30108 length, skb->len);)
30109 dev_kfree_skb_any(skb);
30110 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30111
30112 IF_RX(printk("rx_dle_intr: skb push");)
30113 vcc->push(vcc,skb);
30114 - atomic_inc(&vcc->stats->rx);
30115 + atomic_inc_unchecked(&vcc->stats->rx);
30116 iadev->rx_pkt_cnt++;
30117 }
30118 INCR_DLE:
30119 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30120 {
30121 struct k_sonet_stats *stats;
30122 stats = &PRIV(_ia_dev[board])->sonet_stats;
30123 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30124 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30125 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30126 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30127 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30128 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30129 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30130 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30131 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30132 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30133 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30134 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30135 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30136 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30137 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30138 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30139 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30140 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30141 }
30142 ia_cmds.status = 0;
30143 break;
30144 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30145 if ((desc == 0) || (desc > iadev->num_tx_desc))
30146 {
30147 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30148 - atomic_inc(&vcc->stats->tx);
30149 + atomic_inc_unchecked(&vcc->stats->tx);
30150 if (vcc->pop)
30151 vcc->pop(vcc, skb);
30152 else
30153 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30154 ATM_DESC(skb) = vcc->vci;
30155 skb_queue_tail(&iadev->tx_dma_q, skb);
30156
30157 - atomic_inc(&vcc->stats->tx);
30158 + atomic_inc_unchecked(&vcc->stats->tx);
30159 iadev->tx_pkt_cnt++;
30160 /* Increment transaction counter */
30161 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30162
30163 #if 0
30164 /* add flow control logic */
30165 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30166 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30167 if (iavcc->vc_desc_cnt > 10) {
30168 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30169 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30170 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30171 index cf97c34..8d30655 100644
30172 --- a/drivers/atm/lanai.c
30173 +++ b/drivers/atm/lanai.c
30174 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30175 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30176 lanai_endtx(lanai, lvcc);
30177 lanai_free_skb(lvcc->tx.atmvcc, skb);
30178 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30179 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30180 }
30181
30182 /* Try to fill the buffer - don't call unless there is backlog */
30183 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30184 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30185 __net_timestamp(skb);
30186 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30187 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30188 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30189 out:
30190 lvcc->rx.buf.ptr = end;
30191 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30192 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30193 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30194 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30195 lanai->stats.service_rxnotaal5++;
30196 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30197 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30198 return 0;
30199 }
30200 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30201 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30202 int bytes;
30203 read_unlock(&vcc_sklist_lock);
30204 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30205 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30206 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30207 lvcc->stats.x.aal5.service_trash++;
30208 bytes = (SERVICE_GET_END(s) * 16) -
30209 (((unsigned long) lvcc->rx.buf.ptr) -
30210 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30211 }
30212 if (s & SERVICE_STREAM) {
30213 read_unlock(&vcc_sklist_lock);
30214 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30215 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30216 lvcc->stats.x.aal5.service_stream++;
30217 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30218 "PDU on VCI %d!\n", lanai->number, vci);
30219 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30220 return 0;
30221 }
30222 DPRINTK("got rx crc error on vci %d\n", vci);
30223 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30224 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30225 lvcc->stats.x.aal5.service_rxcrc++;
30226 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30227 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30228 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30229 index 3da804b..d3b0eed 100644
30230 --- a/drivers/atm/nicstar.c
30231 +++ b/drivers/atm/nicstar.c
30232 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30233 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30234 {
30235 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30236 - atomic_inc(&vcc->stats->tx_err);
30237 + atomic_inc_unchecked(&vcc->stats->tx_err);
30238 dev_kfree_skb_any(skb);
30239 return -EINVAL;
30240 }
30241 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30242 if (!vc->tx)
30243 {
30244 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30245 - atomic_inc(&vcc->stats->tx_err);
30246 + atomic_inc_unchecked(&vcc->stats->tx_err);
30247 dev_kfree_skb_any(skb);
30248 return -EINVAL;
30249 }
30250 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30251 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30252 {
30253 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30254 - atomic_inc(&vcc->stats->tx_err);
30255 + atomic_inc_unchecked(&vcc->stats->tx_err);
30256 dev_kfree_skb_any(skb);
30257 return -EINVAL;
30258 }
30259 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30260 if (skb_shinfo(skb)->nr_frags != 0)
30261 {
30262 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30263 - atomic_inc(&vcc->stats->tx_err);
30264 + atomic_inc_unchecked(&vcc->stats->tx_err);
30265 dev_kfree_skb_any(skb);
30266 return -EINVAL;
30267 }
30268 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30269
30270 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30271 {
30272 - atomic_inc(&vcc->stats->tx_err);
30273 + atomic_inc_unchecked(&vcc->stats->tx_err);
30274 dev_kfree_skb_any(skb);
30275 return -EIO;
30276 }
30277 - atomic_inc(&vcc->stats->tx);
30278 + atomic_inc_unchecked(&vcc->stats->tx);
30279
30280 return 0;
30281 }
30282 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30283 {
30284 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30285 card->index);
30286 - atomic_add(i,&vcc->stats->rx_drop);
30287 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30288 break;
30289 }
30290 if (!atm_charge(vcc, sb->truesize))
30291 {
30292 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30293 card->index);
30294 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30295 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30296 dev_kfree_skb_any(sb);
30297 break;
30298 }
30299 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30300 ATM_SKB(sb)->vcc = vcc;
30301 __net_timestamp(sb);
30302 vcc->push(vcc, sb);
30303 - atomic_inc(&vcc->stats->rx);
30304 + atomic_inc_unchecked(&vcc->stats->rx);
30305 cell += ATM_CELL_PAYLOAD;
30306 }
30307
30308 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30309 if (iovb == NULL)
30310 {
30311 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30312 - atomic_inc(&vcc->stats->rx_drop);
30313 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30314 recycle_rx_buf(card, skb);
30315 return;
30316 }
30317 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30318 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30319 {
30320 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30321 - atomic_inc(&vcc->stats->rx_err);
30322 + atomic_inc_unchecked(&vcc->stats->rx_err);
30323 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30324 NS_SKB(iovb)->iovcnt = 0;
30325 iovb->len = 0;
30326 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30327 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30328 card->index);
30329 which_list(card, skb);
30330 - atomic_inc(&vcc->stats->rx_err);
30331 + atomic_inc_unchecked(&vcc->stats->rx_err);
30332 recycle_rx_buf(card, skb);
30333 vc->rx_iov = NULL;
30334 recycle_iov_buf(card, iovb);
30335 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30336 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30337 card->index);
30338 which_list(card, skb);
30339 - atomic_inc(&vcc->stats->rx_err);
30340 + atomic_inc_unchecked(&vcc->stats->rx_err);
30341 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30342 NS_SKB(iovb)->iovcnt);
30343 vc->rx_iov = NULL;
30344 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30345 printk(" - PDU size mismatch.\n");
30346 else
30347 printk(".\n");
30348 - atomic_inc(&vcc->stats->rx_err);
30349 + atomic_inc_unchecked(&vcc->stats->rx_err);
30350 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30351 NS_SKB(iovb)->iovcnt);
30352 vc->rx_iov = NULL;
30353 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30354 if (!atm_charge(vcc, skb->truesize))
30355 {
30356 push_rxbufs(card, skb);
30357 - atomic_inc(&vcc->stats->rx_drop);
30358 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30359 }
30360 else
30361 {
30362 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30363 ATM_SKB(skb)->vcc = vcc;
30364 __net_timestamp(skb);
30365 vcc->push(vcc, skb);
30366 - atomic_inc(&vcc->stats->rx);
30367 + atomic_inc_unchecked(&vcc->stats->rx);
30368 }
30369 }
30370 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30371 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30372 if (!atm_charge(vcc, sb->truesize))
30373 {
30374 push_rxbufs(card, sb);
30375 - atomic_inc(&vcc->stats->rx_drop);
30376 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30377 }
30378 else
30379 {
30380 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30381 ATM_SKB(sb)->vcc = vcc;
30382 __net_timestamp(sb);
30383 vcc->push(vcc, sb);
30384 - atomic_inc(&vcc->stats->rx);
30385 + atomic_inc_unchecked(&vcc->stats->rx);
30386 }
30387
30388 push_rxbufs(card, skb);
30389 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30390 if (!atm_charge(vcc, skb->truesize))
30391 {
30392 push_rxbufs(card, skb);
30393 - atomic_inc(&vcc->stats->rx_drop);
30394 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30395 }
30396 else
30397 {
30398 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30399 ATM_SKB(skb)->vcc = vcc;
30400 __net_timestamp(skb);
30401 vcc->push(vcc, skb);
30402 - atomic_inc(&vcc->stats->rx);
30403 + atomic_inc_unchecked(&vcc->stats->rx);
30404 }
30405
30406 push_rxbufs(card, sb);
30407 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30408 if (hb == NULL)
30409 {
30410 printk("nicstar%d: Out of huge buffers.\n", card->index);
30411 - atomic_inc(&vcc->stats->rx_drop);
30412 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30413 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30414 NS_SKB(iovb)->iovcnt);
30415 vc->rx_iov = NULL;
30416 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30417 }
30418 else
30419 dev_kfree_skb_any(hb);
30420 - atomic_inc(&vcc->stats->rx_drop);
30421 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30422 }
30423 else
30424 {
30425 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30426 #endif /* NS_USE_DESTRUCTORS */
30427 __net_timestamp(hb);
30428 vcc->push(vcc, hb);
30429 - atomic_inc(&vcc->stats->rx);
30430 + atomic_inc_unchecked(&vcc->stats->rx);
30431 }
30432 }
30433
30434 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30435 index 84c93ff..e6ed269 100644
30436 --- a/drivers/atm/solos-pci.c
30437 +++ b/drivers/atm/solos-pci.c
30438 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30439 }
30440 atm_charge(vcc, skb->truesize);
30441 vcc->push(vcc, skb);
30442 - atomic_inc(&vcc->stats->rx);
30443 + atomic_inc_unchecked(&vcc->stats->rx);
30444 break;
30445
30446 case PKT_STATUS:
30447 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30448 char msg[500];
30449 char item[10];
30450
30451 + pax_track_stack();
30452 +
30453 len = buf->len;
30454 for (i = 0; i < len; i++){
30455 if(i % 8 == 0)
30456 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30457 vcc = SKB_CB(oldskb)->vcc;
30458
30459 if (vcc) {
30460 - atomic_inc(&vcc->stats->tx);
30461 + atomic_inc_unchecked(&vcc->stats->tx);
30462 solos_pop(vcc, oldskb);
30463 } else
30464 dev_kfree_skb_irq(oldskb);
30465 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30466 index 6dd3f59..ee377f3 100644
30467 --- a/drivers/atm/suni.c
30468 +++ b/drivers/atm/suni.c
30469 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30470
30471
30472 #define ADD_LIMITED(s,v) \
30473 - atomic_add((v),&stats->s); \
30474 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30475 + atomic_add_unchecked((v),&stats->s); \
30476 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30477
30478
30479 static void suni_hz(unsigned long from_timer)
30480 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30481 index fc8cb07..4a80e53 100644
30482 --- a/drivers/atm/uPD98402.c
30483 +++ b/drivers/atm/uPD98402.c
30484 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30485 struct sonet_stats tmp;
30486 int error = 0;
30487
30488 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30489 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30490 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30491 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30492 if (zero && !error) {
30493 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30494
30495
30496 #define ADD_LIMITED(s,v) \
30497 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30498 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30499 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30500 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30501 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30502 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30503
30504
30505 static void stat_event(struct atm_dev *dev)
30506 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30507 if (reason & uPD98402_INT_PFM) stat_event(dev);
30508 if (reason & uPD98402_INT_PCO) {
30509 (void) GET(PCOCR); /* clear interrupt cause */
30510 - atomic_add(GET(HECCT),
30511 + atomic_add_unchecked(GET(HECCT),
30512 &PRIV(dev)->sonet_stats.uncorr_hcs);
30513 }
30514 if ((reason & uPD98402_INT_RFO) &&
30515 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30516 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30517 uPD98402_INT_LOS),PIMR); /* enable them */
30518 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30519 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30520 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30521 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30522 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30523 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30524 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30525 return 0;
30526 }
30527
30528 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30529 index 2e9635b..32927b4 100644
30530 --- a/drivers/atm/zatm.c
30531 +++ b/drivers/atm/zatm.c
30532 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30533 }
30534 if (!size) {
30535 dev_kfree_skb_irq(skb);
30536 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30537 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30538 continue;
30539 }
30540 if (!atm_charge(vcc,skb->truesize)) {
30541 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30542 skb->len = size;
30543 ATM_SKB(skb)->vcc = vcc;
30544 vcc->push(vcc,skb);
30545 - atomic_inc(&vcc->stats->rx);
30546 + atomic_inc_unchecked(&vcc->stats->rx);
30547 }
30548 zout(pos & 0xffff,MTA(mbx));
30549 #if 0 /* probably a stupid idea */
30550 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30551 skb_queue_head(&zatm_vcc->backlog,skb);
30552 break;
30553 }
30554 - atomic_inc(&vcc->stats->tx);
30555 + atomic_inc_unchecked(&vcc->stats->tx);
30556 wake_up(&zatm_vcc->tx_wait);
30557 }
30558
30559 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30560 index 63c143e..fece183 100644
30561 --- a/drivers/base/bus.c
30562 +++ b/drivers/base/bus.c
30563 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30564 return ret;
30565 }
30566
30567 -static struct sysfs_ops driver_sysfs_ops = {
30568 +static const struct sysfs_ops driver_sysfs_ops = {
30569 .show = drv_attr_show,
30570 .store = drv_attr_store,
30571 };
30572 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30573 return ret;
30574 }
30575
30576 -static struct sysfs_ops bus_sysfs_ops = {
30577 +static const struct sysfs_ops bus_sysfs_ops = {
30578 .show = bus_attr_show,
30579 .store = bus_attr_store,
30580 };
30581 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30582 return 0;
30583 }
30584
30585 -static struct kset_uevent_ops bus_uevent_ops = {
30586 +static const struct kset_uevent_ops bus_uevent_ops = {
30587 .filter = bus_uevent_filter,
30588 };
30589
30590 diff --git a/drivers/base/class.c b/drivers/base/class.c
30591 index 6e2c3b0..cb61871 100644
30592 --- a/drivers/base/class.c
30593 +++ b/drivers/base/class.c
30594 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30595 kfree(cp);
30596 }
30597
30598 -static struct sysfs_ops class_sysfs_ops = {
30599 +static const struct sysfs_ops class_sysfs_ops = {
30600 .show = class_attr_show,
30601 .store = class_attr_store,
30602 };
30603 diff --git a/drivers/base/core.c b/drivers/base/core.c
30604 index f33d768..a9358d0 100644
30605 --- a/drivers/base/core.c
30606 +++ b/drivers/base/core.c
30607 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30608 return ret;
30609 }
30610
30611 -static struct sysfs_ops dev_sysfs_ops = {
30612 +static const struct sysfs_ops dev_sysfs_ops = {
30613 .show = dev_attr_show,
30614 .store = dev_attr_store,
30615 };
30616 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30617 return retval;
30618 }
30619
30620 -static struct kset_uevent_ops device_uevent_ops = {
30621 +static const struct kset_uevent_ops device_uevent_ops = {
30622 .filter = dev_uevent_filter,
30623 .name = dev_uevent_name,
30624 .uevent = dev_uevent,
30625 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30626 index 989429c..2272b00 100644
30627 --- a/drivers/base/memory.c
30628 +++ b/drivers/base/memory.c
30629 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30630 return retval;
30631 }
30632
30633 -static struct kset_uevent_ops memory_uevent_ops = {
30634 +static const struct kset_uevent_ops memory_uevent_ops = {
30635 .name = memory_uevent_name,
30636 .uevent = memory_uevent,
30637 };
30638 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30639 index 3f202f7..61c4a6f 100644
30640 --- a/drivers/base/sys.c
30641 +++ b/drivers/base/sys.c
30642 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30643 return -EIO;
30644 }
30645
30646 -static struct sysfs_ops sysfs_ops = {
30647 +static const struct sysfs_ops sysfs_ops = {
30648 .show = sysdev_show,
30649 .store = sysdev_store,
30650 };
30651 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30652 return -EIO;
30653 }
30654
30655 -static struct sysfs_ops sysfs_class_ops = {
30656 +static const struct sysfs_ops sysfs_class_ops = {
30657 .show = sysdev_class_show,
30658 .store = sysdev_class_store,
30659 };
30660 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30661 index eb4fa19..1954777 100644
30662 --- a/drivers/block/DAC960.c
30663 +++ b/drivers/block/DAC960.c
30664 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30665 unsigned long flags;
30666 int Channel, TargetID;
30667
30668 + pax_track_stack();
30669 +
30670 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30671 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30672 sizeof(DAC960_SCSI_Inquiry_T) +
30673 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30674 index 68b90d9..7e2e3f3 100644
30675 --- a/drivers/block/cciss.c
30676 +++ b/drivers/block/cciss.c
30677 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30678 int err;
30679 u32 cp;
30680
30681 + memset(&arg64, 0, sizeof(arg64));
30682 +
30683 err = 0;
30684 err |=
30685 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30686 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30687 /* Wait (up to 20 seconds) for a command to complete */
30688
30689 for (i = 20 * HZ; i > 0; i--) {
30690 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30691 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30692 if (done == FIFO_EMPTY)
30693 schedule_timeout_uninterruptible(1);
30694 else
30695 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30696 resend_cmd1:
30697
30698 /* Disable interrupt on the board. */
30699 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30700 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30701
30702 /* Make sure there is room in the command FIFO */
30703 /* Actually it should be completely empty at this time */
30704 @@ -2884,13 +2886,13 @@ resend_cmd1:
30705 /* tape side of the driver. */
30706 for (i = 200000; i > 0; i--) {
30707 /* if fifo isn't full go */
30708 - if (!(h->access.fifo_full(h)))
30709 + if (!(h->access->fifo_full(h)))
30710 break;
30711 udelay(10);
30712 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30713 " waiting!\n", h->ctlr);
30714 }
30715 - h->access.submit_command(h, c); /* Send the cmd */
30716 + h->access->submit_command(h, c); /* Send the cmd */
30717 do {
30718 complete = pollcomplete(h->ctlr);
30719
30720 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30721 while (!hlist_empty(&h->reqQ)) {
30722 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30723 /* can't do anything if fifo is full */
30724 - if ((h->access.fifo_full(h))) {
30725 + if ((h->access->fifo_full(h))) {
30726 printk(KERN_WARNING "cciss: fifo full\n");
30727 break;
30728 }
30729 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30730 h->Qdepth--;
30731
30732 /* Tell the controller execute command */
30733 - h->access.submit_command(h, c);
30734 + h->access->submit_command(h, c);
30735
30736 /* Put job onto the completed Q */
30737 addQ(&h->cmpQ, c);
30738 @@ -3393,17 +3395,17 @@ startio:
30739
30740 static inline unsigned long get_next_completion(ctlr_info_t *h)
30741 {
30742 - return h->access.command_completed(h);
30743 + return h->access->command_completed(h);
30744 }
30745
30746 static inline int interrupt_pending(ctlr_info_t *h)
30747 {
30748 - return h->access.intr_pending(h);
30749 + return h->access->intr_pending(h);
30750 }
30751
30752 static inline long interrupt_not_for_us(ctlr_info_t *h)
30753 {
30754 - return (((h->access.intr_pending(h) == 0) ||
30755 + return (((h->access->intr_pending(h) == 0) ||
30756 (h->interrupts_enabled == 0)));
30757 }
30758
30759 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30760 */
30761 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30762 c->product_name = products[prod_index].product_name;
30763 - c->access = *(products[prod_index].access);
30764 + c->access = products[prod_index].access;
30765 c->nr_cmds = c->max_commands - 4;
30766 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30767 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30768 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30769 }
30770
30771 /* make sure the board interrupts are off */
30772 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30773 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30774 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30775 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30776 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30777 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30778 cciss_scsi_setup(i);
30779
30780 /* Turn the interrupts on so we can service requests */
30781 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30782 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30783
30784 /* Get the firmware version */
30785 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30786 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30787 index 04d6bf8..36e712d 100644
30788 --- a/drivers/block/cciss.h
30789 +++ b/drivers/block/cciss.h
30790 @@ -90,7 +90,7 @@ struct ctlr_info
30791 // information about each logical volume
30792 drive_info_struct *drv[CISS_MAX_LUN];
30793
30794 - struct access_method access;
30795 + struct access_method *access;
30796
30797 /* queue and queue Info */
30798 struct hlist_head reqQ;
30799 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30800 index 6422651..bb1bdef 100644
30801 --- a/drivers/block/cpqarray.c
30802 +++ b/drivers/block/cpqarray.c
30803 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30804 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30805 goto Enomem4;
30806 }
30807 - hba[i]->access.set_intr_mask(hba[i], 0);
30808 + hba[i]->access->set_intr_mask(hba[i], 0);
30809 if (request_irq(hba[i]->intr, do_ida_intr,
30810 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30811 {
30812 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30813 add_timer(&hba[i]->timer);
30814
30815 /* Enable IRQ now that spinlock and rate limit timer are set up */
30816 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30817 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30818
30819 for(j=0; j<NWD; j++) {
30820 struct gendisk *disk = ida_gendisk[i][j];
30821 @@ -695,7 +695,7 @@ DBGINFO(
30822 for(i=0; i<NR_PRODUCTS; i++) {
30823 if (board_id == products[i].board_id) {
30824 c->product_name = products[i].product_name;
30825 - c->access = *(products[i].access);
30826 + c->access = products[i].access;
30827 break;
30828 }
30829 }
30830 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30831 hba[ctlr]->intr = intr;
30832 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30833 hba[ctlr]->product_name = products[j].product_name;
30834 - hba[ctlr]->access = *(products[j].access);
30835 + hba[ctlr]->access = products[j].access;
30836 hba[ctlr]->ctlr = ctlr;
30837 hba[ctlr]->board_id = board_id;
30838 hba[ctlr]->pci_dev = NULL; /* not PCI */
30839 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30840 struct scatterlist tmp_sg[SG_MAX];
30841 int i, dir, seg;
30842
30843 + pax_track_stack();
30844 +
30845 if (blk_queue_plugged(q))
30846 goto startio;
30847
30848 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30849
30850 while((c = h->reqQ) != NULL) {
30851 /* Can't do anything if we're busy */
30852 - if (h->access.fifo_full(h) == 0)
30853 + if (h->access->fifo_full(h) == 0)
30854 return;
30855
30856 /* Get the first entry from the request Q */
30857 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30858 h->Qdepth--;
30859
30860 /* Tell the controller to do our bidding */
30861 - h->access.submit_command(h, c);
30862 + h->access->submit_command(h, c);
30863
30864 /* Get onto the completion Q */
30865 addQ(&h->cmpQ, c);
30866 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30867 unsigned long flags;
30868 __u32 a,a1;
30869
30870 - istat = h->access.intr_pending(h);
30871 + istat = h->access->intr_pending(h);
30872 /* Is this interrupt for us? */
30873 if (istat == 0)
30874 return IRQ_NONE;
30875 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30876 */
30877 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30878 if (istat & FIFO_NOT_EMPTY) {
30879 - while((a = h->access.command_completed(h))) {
30880 + while((a = h->access->command_completed(h))) {
30881 a1 = a; a &= ~3;
30882 if ((c = h->cmpQ) == NULL)
30883 {
30884 @@ -1434,11 +1436,11 @@ static int sendcmd(
30885 /*
30886 * Disable interrupt
30887 */
30888 - info_p->access.set_intr_mask(info_p, 0);
30889 + info_p->access->set_intr_mask(info_p, 0);
30890 /* Make sure there is room in the command FIFO */
30891 /* Actually it should be completely empty at this time. */
30892 for (i = 200000; i > 0; i--) {
30893 - temp = info_p->access.fifo_full(info_p);
30894 + temp = info_p->access->fifo_full(info_p);
30895 if (temp != 0) {
30896 break;
30897 }
30898 @@ -1451,7 +1453,7 @@ DBG(
30899 /*
30900 * Send the cmd
30901 */
30902 - info_p->access.submit_command(info_p, c);
30903 + info_p->access->submit_command(info_p, c);
30904 complete = pollcomplete(ctlr);
30905
30906 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30907 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30908 * we check the new geometry. Then turn interrupts back on when
30909 * we're done.
30910 */
30911 - host->access.set_intr_mask(host, 0);
30912 + host->access->set_intr_mask(host, 0);
30913 getgeometry(ctlr);
30914 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30915 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30916
30917 for(i=0; i<NWD; i++) {
30918 struct gendisk *disk = ida_gendisk[ctlr][i];
30919 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30920 /* Wait (up to 2 seconds) for a command to complete */
30921
30922 for (i = 200000; i > 0; i--) {
30923 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30924 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30925 if (done == 0) {
30926 udelay(10); /* a short fixed delay */
30927 } else
30928 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30929 index be73e9d..7fbf140 100644
30930 --- a/drivers/block/cpqarray.h
30931 +++ b/drivers/block/cpqarray.h
30932 @@ -99,7 +99,7 @@ struct ctlr_info {
30933 drv_info_t drv[NWD];
30934 struct proc_dir_entry *proc;
30935
30936 - struct access_method access;
30937 + struct access_method *access;
30938
30939 cmdlist_t *reqQ;
30940 cmdlist_t *cmpQ;
30941 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30942 index 8ec2d70..2804b30 100644
30943 --- a/drivers/block/loop.c
30944 +++ b/drivers/block/loop.c
30945 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30946 mm_segment_t old_fs = get_fs();
30947
30948 set_fs(get_ds());
30949 - bw = file->f_op->write(file, buf, len, &pos);
30950 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30951 set_fs(old_fs);
30952 if (likely(bw == len))
30953 return 0;
30954 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30955 index 26ada47..083c480 100644
30956 --- a/drivers/block/nbd.c
30957 +++ b/drivers/block/nbd.c
30958 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30959 struct kvec iov;
30960 sigset_t blocked, oldset;
30961
30962 + pax_track_stack();
30963 +
30964 if (unlikely(!sock)) {
30965 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30966 lo->disk->disk_name, (send ? "send" : "recv"));
30967 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30968 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30969 unsigned int cmd, unsigned long arg)
30970 {
30971 + pax_track_stack();
30972 +
30973 switch (cmd) {
30974 case NBD_DISCONNECT: {
30975 struct request sreq;
30976 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30977 index a5d585d..d087be3 100644
30978 --- a/drivers/block/pktcdvd.c
30979 +++ b/drivers/block/pktcdvd.c
30980 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30981 return len;
30982 }
30983
30984 -static struct sysfs_ops kobj_pkt_ops = {
30985 +static const struct sysfs_ops kobj_pkt_ops = {
30986 .show = kobj_pkt_show,
30987 .store = kobj_pkt_store
30988 };
30989 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30990 index 6aad99e..89cd142 100644
30991 --- a/drivers/char/Kconfig
30992 +++ b/drivers/char/Kconfig
30993 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30994
30995 config DEVKMEM
30996 bool "/dev/kmem virtual device support"
30997 - default y
30998 + default n
30999 + depends on !GRKERNSEC_KMEM
31000 help
31001 Say Y here if you want to support the /dev/kmem device. The
31002 /dev/kmem device is rarely used, but can be used for certain
31003 @@ -1114,6 +1115,7 @@ config DEVPORT
31004 bool
31005 depends on !M68K
31006 depends on ISA || PCI
31007 + depends on !GRKERNSEC_KMEM
31008 default y
31009
31010 source "drivers/s390/char/Kconfig"
31011 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31012 index a96f319..a778a5b 100644
31013 --- a/drivers/char/agp/frontend.c
31014 +++ b/drivers/char/agp/frontend.c
31015 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31016 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31017 return -EFAULT;
31018
31019 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31020 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31021 return -EFAULT;
31022
31023 client = agp_find_client_by_pid(reserve.pid);
31024 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31025 index d8cff90..9628e70 100644
31026 --- a/drivers/char/briq_panel.c
31027 +++ b/drivers/char/briq_panel.c
31028 @@ -10,6 +10,7 @@
31029 #include <linux/types.h>
31030 #include <linux/errno.h>
31031 #include <linux/tty.h>
31032 +#include <linux/mutex.h>
31033 #include <linux/timer.h>
31034 #include <linux/kernel.h>
31035 #include <linux/wait.h>
31036 @@ -36,6 +37,7 @@ static int vfd_is_open;
31037 static unsigned char vfd[40];
31038 static int vfd_cursor;
31039 static unsigned char ledpb, led;
31040 +static DEFINE_MUTEX(vfd_mutex);
31041
31042 static void update_vfd(void)
31043 {
31044 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31045 if (!vfd_is_open)
31046 return -EBUSY;
31047
31048 + mutex_lock(&vfd_mutex);
31049 for (;;) {
31050 char c;
31051 if (!indx)
31052 break;
31053 - if (get_user(c, buf))
31054 + if (get_user(c, buf)) {
31055 + mutex_unlock(&vfd_mutex);
31056 return -EFAULT;
31057 + }
31058 if (esc) {
31059 set_led(c);
31060 esc = 0;
31061 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31062 buf++;
31063 }
31064 update_vfd();
31065 + mutex_unlock(&vfd_mutex);
31066
31067 return len;
31068 }
31069 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31070 index 31e7c91..161afc0 100644
31071 --- a/drivers/char/genrtc.c
31072 +++ b/drivers/char/genrtc.c
31073 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31074 switch (cmd) {
31075
31076 case RTC_PLL_GET:
31077 + memset(&pll, 0, sizeof(pll));
31078 if (get_rtc_pll(&pll))
31079 return -EINVAL;
31080 else
31081 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31082 index 006466d..a2bb21c 100644
31083 --- a/drivers/char/hpet.c
31084 +++ b/drivers/char/hpet.c
31085 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31086 return 0;
31087 }
31088
31089 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31090 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31091
31092 static int
31093 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31094 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31095 }
31096
31097 static int
31098 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31099 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31100 {
31101 struct hpet_timer __iomem *timer;
31102 struct hpet __iomem *hpet;
31103 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31104 {
31105 struct hpet_info info;
31106
31107 + memset(&info, 0, sizeof(info));
31108 +
31109 if (devp->hd_ireqfreq)
31110 info.hi_ireqfreq =
31111 hpet_time_div(hpetp, devp->hd_ireqfreq);
31112 - else
31113 - info.hi_ireqfreq = 0;
31114 info.hi_flags =
31115 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31116 info.hi_hpet = hpetp->hp_which;
31117 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31118 index 0afc8b8..6913fc3 100644
31119 --- a/drivers/char/hvc_beat.c
31120 +++ b/drivers/char/hvc_beat.c
31121 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31122 return cnt;
31123 }
31124
31125 -static struct hv_ops hvc_beat_get_put_ops = {
31126 +static const struct hv_ops hvc_beat_get_put_ops = {
31127 .get_chars = hvc_beat_get_chars,
31128 .put_chars = hvc_beat_put_chars,
31129 };
31130 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31131 index 98097f2..407dddc 100644
31132 --- a/drivers/char/hvc_console.c
31133 +++ b/drivers/char/hvc_console.c
31134 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31135 * console interfaces but can still be used as a tty device. This has to be
31136 * static because kmalloc will not work during early console init.
31137 */
31138 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31139 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31140 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31141 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31142
31143 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31144 * vty adapters do NOT get an hvc_instantiate() callback since they
31145 * appear after early console init.
31146 */
31147 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31148 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31149 {
31150 struct hvc_struct *hp;
31151
31152 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31153 };
31154
31155 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31156 - struct hv_ops *ops, int outbuf_size)
31157 + const struct hv_ops *ops, int outbuf_size)
31158 {
31159 struct hvc_struct *hp;
31160 int i;
31161 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31162 index 10950ca..ed176c3 100644
31163 --- a/drivers/char/hvc_console.h
31164 +++ b/drivers/char/hvc_console.h
31165 @@ -55,7 +55,7 @@ struct hvc_struct {
31166 int outbuf_size;
31167 int n_outbuf;
31168 uint32_t vtermno;
31169 - struct hv_ops *ops;
31170 + const struct hv_ops *ops;
31171 int irq_requested;
31172 int data;
31173 struct winsize ws;
31174 @@ -76,11 +76,11 @@ struct hv_ops {
31175 };
31176
31177 /* Register a vterm and a slot index for use as a console (console_init) */
31178 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31179 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31180
31181 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31182 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31183 - struct hv_ops *ops, int outbuf_size);
31184 + const struct hv_ops *ops, int outbuf_size);
31185 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31186 extern int hvc_remove(struct hvc_struct *hp);
31187
31188 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31189 index 936d05b..fd02426 100644
31190 --- a/drivers/char/hvc_iseries.c
31191 +++ b/drivers/char/hvc_iseries.c
31192 @@ -197,7 +197,7 @@ done:
31193 return sent;
31194 }
31195
31196 -static struct hv_ops hvc_get_put_ops = {
31197 +static const struct hv_ops hvc_get_put_ops = {
31198 .get_chars = get_chars,
31199 .put_chars = put_chars,
31200 .notifier_add = notifier_add_irq,
31201 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31202 index b0e168f..69cda2a 100644
31203 --- a/drivers/char/hvc_iucv.c
31204 +++ b/drivers/char/hvc_iucv.c
31205 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31206
31207
31208 /* HVC operations */
31209 -static struct hv_ops hvc_iucv_ops = {
31210 +static const struct hv_ops hvc_iucv_ops = {
31211 .get_chars = hvc_iucv_get_chars,
31212 .put_chars = hvc_iucv_put_chars,
31213 .notifier_add = hvc_iucv_notifier_add,
31214 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31215 index 88590d0..61c4a61 100644
31216 --- a/drivers/char/hvc_rtas.c
31217 +++ b/drivers/char/hvc_rtas.c
31218 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31219 return i;
31220 }
31221
31222 -static struct hv_ops hvc_rtas_get_put_ops = {
31223 +static const struct hv_ops hvc_rtas_get_put_ops = {
31224 .get_chars = hvc_rtas_read_console,
31225 .put_chars = hvc_rtas_write_console,
31226 };
31227 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31228 index bd63ba8..b0957e6 100644
31229 --- a/drivers/char/hvc_udbg.c
31230 +++ b/drivers/char/hvc_udbg.c
31231 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31232 return i;
31233 }
31234
31235 -static struct hv_ops hvc_udbg_ops = {
31236 +static const struct hv_ops hvc_udbg_ops = {
31237 .get_chars = hvc_udbg_get,
31238 .put_chars = hvc_udbg_put,
31239 };
31240 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31241 index 10be343..27370e9 100644
31242 --- a/drivers/char/hvc_vio.c
31243 +++ b/drivers/char/hvc_vio.c
31244 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31245 return got;
31246 }
31247
31248 -static struct hv_ops hvc_get_put_ops = {
31249 +static const struct hv_ops hvc_get_put_ops = {
31250 .get_chars = filtered_get_chars,
31251 .put_chars = hvc_put_chars,
31252 .notifier_add = notifier_add_irq,
31253 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31254 index a6ee32b..94f8c26 100644
31255 --- a/drivers/char/hvc_xen.c
31256 +++ b/drivers/char/hvc_xen.c
31257 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31258 return recv;
31259 }
31260
31261 -static struct hv_ops hvc_ops = {
31262 +static const struct hv_ops hvc_ops = {
31263 .get_chars = read_console,
31264 .put_chars = write_console,
31265 .notifier_add = notifier_add_irq,
31266 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31267 index 266b858..f3ee0bb 100644
31268 --- a/drivers/char/hvcs.c
31269 +++ b/drivers/char/hvcs.c
31270 @@ -82,6 +82,7 @@
31271 #include <asm/hvcserver.h>
31272 #include <asm/uaccess.h>
31273 #include <asm/vio.h>
31274 +#include <asm/local.h>
31275
31276 /*
31277 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31278 @@ -269,7 +270,7 @@ struct hvcs_struct {
31279 unsigned int index;
31280
31281 struct tty_struct *tty;
31282 - int open_count;
31283 + local_t open_count;
31284
31285 /*
31286 * Used to tell the driver kernel_thread what operations need to take
31287 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31288
31289 spin_lock_irqsave(&hvcsd->lock, flags);
31290
31291 - if (hvcsd->open_count > 0) {
31292 + if (local_read(&hvcsd->open_count) > 0) {
31293 spin_unlock_irqrestore(&hvcsd->lock, flags);
31294 printk(KERN_INFO "HVCS: vterm state unchanged. "
31295 "The hvcs device node is still in use.\n");
31296 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31297 if ((retval = hvcs_partner_connect(hvcsd)))
31298 goto error_release;
31299
31300 - hvcsd->open_count = 1;
31301 + local_set(&hvcsd->open_count, 1);
31302 hvcsd->tty = tty;
31303 tty->driver_data = hvcsd;
31304
31305 @@ -1169,7 +1170,7 @@ fast_open:
31306
31307 spin_lock_irqsave(&hvcsd->lock, flags);
31308 kref_get(&hvcsd->kref);
31309 - hvcsd->open_count++;
31310 + local_inc(&hvcsd->open_count);
31311 hvcsd->todo_mask |= HVCS_SCHED_READ;
31312 spin_unlock_irqrestore(&hvcsd->lock, flags);
31313
31314 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31315 hvcsd = tty->driver_data;
31316
31317 spin_lock_irqsave(&hvcsd->lock, flags);
31318 - if (--hvcsd->open_count == 0) {
31319 + if (local_dec_and_test(&hvcsd->open_count)) {
31320
31321 vio_disable_interrupts(hvcsd->vdev);
31322
31323 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31324 free_irq(irq, hvcsd);
31325 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31326 return;
31327 - } else if (hvcsd->open_count < 0) {
31328 + } else if (local_read(&hvcsd->open_count) < 0) {
31329 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31330 " is missmanaged.\n",
31331 - hvcsd->vdev->unit_address, hvcsd->open_count);
31332 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31333 }
31334
31335 spin_unlock_irqrestore(&hvcsd->lock, flags);
31336 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31337
31338 spin_lock_irqsave(&hvcsd->lock, flags);
31339 /* Preserve this so that we know how many kref refs to put */
31340 - temp_open_count = hvcsd->open_count;
31341 + temp_open_count = local_read(&hvcsd->open_count);
31342
31343 /*
31344 * Don't kref put inside the spinlock because the destruction
31345 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31346 hvcsd->tty->driver_data = NULL;
31347 hvcsd->tty = NULL;
31348
31349 - hvcsd->open_count = 0;
31350 + local_set(&hvcsd->open_count, 0);
31351
31352 /* This will drop any buffered data on the floor which is OK in a hangup
31353 * scenario. */
31354 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31355 * the middle of a write operation? This is a crummy place to do this
31356 * but we want to keep it all in the spinlock.
31357 */
31358 - if (hvcsd->open_count <= 0) {
31359 + if (local_read(&hvcsd->open_count) <= 0) {
31360 spin_unlock_irqrestore(&hvcsd->lock, flags);
31361 return -ENODEV;
31362 }
31363 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31364 {
31365 struct hvcs_struct *hvcsd = tty->driver_data;
31366
31367 - if (!hvcsd || hvcsd->open_count <= 0)
31368 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31369 return 0;
31370
31371 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31372 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31373 index ec5e3f8..02455ba 100644
31374 --- a/drivers/char/ipmi/ipmi_msghandler.c
31375 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31376 @@ -414,7 +414,7 @@ struct ipmi_smi {
31377 struct proc_dir_entry *proc_dir;
31378 char proc_dir_name[10];
31379
31380 - atomic_t stats[IPMI_NUM_STATS];
31381 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31382
31383 /*
31384 * run_to_completion duplicate of smb_info, smi_info
31385 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31386
31387
31388 #define ipmi_inc_stat(intf, stat) \
31389 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31390 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31391 #define ipmi_get_stat(intf, stat) \
31392 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31393 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31394
31395 static int is_lan_addr(struct ipmi_addr *addr)
31396 {
31397 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31398 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31399 init_waitqueue_head(&intf->waitq);
31400 for (i = 0; i < IPMI_NUM_STATS; i++)
31401 - atomic_set(&intf->stats[i], 0);
31402 + atomic_set_unchecked(&intf->stats[i], 0);
31403
31404 intf->proc_dir = NULL;
31405
31406 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31407 struct ipmi_smi_msg smi_msg;
31408 struct ipmi_recv_msg recv_msg;
31409
31410 + pax_track_stack();
31411 +
31412 si = (struct ipmi_system_interface_addr *) &addr;
31413 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31414 si->channel = IPMI_BMC_CHANNEL;
31415 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31416 index abae8c9..8021979 100644
31417 --- a/drivers/char/ipmi/ipmi_si_intf.c
31418 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31419 @@ -277,7 +277,7 @@ struct smi_info {
31420 unsigned char slave_addr;
31421
31422 /* Counters and things for the proc filesystem. */
31423 - atomic_t stats[SI_NUM_STATS];
31424 + atomic_unchecked_t stats[SI_NUM_STATS];
31425
31426 struct task_struct *thread;
31427
31428 @@ -285,9 +285,9 @@ struct smi_info {
31429 };
31430
31431 #define smi_inc_stat(smi, stat) \
31432 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31433 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31434 #define smi_get_stat(smi, stat) \
31435 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31436 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31437
31438 #define SI_MAX_PARMS 4
31439
31440 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31441 atomic_set(&new_smi->req_events, 0);
31442 new_smi->run_to_completion = 0;
31443 for (i = 0; i < SI_NUM_STATS; i++)
31444 - atomic_set(&new_smi->stats[i], 0);
31445 + atomic_set_unchecked(&new_smi->stats[i], 0);
31446
31447 new_smi->interrupt_disabled = 0;
31448 atomic_set(&new_smi->stop_operation, 0);
31449 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31450 index 402838f..55e2200 100644
31451 --- a/drivers/char/istallion.c
31452 +++ b/drivers/char/istallion.c
31453 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31454 * re-used for each stats call.
31455 */
31456 static comstats_t stli_comstats;
31457 -static combrd_t stli_brdstats;
31458 static struct asystats stli_cdkstats;
31459
31460 /*****************************************************************************/
31461 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31462 {
31463 struct stlibrd *brdp;
31464 unsigned int i;
31465 + combrd_t stli_brdstats;
31466
31467 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31468 return -EFAULT;
31469 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31470 struct stliport stli_dummyport;
31471 struct stliport *portp;
31472
31473 + pax_track_stack();
31474 +
31475 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31476 return -EFAULT;
31477 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31478 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31479 struct stlibrd stli_dummybrd;
31480 struct stlibrd *brdp;
31481
31482 + pax_track_stack();
31483 +
31484 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31485 return -EFAULT;
31486 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31487 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31488 index 950837c..e55a288 100644
31489 --- a/drivers/char/keyboard.c
31490 +++ b/drivers/char/keyboard.c
31491 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31492 kbd->kbdmode == VC_MEDIUMRAW) &&
31493 value != KVAL(K_SAK))
31494 return; /* SAK is allowed even in raw mode */
31495 +
31496 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31497 + {
31498 + void *func = fn_handler[value];
31499 + if (func == fn_show_state || func == fn_show_ptregs ||
31500 + func == fn_show_mem)
31501 + return;
31502 + }
31503 +#endif
31504 +
31505 fn_handler[value](vc);
31506 }
31507
31508 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31509 .evbit = { BIT_MASK(EV_SND) },
31510 },
31511
31512 - { }, /* Terminating entry */
31513 + { 0 }, /* Terminating entry */
31514 };
31515
31516 MODULE_DEVICE_TABLE(input, kbd_ids);
31517 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31518 index 87c67b4..230527a 100644
31519 --- a/drivers/char/mbcs.c
31520 +++ b/drivers/char/mbcs.c
31521 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31522 return 0;
31523 }
31524
31525 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31526 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31527 {
31528 .part_num = MBCS_PART_NUM,
31529 .mfg_num = MBCS_MFG_NUM,
31530 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31531 index 1270f64..8495f49 100644
31532 --- a/drivers/char/mem.c
31533 +++ b/drivers/char/mem.c
31534 @@ -18,6 +18,7 @@
31535 #include <linux/raw.h>
31536 #include <linux/tty.h>
31537 #include <linux/capability.h>
31538 +#include <linux/security.h>
31539 #include <linux/ptrace.h>
31540 #include <linux/device.h>
31541 #include <linux/highmem.h>
31542 @@ -35,6 +36,10 @@
31543 # include <linux/efi.h>
31544 #endif
31545
31546 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31547 +extern struct file_operations grsec_fops;
31548 +#endif
31549 +
31550 static inline unsigned long size_inside_page(unsigned long start,
31551 unsigned long size)
31552 {
31553 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31554
31555 while (cursor < to) {
31556 if (!devmem_is_allowed(pfn)) {
31557 +#ifdef CONFIG_GRKERNSEC_KMEM
31558 + gr_handle_mem_readwrite(from, to);
31559 +#else
31560 printk(KERN_INFO
31561 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31562 current->comm, from, to);
31563 +#endif
31564 return 0;
31565 }
31566 cursor += PAGE_SIZE;
31567 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31568 }
31569 return 1;
31570 }
31571 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31572 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31573 +{
31574 + return 0;
31575 +}
31576 #else
31577 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31578 {
31579 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31580 #endif
31581
31582 while (count > 0) {
31583 + char *temp;
31584 +
31585 /*
31586 * Handle first page in case it's not aligned
31587 */
31588 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31589 if (!ptr)
31590 return -EFAULT;
31591
31592 - if (copy_to_user(buf, ptr, sz)) {
31593 +#ifdef CONFIG_PAX_USERCOPY
31594 + temp = kmalloc(sz, GFP_KERNEL);
31595 + if (!temp) {
31596 + unxlate_dev_mem_ptr(p, ptr);
31597 + return -ENOMEM;
31598 + }
31599 + memcpy(temp, ptr, sz);
31600 +#else
31601 + temp = ptr;
31602 +#endif
31603 +
31604 + if (copy_to_user(buf, temp, sz)) {
31605 +
31606 +#ifdef CONFIG_PAX_USERCOPY
31607 + kfree(temp);
31608 +#endif
31609 +
31610 unxlate_dev_mem_ptr(p, ptr);
31611 return -EFAULT;
31612 }
31613
31614 +#ifdef CONFIG_PAX_USERCOPY
31615 + kfree(temp);
31616 +#endif
31617 +
31618 unxlate_dev_mem_ptr(p, ptr);
31619
31620 buf += sz;
31621 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31622 size_t count, loff_t *ppos)
31623 {
31624 unsigned long p = *ppos;
31625 - ssize_t low_count, read, sz;
31626 + ssize_t low_count, read, sz, err = 0;
31627 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31628 - int err = 0;
31629
31630 read = 0;
31631 if (p < (unsigned long) high_memory) {
31632 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31633 }
31634 #endif
31635 while (low_count > 0) {
31636 + char *temp;
31637 +
31638 sz = size_inside_page(p, low_count);
31639
31640 /*
31641 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31642 */
31643 kbuf = xlate_dev_kmem_ptr((char *)p);
31644
31645 - if (copy_to_user(buf, kbuf, sz))
31646 +#ifdef CONFIG_PAX_USERCOPY
31647 + temp = kmalloc(sz, GFP_KERNEL);
31648 + if (!temp)
31649 + return -ENOMEM;
31650 + memcpy(temp, kbuf, sz);
31651 +#else
31652 + temp = kbuf;
31653 +#endif
31654 +
31655 + err = copy_to_user(buf, temp, sz);
31656 +
31657 +#ifdef CONFIG_PAX_USERCOPY
31658 + kfree(temp);
31659 +#endif
31660 +
31661 + if (err)
31662 return -EFAULT;
31663 buf += sz;
31664 p += sz;
31665 @@ -889,6 +941,9 @@ static const struct memdev {
31666 #ifdef CONFIG_CRASH_DUMP
31667 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31668 #endif
31669 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31670 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31671 +#endif
31672 };
31673
31674 static int memory_open(struct inode *inode, struct file *filp)
31675 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31676 index 918711a..4ffaf5e 100644
31677 --- a/drivers/char/mmtimer.c
31678 +++ b/drivers/char/mmtimer.c
31679 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31680 return err;
31681 }
31682
31683 -static struct k_clock sgi_clock = {
31684 +static k_clock_no_const sgi_clock = {
31685 .res = 0,
31686 .clock_set = sgi_clock_set,
31687 .clock_get = sgi_clock_get,
31688 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31689 index 674b3ab..a8d1970 100644
31690 --- a/drivers/char/pcmcia/ipwireless/tty.c
31691 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31692 @@ -29,6 +29,7 @@
31693 #include <linux/tty_driver.h>
31694 #include <linux/tty_flip.h>
31695 #include <linux/uaccess.h>
31696 +#include <asm/local.h>
31697
31698 #include "tty.h"
31699 #include "network.h"
31700 @@ -51,7 +52,7 @@ struct ipw_tty {
31701 int tty_type;
31702 struct ipw_network *network;
31703 struct tty_struct *linux_tty;
31704 - int open_count;
31705 + local_t open_count;
31706 unsigned int control_lines;
31707 struct mutex ipw_tty_mutex;
31708 int tx_bytes_queued;
31709 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31710 mutex_unlock(&tty->ipw_tty_mutex);
31711 return -ENODEV;
31712 }
31713 - if (tty->open_count == 0)
31714 + if (local_read(&tty->open_count) == 0)
31715 tty->tx_bytes_queued = 0;
31716
31717 - tty->open_count++;
31718 + local_inc(&tty->open_count);
31719
31720 tty->linux_tty = linux_tty;
31721 linux_tty->driver_data = tty;
31722 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31723
31724 static void do_ipw_close(struct ipw_tty *tty)
31725 {
31726 - tty->open_count--;
31727 -
31728 - if (tty->open_count == 0) {
31729 + if (local_dec_return(&tty->open_count) == 0) {
31730 struct tty_struct *linux_tty = tty->linux_tty;
31731
31732 if (linux_tty != NULL) {
31733 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31734 return;
31735
31736 mutex_lock(&tty->ipw_tty_mutex);
31737 - if (tty->open_count == 0) {
31738 + if (local_read(&tty->open_count) == 0) {
31739 mutex_unlock(&tty->ipw_tty_mutex);
31740 return;
31741 }
31742 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31743 return;
31744 }
31745
31746 - if (!tty->open_count) {
31747 + if (!local_read(&tty->open_count)) {
31748 mutex_unlock(&tty->ipw_tty_mutex);
31749 return;
31750 }
31751 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31752 return -ENODEV;
31753
31754 mutex_lock(&tty->ipw_tty_mutex);
31755 - if (!tty->open_count) {
31756 + if (!local_read(&tty->open_count)) {
31757 mutex_unlock(&tty->ipw_tty_mutex);
31758 return -EINVAL;
31759 }
31760 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31761 if (!tty)
31762 return -ENODEV;
31763
31764 - if (!tty->open_count)
31765 + if (!local_read(&tty->open_count))
31766 return -EINVAL;
31767
31768 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31769 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31770 if (!tty)
31771 return 0;
31772
31773 - if (!tty->open_count)
31774 + if (!local_read(&tty->open_count))
31775 return 0;
31776
31777 return tty->tx_bytes_queued;
31778 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31779 if (!tty)
31780 return -ENODEV;
31781
31782 - if (!tty->open_count)
31783 + if (!local_read(&tty->open_count))
31784 return -EINVAL;
31785
31786 return get_control_lines(tty);
31787 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31788 if (!tty)
31789 return -ENODEV;
31790
31791 - if (!tty->open_count)
31792 + if (!local_read(&tty->open_count))
31793 return -EINVAL;
31794
31795 return set_control_lines(tty, set, clear);
31796 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31797 if (!tty)
31798 return -ENODEV;
31799
31800 - if (!tty->open_count)
31801 + if (!local_read(&tty->open_count))
31802 return -EINVAL;
31803
31804 /* FIXME: Exactly how is the tty object locked here .. */
31805 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31806 against a parallel ioctl etc */
31807 mutex_lock(&ttyj->ipw_tty_mutex);
31808 }
31809 - while (ttyj->open_count)
31810 + while (local_read(&ttyj->open_count))
31811 do_ipw_close(ttyj);
31812 ipwireless_disassociate_network_ttys(network,
31813 ttyj->channel_idx);
31814 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31815 index 62f282e..e45c45c 100644
31816 --- a/drivers/char/pty.c
31817 +++ b/drivers/char/pty.c
31818 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31819 register_sysctl_table(pty_root_table);
31820
31821 /* Now create the /dev/ptmx special device */
31822 + pax_open_kernel();
31823 tty_default_fops(&ptmx_fops);
31824 - ptmx_fops.open = ptmx_open;
31825 + *(void **)&ptmx_fops.open = ptmx_open;
31826 + pax_close_kernel();
31827
31828 cdev_init(&ptmx_cdev, &ptmx_fops);
31829 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31830 diff --git a/drivers/char/random.c b/drivers/char/random.c
31831 index 3a19e2d..6ed09d3 100644
31832 --- a/drivers/char/random.c
31833 +++ b/drivers/char/random.c
31834 @@ -254,8 +254,13 @@
31835 /*
31836 * Configuration information
31837 */
31838 +#ifdef CONFIG_GRKERNSEC_RANDNET
31839 +#define INPUT_POOL_WORDS 512
31840 +#define OUTPUT_POOL_WORDS 128
31841 +#else
31842 #define INPUT_POOL_WORDS 128
31843 #define OUTPUT_POOL_WORDS 32
31844 +#endif
31845 #define SEC_XFER_SIZE 512
31846
31847 /*
31848 @@ -292,10 +297,17 @@ static struct poolinfo {
31849 int poolwords;
31850 int tap1, tap2, tap3, tap4, tap5;
31851 } poolinfo_table[] = {
31852 +#ifdef CONFIG_GRKERNSEC_RANDNET
31853 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31854 + { 512, 411, 308, 208, 104, 1 },
31855 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31856 + { 128, 103, 76, 51, 25, 1 },
31857 +#else
31858 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31859 { 128, 103, 76, 51, 25, 1 },
31860 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31861 { 32, 26, 20, 14, 7, 1 },
31862 +#endif
31863 #if 0
31864 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31865 { 2048, 1638, 1231, 819, 411, 1 },
31866 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31867 #include <linux/sysctl.h>
31868
31869 static int min_read_thresh = 8, min_write_thresh;
31870 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31871 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31872 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31873 static char sysctl_bootid[16];
31874
31875 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31876 index 0e29a23..0efc2c2 100644
31877 --- a/drivers/char/rocket.c
31878 +++ b/drivers/char/rocket.c
31879 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31880 struct rocket_ports tmp;
31881 int board;
31882
31883 + pax_track_stack();
31884 +
31885 if (!retports)
31886 return -EFAULT;
31887 memset(&tmp, 0, sizeof (tmp));
31888 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31889 index 8c262aa..4d3b058 100644
31890 --- a/drivers/char/sonypi.c
31891 +++ b/drivers/char/sonypi.c
31892 @@ -55,6 +55,7 @@
31893 #include <asm/uaccess.h>
31894 #include <asm/io.h>
31895 #include <asm/system.h>
31896 +#include <asm/local.h>
31897
31898 #include <linux/sonypi.h>
31899
31900 @@ -491,7 +492,7 @@ static struct sonypi_device {
31901 spinlock_t fifo_lock;
31902 wait_queue_head_t fifo_proc_list;
31903 struct fasync_struct *fifo_async;
31904 - int open_count;
31905 + local_t open_count;
31906 int model;
31907 struct input_dev *input_jog_dev;
31908 struct input_dev *input_key_dev;
31909 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31910 static int sonypi_misc_release(struct inode *inode, struct file *file)
31911 {
31912 mutex_lock(&sonypi_device.lock);
31913 - sonypi_device.open_count--;
31914 + local_dec(&sonypi_device.open_count);
31915 mutex_unlock(&sonypi_device.lock);
31916 return 0;
31917 }
31918 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31919 lock_kernel();
31920 mutex_lock(&sonypi_device.lock);
31921 /* Flush input queue on first open */
31922 - if (!sonypi_device.open_count)
31923 + if (!local_read(&sonypi_device.open_count))
31924 kfifo_reset(sonypi_device.fifo);
31925 - sonypi_device.open_count++;
31926 + local_inc(&sonypi_device.open_count);
31927 mutex_unlock(&sonypi_device.lock);
31928 unlock_kernel();
31929 return 0;
31930 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31931 index db6dcfa..13834cb 100644
31932 --- a/drivers/char/stallion.c
31933 +++ b/drivers/char/stallion.c
31934 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31935 struct stlport stl_dummyport;
31936 struct stlport *portp;
31937
31938 + pax_track_stack();
31939 +
31940 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31941 return -EFAULT;
31942 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31943 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31944 index a0789f6..cea3902 100644
31945 --- a/drivers/char/tpm/tpm.c
31946 +++ b/drivers/char/tpm/tpm.c
31947 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31948 chip->vendor.req_complete_val)
31949 goto out_recv;
31950
31951 - if ((status == chip->vendor.req_canceled)) {
31952 + if (status == chip->vendor.req_canceled) {
31953 dev_err(chip->dev, "Operation Canceled\n");
31954 rc = -ECANCELED;
31955 goto out;
31956 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31957
31958 struct tpm_chip *chip = dev_get_drvdata(dev);
31959
31960 + pax_track_stack();
31961 +
31962 tpm_cmd.header.in = tpm_readpubek_header;
31963 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31964 "attempting to read the PUBEK");
31965 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31966 index bf2170f..ce8cab9 100644
31967 --- a/drivers/char/tpm/tpm_bios.c
31968 +++ b/drivers/char/tpm/tpm_bios.c
31969 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31970 event = addr;
31971
31972 if ((event->event_type == 0 && event->event_size == 0) ||
31973 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31974 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31975 return NULL;
31976
31977 return addr;
31978 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31979 return NULL;
31980
31981 if ((event->event_type == 0 && event->event_size == 0) ||
31982 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31983 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31984 return NULL;
31985
31986 (*pos)++;
31987 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31988 int i;
31989
31990 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31991 - seq_putc(m, data[i]);
31992 + if (!seq_putc(m, data[i]))
31993 + return -EFAULT;
31994
31995 return 0;
31996 }
31997 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31998 log->bios_event_log_end = log->bios_event_log + len;
31999
32000 virt = acpi_os_map_memory(start, len);
32001 + if (!virt) {
32002 + kfree(log->bios_event_log);
32003 + log->bios_event_log = NULL;
32004 + return -EFAULT;
32005 + }
32006
32007 - memcpy(log->bios_event_log, virt, len);
32008 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32009
32010 acpi_os_unmap_memory(virt, len);
32011 return 0;
32012 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32013 index 123cedf..6664cb4 100644
32014 --- a/drivers/char/tty_io.c
32015 +++ b/drivers/char/tty_io.c
32016 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32017 static int tty_release(struct inode *, struct file *);
32018 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32019 #ifdef CONFIG_COMPAT
32020 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32021 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32022 unsigned long arg);
32023 #else
32024 #define tty_compat_ioctl NULL
32025 @@ -1774,6 +1774,7 @@ got_driver:
32026
32027 if (IS_ERR(tty)) {
32028 mutex_unlock(&tty_mutex);
32029 + tty_driver_kref_put(driver);
32030 return PTR_ERR(tty);
32031 }
32032 }
32033 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32034 return retval;
32035 }
32036
32037 +EXPORT_SYMBOL(tty_ioctl);
32038 +
32039 #ifdef CONFIG_COMPAT
32040 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32041 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32042 unsigned long arg)
32043 {
32044 struct inode *inode = file->f_dentry->d_inode;
32045 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32046
32047 return retval;
32048 }
32049 +
32050 +EXPORT_SYMBOL(tty_compat_ioctl);
32051 #endif
32052
32053 /*
32054 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32055
32056 void tty_default_fops(struct file_operations *fops)
32057 {
32058 - *fops = tty_fops;
32059 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32060 }
32061
32062 /*
32063 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32064 index d814a3d..b55b9c9 100644
32065 --- a/drivers/char/tty_ldisc.c
32066 +++ b/drivers/char/tty_ldisc.c
32067 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32068 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32069 struct tty_ldisc_ops *ldo = ld->ops;
32070
32071 - ldo->refcount--;
32072 + atomic_dec(&ldo->refcount);
32073 module_put(ldo->owner);
32074 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32075
32076 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32077 spin_lock_irqsave(&tty_ldisc_lock, flags);
32078 tty_ldiscs[disc] = new_ldisc;
32079 new_ldisc->num = disc;
32080 - new_ldisc->refcount = 0;
32081 + atomic_set(&new_ldisc->refcount, 0);
32082 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32083
32084 return ret;
32085 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32086 return -EINVAL;
32087
32088 spin_lock_irqsave(&tty_ldisc_lock, flags);
32089 - if (tty_ldiscs[disc]->refcount)
32090 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32091 ret = -EBUSY;
32092 else
32093 tty_ldiscs[disc] = NULL;
32094 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32095 if (ldops) {
32096 ret = ERR_PTR(-EAGAIN);
32097 if (try_module_get(ldops->owner)) {
32098 - ldops->refcount++;
32099 + atomic_inc(&ldops->refcount);
32100 ret = ldops;
32101 }
32102 }
32103 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32104 unsigned long flags;
32105
32106 spin_lock_irqsave(&tty_ldisc_lock, flags);
32107 - ldops->refcount--;
32108 + atomic_dec(&ldops->refcount);
32109 module_put(ldops->owner);
32110 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32111 }
32112 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32113 index a035ae3..c27fe2c 100644
32114 --- a/drivers/char/virtio_console.c
32115 +++ b/drivers/char/virtio_console.c
32116 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32117 * virtqueue, so we let the drivers do some boutique early-output thing. */
32118 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32119 {
32120 - virtio_cons.put_chars = put_chars;
32121 + pax_open_kernel();
32122 + *(void **)&virtio_cons.put_chars = put_chars;
32123 + pax_close_kernel();
32124 return hvc_instantiate(0, 0, &virtio_cons);
32125 }
32126
32127 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32128 out_vq = vqs[1];
32129
32130 /* Start using the new console output. */
32131 - virtio_cons.get_chars = get_chars;
32132 - virtio_cons.put_chars = put_chars;
32133 - virtio_cons.notifier_add = notifier_add_vio;
32134 - virtio_cons.notifier_del = notifier_del_vio;
32135 - virtio_cons.notifier_hangup = notifier_del_vio;
32136 + pax_open_kernel();
32137 + *(void **)&virtio_cons.get_chars = get_chars;
32138 + *(void **)&virtio_cons.put_chars = put_chars;
32139 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32140 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32141 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32142 + pax_close_kernel();
32143
32144 /* The first argument of hvc_alloc() is the virtual console number, so
32145 * we use zero. The second argument is the parameter for the
32146 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32147 index 0c80c68..53d59c1 100644
32148 --- a/drivers/char/vt.c
32149 +++ b/drivers/char/vt.c
32150 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32151
32152 static void notify_write(struct vc_data *vc, unsigned int unicode)
32153 {
32154 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32155 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32156 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32157 }
32158
32159 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32160 index 6351a26..999af95 100644
32161 --- a/drivers/char/vt_ioctl.c
32162 +++ b/drivers/char/vt_ioctl.c
32163 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32164 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32165 return -EFAULT;
32166
32167 - if (!capable(CAP_SYS_TTY_CONFIG))
32168 - perm = 0;
32169 -
32170 switch (cmd) {
32171 case KDGKBENT:
32172 key_map = key_maps[s];
32173 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32174 val = (i ? K_HOLE : K_NOSUCHMAP);
32175 return put_user(val, &user_kbe->kb_value);
32176 case KDSKBENT:
32177 + if (!capable(CAP_SYS_TTY_CONFIG))
32178 + perm = 0;
32179 +
32180 if (!perm)
32181 return -EPERM;
32182 +
32183 if (!i && v == K_NOSUCHMAP) {
32184 /* deallocate map */
32185 key_map = key_maps[s];
32186 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32187 int i, j, k;
32188 int ret;
32189
32190 - if (!capable(CAP_SYS_TTY_CONFIG))
32191 - perm = 0;
32192 -
32193 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32194 if (!kbs) {
32195 ret = -ENOMEM;
32196 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32197 kfree(kbs);
32198 return ((p && *p) ? -EOVERFLOW : 0);
32199 case KDSKBSENT:
32200 + if (!capable(CAP_SYS_TTY_CONFIG))
32201 + perm = 0;
32202 +
32203 if (!perm) {
32204 ret = -EPERM;
32205 goto reterr;
32206 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32207 index c7ae026..1769c1d 100644
32208 --- a/drivers/cpufreq/cpufreq.c
32209 +++ b/drivers/cpufreq/cpufreq.c
32210 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32211 complete(&policy->kobj_unregister);
32212 }
32213
32214 -static struct sysfs_ops sysfs_ops = {
32215 +static const struct sysfs_ops sysfs_ops = {
32216 .show = show,
32217 .store = store,
32218 };
32219 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32220 index 97b0038..2056670 100644
32221 --- a/drivers/cpuidle/sysfs.c
32222 +++ b/drivers/cpuidle/sysfs.c
32223 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32224 return ret;
32225 }
32226
32227 -static struct sysfs_ops cpuidle_sysfs_ops = {
32228 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32229 .show = cpuidle_show,
32230 .store = cpuidle_store,
32231 };
32232 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32233 return ret;
32234 }
32235
32236 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32237 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32238 .show = cpuidle_state_show,
32239 };
32240
32241 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32242 .release = cpuidle_state_sysfs_release,
32243 };
32244
32245 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32246 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32247 {
32248 kobject_put(&device->kobjs[i]->kobj);
32249 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32250 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32251 index 5f753fc..0377ae9 100644
32252 --- a/drivers/crypto/hifn_795x.c
32253 +++ b/drivers/crypto/hifn_795x.c
32254 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32255 0xCA, 0x34, 0x2B, 0x2E};
32256 struct scatterlist sg;
32257
32258 + pax_track_stack();
32259 +
32260 memset(src, 0, sizeof(src));
32261 memset(ctx.key, 0, sizeof(ctx.key));
32262
32263 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32264 index 71e6482..de8d96c 100644
32265 --- a/drivers/crypto/padlock-aes.c
32266 +++ b/drivers/crypto/padlock-aes.c
32267 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32268 struct crypto_aes_ctx gen_aes;
32269 int cpu;
32270
32271 + pax_track_stack();
32272 +
32273 if (key_len % 8) {
32274 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32275 return -EINVAL;
32276 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32277 index dcc4ab7..cc834bb 100644
32278 --- a/drivers/dma/ioat/dma.c
32279 +++ b/drivers/dma/ioat/dma.c
32280 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32281 return entry->show(&chan->common, page);
32282 }
32283
32284 -struct sysfs_ops ioat_sysfs_ops = {
32285 +const struct sysfs_ops ioat_sysfs_ops = {
32286 .show = ioat_attr_show,
32287 };
32288
32289 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32290 index bbc3e78..f2db62c 100644
32291 --- a/drivers/dma/ioat/dma.h
32292 +++ b/drivers/dma/ioat/dma.h
32293 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32294 unsigned long *phys_complete);
32295 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32296 void ioat_kobject_del(struct ioatdma_device *device);
32297 -extern struct sysfs_ops ioat_sysfs_ops;
32298 +extern const struct sysfs_ops ioat_sysfs_ops;
32299 extern struct ioat_sysfs_entry ioat_version_attr;
32300 extern struct ioat_sysfs_entry ioat_cap_attr;
32301 #endif /* IOATDMA_H */
32302 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32303 index 9908c9e..3ceb0e5 100644
32304 --- a/drivers/dma/ioat/dma_v3.c
32305 +++ b/drivers/dma/ioat/dma_v3.c
32306 @@ -71,10 +71,10 @@
32307 /* provide a lookup table for setting the source address in the base or
32308 * extended descriptor of an xor or pq descriptor
32309 */
32310 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32311 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32312 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32313 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32314 +static const u8 xor_idx_to_desc = 0xd0;
32315 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32316 +static const u8 pq_idx_to_desc = 0xf8;
32317 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32318
32319 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32320 {
32321 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32322 index 85c464a..afd1e73 100644
32323 --- a/drivers/edac/amd64_edac.c
32324 +++ b/drivers/edac/amd64_edac.c
32325 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32326 * PCI core identifies what devices are on a system during boot, and then
32327 * inquiry this table to see if this driver is for a given device found.
32328 */
32329 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32330 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32331 {
32332 .vendor = PCI_VENDOR_ID_AMD,
32333 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32334 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32335 index 2b95f1a..4f52793 100644
32336 --- a/drivers/edac/amd76x_edac.c
32337 +++ b/drivers/edac/amd76x_edac.c
32338 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32339 edac_mc_free(mci);
32340 }
32341
32342 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32343 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32344 {
32345 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32346 AMD762},
32347 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32348 index d205d49..74c9672 100644
32349 --- a/drivers/edac/e752x_edac.c
32350 +++ b/drivers/edac/e752x_edac.c
32351 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32352 edac_mc_free(mci);
32353 }
32354
32355 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32356 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32357 {
32358 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32359 E7520},
32360 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32361 index c7d11cc..c59c1ca 100644
32362 --- a/drivers/edac/e7xxx_edac.c
32363 +++ b/drivers/edac/e7xxx_edac.c
32364 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32365 edac_mc_free(mci);
32366 }
32367
32368 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32369 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32370 {
32371 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32372 E7205},
32373 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32374 index 5376457..5fdedbc 100644
32375 --- a/drivers/edac/edac_device_sysfs.c
32376 +++ b/drivers/edac/edac_device_sysfs.c
32377 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32378 }
32379
32380 /* edac_dev file operations for an 'ctl_info' */
32381 -static struct sysfs_ops device_ctl_info_ops = {
32382 +static const struct sysfs_ops device_ctl_info_ops = {
32383 .show = edac_dev_ctl_info_show,
32384 .store = edac_dev_ctl_info_store
32385 };
32386 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32387 }
32388
32389 /* edac_dev file operations for an 'instance' */
32390 -static struct sysfs_ops device_instance_ops = {
32391 +static const struct sysfs_ops device_instance_ops = {
32392 .show = edac_dev_instance_show,
32393 .store = edac_dev_instance_store
32394 };
32395 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32396 }
32397
32398 /* edac_dev file operations for a 'block' */
32399 -static struct sysfs_ops device_block_ops = {
32400 +static const struct sysfs_ops device_block_ops = {
32401 .show = edac_dev_block_show,
32402 .store = edac_dev_block_store
32403 };
32404 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32405 index e1d4ce0..88840e9 100644
32406 --- a/drivers/edac/edac_mc_sysfs.c
32407 +++ b/drivers/edac/edac_mc_sysfs.c
32408 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32409 return -EIO;
32410 }
32411
32412 -static struct sysfs_ops csrowfs_ops = {
32413 +static const struct sysfs_ops csrowfs_ops = {
32414 .show = csrowdev_show,
32415 .store = csrowdev_store
32416 };
32417 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32418 }
32419
32420 /* Intermediate show/store table */
32421 -static struct sysfs_ops mci_ops = {
32422 +static const struct sysfs_ops mci_ops = {
32423 .show = mcidev_show,
32424 .store = mcidev_store
32425 };
32426 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32427 index 422728c..d8d9c88 100644
32428 --- a/drivers/edac/edac_pci_sysfs.c
32429 +++ b/drivers/edac/edac_pci_sysfs.c
32430 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32431 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32432 static int edac_pci_poll_msec = 1000; /* one second workq period */
32433
32434 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32435 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32436 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32437 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32438
32439 static struct kobject *edac_pci_top_main_kobj;
32440 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32441 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32442 }
32443
32444 /* fs_ops table */
32445 -static struct sysfs_ops pci_instance_ops = {
32446 +static const struct sysfs_ops pci_instance_ops = {
32447 .show = edac_pci_instance_show,
32448 .store = edac_pci_instance_store
32449 };
32450 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32451 return -EIO;
32452 }
32453
32454 -static struct sysfs_ops edac_pci_sysfs_ops = {
32455 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32456 .show = edac_pci_dev_show,
32457 .store = edac_pci_dev_store
32458 };
32459 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32460 edac_printk(KERN_CRIT, EDAC_PCI,
32461 "Signaled System Error on %s\n",
32462 pci_name(dev));
32463 - atomic_inc(&pci_nonparity_count);
32464 + atomic_inc_unchecked(&pci_nonparity_count);
32465 }
32466
32467 if (status & (PCI_STATUS_PARITY)) {
32468 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32469 "Master Data Parity Error on %s\n",
32470 pci_name(dev));
32471
32472 - atomic_inc(&pci_parity_count);
32473 + atomic_inc_unchecked(&pci_parity_count);
32474 }
32475
32476 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32477 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32478 "Detected Parity Error on %s\n",
32479 pci_name(dev));
32480
32481 - atomic_inc(&pci_parity_count);
32482 + atomic_inc_unchecked(&pci_parity_count);
32483 }
32484 }
32485
32486 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32487 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32488 "Signaled System Error on %s\n",
32489 pci_name(dev));
32490 - atomic_inc(&pci_nonparity_count);
32491 + atomic_inc_unchecked(&pci_nonparity_count);
32492 }
32493
32494 if (status & (PCI_STATUS_PARITY)) {
32495 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32496 "Master Data Parity Error on "
32497 "%s\n", pci_name(dev));
32498
32499 - atomic_inc(&pci_parity_count);
32500 + atomic_inc_unchecked(&pci_parity_count);
32501 }
32502
32503 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32504 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32505 "Detected Parity Error on %s\n",
32506 pci_name(dev));
32507
32508 - atomic_inc(&pci_parity_count);
32509 + atomic_inc_unchecked(&pci_parity_count);
32510 }
32511 }
32512 }
32513 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32514 if (!check_pci_errors)
32515 return;
32516
32517 - before_count = atomic_read(&pci_parity_count);
32518 + before_count = atomic_read_unchecked(&pci_parity_count);
32519
32520 /* scan all PCI devices looking for a Parity Error on devices and
32521 * bridges.
32522 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32523 /* Only if operator has selected panic on PCI Error */
32524 if (edac_pci_get_panic_on_pe()) {
32525 /* If the count is different 'after' from 'before' */
32526 - if (before_count != atomic_read(&pci_parity_count))
32527 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32528 panic("EDAC: PCI Parity Error");
32529 }
32530 }
32531 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32532 index 6c9a0f2..9c1cf7e 100644
32533 --- a/drivers/edac/i3000_edac.c
32534 +++ b/drivers/edac/i3000_edac.c
32535 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32536 edac_mc_free(mci);
32537 }
32538
32539 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32540 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32541 {
32542 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32543 I3000},
32544 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32545 index fde4db9..fe108f9 100644
32546 --- a/drivers/edac/i3200_edac.c
32547 +++ b/drivers/edac/i3200_edac.c
32548 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32549 edac_mc_free(mci);
32550 }
32551
32552 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32553 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32554 {
32555 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32556 I3200},
32557 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32558 index adc10a2..57d4ccf 100644
32559 --- a/drivers/edac/i5000_edac.c
32560 +++ b/drivers/edac/i5000_edac.c
32561 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32562 *
32563 * The "E500P" device is the first device supported.
32564 */
32565 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32566 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32568 .driver_data = I5000P},
32569
32570 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32571 index 22db05a..b2b5503 100644
32572 --- a/drivers/edac/i5100_edac.c
32573 +++ b/drivers/edac/i5100_edac.c
32574 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32575 edac_mc_free(mci);
32576 }
32577
32578 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32579 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32580 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32581 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32582 { 0, }
32583 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32584 index f99d106..f050710 100644
32585 --- a/drivers/edac/i5400_edac.c
32586 +++ b/drivers/edac/i5400_edac.c
32587 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32588 *
32589 * The "E500P" device is the first device supported.
32590 */
32591 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32592 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32593 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32594 {0,} /* 0 terminated list. */
32595 };
32596 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32597 index 577760a..9ce16ce 100644
32598 --- a/drivers/edac/i82443bxgx_edac.c
32599 +++ b/drivers/edac/i82443bxgx_edac.c
32600 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32601
32602 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32603
32604 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32605 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32606 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32607 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32608 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32609 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32610 index c0088ba..64a7b98 100644
32611 --- a/drivers/edac/i82860_edac.c
32612 +++ b/drivers/edac/i82860_edac.c
32613 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32614 edac_mc_free(mci);
32615 }
32616
32617 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32618 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32619 {
32620 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32621 I82860},
32622 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32623 index b2d83b9..a34357b 100644
32624 --- a/drivers/edac/i82875p_edac.c
32625 +++ b/drivers/edac/i82875p_edac.c
32626 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32627 edac_mc_free(mci);
32628 }
32629
32630 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32631 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32632 {
32633 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32634 I82875P},
32635 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32636 index 2eed3ea..87bbbd1 100644
32637 --- a/drivers/edac/i82975x_edac.c
32638 +++ b/drivers/edac/i82975x_edac.c
32639 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32640 edac_mc_free(mci);
32641 }
32642
32643 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32644 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32645 {
32646 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32647 I82975X
32648 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32649 index 9900675..78ac2b6 100644
32650 --- a/drivers/edac/r82600_edac.c
32651 +++ b/drivers/edac/r82600_edac.c
32652 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32653 edac_mc_free(mci);
32654 }
32655
32656 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32657 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32658 {
32659 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32660 },
32661 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32662 index d4ec605..4cfec4e 100644
32663 --- a/drivers/edac/x38_edac.c
32664 +++ b/drivers/edac/x38_edac.c
32665 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32666 edac_mc_free(mci);
32667 }
32668
32669 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32670 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32671 {
32672 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32673 X38},
32674 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32675 index 3fc2ceb..daf098f 100644
32676 --- a/drivers/firewire/core-card.c
32677 +++ b/drivers/firewire/core-card.c
32678 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32679
32680 void fw_core_remove_card(struct fw_card *card)
32681 {
32682 - struct fw_card_driver dummy_driver = dummy_driver_template;
32683 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32684
32685 card->driver->update_phy_reg(card, 4,
32686 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32687 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32688 index 4560d8f..36db24a 100644
32689 --- a/drivers/firewire/core-cdev.c
32690 +++ b/drivers/firewire/core-cdev.c
32691 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32692 int ret;
32693
32694 if ((request->channels == 0 && request->bandwidth == 0) ||
32695 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32696 - request->bandwidth < 0)
32697 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32698 return -EINVAL;
32699
32700 r = kmalloc(sizeof(*r), GFP_KERNEL);
32701 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32702 index da628c7..cf54a2c 100644
32703 --- a/drivers/firewire/core-transaction.c
32704 +++ b/drivers/firewire/core-transaction.c
32705 @@ -36,6 +36,7 @@
32706 #include <linux/string.h>
32707 #include <linux/timer.h>
32708 #include <linux/types.h>
32709 +#include <linux/sched.h>
32710
32711 #include <asm/byteorder.h>
32712
32713 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32714 struct transaction_callback_data d;
32715 struct fw_transaction t;
32716
32717 + pax_track_stack();
32718 +
32719 init_completion(&d.done);
32720 d.payload = payload;
32721 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32722 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32723 index 7ff6e75..a2965d9 100644
32724 --- a/drivers/firewire/core.h
32725 +++ b/drivers/firewire/core.h
32726 @@ -86,6 +86,7 @@ struct fw_card_driver {
32727
32728 int (*stop_iso)(struct fw_iso_context *ctx);
32729 };
32730 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32731
32732 void fw_card_initialize(struct fw_card *card,
32733 const struct fw_card_driver *driver, struct device *device);
32734 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32735 index 3a2ccb0..82fd7c4 100644
32736 --- a/drivers/firmware/dmi_scan.c
32737 +++ b/drivers/firmware/dmi_scan.c
32738 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32739 }
32740 }
32741 else {
32742 - /*
32743 - * no iounmap() for that ioremap(); it would be a no-op, but
32744 - * it's so early in setup that sucker gets confused into doing
32745 - * what it shouldn't if we actually call it.
32746 - */
32747 p = dmi_ioremap(0xF0000, 0x10000);
32748 if (p == NULL)
32749 goto error;
32750 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32751 if (buf == NULL)
32752 return -1;
32753
32754 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32755 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32756
32757 iounmap(buf);
32758 return 0;
32759 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32760 index 9e4f59d..110e24e 100644
32761 --- a/drivers/firmware/edd.c
32762 +++ b/drivers/firmware/edd.c
32763 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32764 return ret;
32765 }
32766
32767 -static struct sysfs_ops edd_attr_ops = {
32768 +static const struct sysfs_ops edd_attr_ops = {
32769 .show = edd_attr_show,
32770 };
32771
32772 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32773 index f4f709d..082f06e 100644
32774 --- a/drivers/firmware/efivars.c
32775 +++ b/drivers/firmware/efivars.c
32776 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32777 return ret;
32778 }
32779
32780 -static struct sysfs_ops efivar_attr_ops = {
32781 +static const struct sysfs_ops efivar_attr_ops = {
32782 .show = efivar_attr_show,
32783 .store = efivar_attr_store,
32784 };
32785 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32786 index 051d1eb..0a5d4e7 100644
32787 --- a/drivers/firmware/iscsi_ibft.c
32788 +++ b/drivers/firmware/iscsi_ibft.c
32789 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32790 return ret;
32791 }
32792
32793 -static struct sysfs_ops ibft_attr_ops = {
32794 +static const struct sysfs_ops ibft_attr_ops = {
32795 .show = ibft_show_attribute,
32796 };
32797
32798 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32799 index 56f9234..8c58c7b 100644
32800 --- a/drivers/firmware/memmap.c
32801 +++ b/drivers/firmware/memmap.c
32802 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32803 NULL
32804 };
32805
32806 -static struct sysfs_ops memmap_attr_ops = {
32807 +static const struct sysfs_ops memmap_attr_ops = {
32808 .show = memmap_attr_show,
32809 };
32810
32811 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32812 index b16c9a8..2af7d3f 100644
32813 --- a/drivers/gpio/vr41xx_giu.c
32814 +++ b/drivers/gpio/vr41xx_giu.c
32815 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32816 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32817 maskl, pendl, maskh, pendh);
32818
32819 - atomic_inc(&irq_err_count);
32820 + atomic_inc_unchecked(&irq_err_count);
32821
32822 return -EINVAL;
32823 }
32824 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32825 index bea6efc..3dc0f42 100644
32826 --- a/drivers/gpu/drm/drm_crtc.c
32827 +++ b/drivers/gpu/drm/drm_crtc.c
32828 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32829 */
32830 if ((out_resp->count_modes >= mode_count) && mode_count) {
32831 copied = 0;
32832 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32833 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32834 list_for_each_entry(mode, &connector->modes, head) {
32835 drm_crtc_convert_to_umode(&u_mode, mode);
32836 if (copy_to_user(mode_ptr + copied,
32837 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32838
32839 if ((out_resp->count_props >= props_count) && props_count) {
32840 copied = 0;
32841 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32842 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32843 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32844 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32845 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32846 if (connector->property_ids[i] != 0) {
32847 if (put_user(connector->property_ids[i],
32848 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32849
32850 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32851 copied = 0;
32852 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32853 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32854 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32855 if (connector->encoder_ids[i] != 0) {
32856 if (put_user(connector->encoder_ids[i],
32857 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32858 }
32859
32860 for (i = 0; i < crtc_req->count_connectors; i++) {
32861 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32862 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32863 if (get_user(out_id, &set_connectors_ptr[i])) {
32864 ret = -EFAULT;
32865 goto out;
32866 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32867 out_resp->flags = property->flags;
32868
32869 if ((out_resp->count_values >= value_count) && value_count) {
32870 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32871 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32872 for (i = 0; i < value_count; i++) {
32873 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32874 ret = -EFAULT;
32875 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32876 if (property->flags & DRM_MODE_PROP_ENUM) {
32877 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32878 copied = 0;
32879 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32880 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32881 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32882
32883 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32884 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32885 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32886 copied = 0;
32887 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32888 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32889 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32890
32891 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32892 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32893 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32894 blob = obj_to_blob(obj);
32895
32896 if (out_resp->length == blob->length) {
32897 - blob_ptr = (void *)(unsigned long)out_resp->data;
32898 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32899 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32900 ret = -EFAULT;
32901 goto done;
32902 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32903 index 1b8745d..92fdbf6 100644
32904 --- a/drivers/gpu/drm/drm_crtc_helper.c
32905 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32906 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32907 struct drm_crtc *tmp;
32908 int crtc_mask = 1;
32909
32910 - WARN(!crtc, "checking null crtc?");
32911 + BUG_ON(!crtc);
32912
32913 dev = crtc->dev;
32914
32915 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32916
32917 adjusted_mode = drm_mode_duplicate(dev, mode);
32918
32919 + pax_track_stack();
32920 +
32921 crtc->enabled = drm_helper_crtc_in_use(crtc);
32922
32923 if (!crtc->enabled)
32924 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32925 index 0e27d98..dec8768 100644
32926 --- a/drivers/gpu/drm/drm_drv.c
32927 +++ b/drivers/gpu/drm/drm_drv.c
32928 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32929 char *kdata = NULL;
32930
32931 atomic_inc(&dev->ioctl_count);
32932 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32933 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32934 ++file_priv->ioctl_count;
32935
32936 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32937 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32938 index 519161e..98c840c 100644
32939 --- a/drivers/gpu/drm/drm_fops.c
32940 +++ b/drivers/gpu/drm/drm_fops.c
32941 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32942 }
32943
32944 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32945 - atomic_set(&dev->counts[i], 0);
32946 + atomic_set_unchecked(&dev->counts[i], 0);
32947
32948 dev->sigdata.lock = NULL;
32949
32950 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32951
32952 retcode = drm_open_helper(inode, filp, dev);
32953 if (!retcode) {
32954 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32955 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32956 spin_lock(&dev->count_lock);
32957 - if (!dev->open_count++) {
32958 + if (local_inc_return(&dev->open_count) == 1) {
32959 spin_unlock(&dev->count_lock);
32960 retcode = drm_setup(dev);
32961 goto out;
32962 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32963
32964 lock_kernel();
32965
32966 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32967 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32968
32969 if (dev->driver->preclose)
32970 dev->driver->preclose(dev, file_priv);
32971 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32972 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32973 task_pid_nr(current),
32974 (long)old_encode_dev(file_priv->minor->device),
32975 - dev->open_count);
32976 + local_read(&dev->open_count));
32977
32978 /* Release any auth tokens that might point to this file_priv,
32979 (do that under the drm_global_mutex) */
32980 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32981 * End inline drm_release
32982 */
32983
32984 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32985 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32986 spin_lock(&dev->count_lock);
32987 - if (!--dev->open_count) {
32988 + if (local_dec_and_test(&dev->open_count)) {
32989 if (atomic_read(&dev->ioctl_count)) {
32990 DRM_ERROR("Device busy: %d\n",
32991 atomic_read(&dev->ioctl_count));
32992 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32993 index 8bf3770..79422805 100644
32994 --- a/drivers/gpu/drm/drm_gem.c
32995 +++ b/drivers/gpu/drm/drm_gem.c
32996 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32997 spin_lock_init(&dev->object_name_lock);
32998 idr_init(&dev->object_name_idr);
32999 atomic_set(&dev->object_count, 0);
33000 - atomic_set(&dev->object_memory, 0);
33001 + atomic_set_unchecked(&dev->object_memory, 0);
33002 atomic_set(&dev->pin_count, 0);
33003 - atomic_set(&dev->pin_memory, 0);
33004 + atomic_set_unchecked(&dev->pin_memory, 0);
33005 atomic_set(&dev->gtt_count, 0);
33006 - atomic_set(&dev->gtt_memory, 0);
33007 + atomic_set_unchecked(&dev->gtt_memory, 0);
33008
33009 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33010 if (!mm) {
33011 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33012 goto fput;
33013 }
33014 atomic_inc(&dev->object_count);
33015 - atomic_add(obj->size, &dev->object_memory);
33016 + atomic_add_unchecked(obj->size, &dev->object_memory);
33017 return obj;
33018 fput:
33019 fput(obj->filp);
33020 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33021
33022 fput(obj->filp);
33023 atomic_dec(&dev->object_count);
33024 - atomic_sub(obj->size, &dev->object_memory);
33025 + atomic_sub_unchecked(obj->size, &dev->object_memory);
33026 kfree(obj);
33027 }
33028 EXPORT_SYMBOL(drm_gem_object_free);
33029 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33030 index f0f6c6b..34af322 100644
33031 --- a/drivers/gpu/drm/drm_info.c
33032 +++ b/drivers/gpu/drm/drm_info.c
33033 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33034 struct drm_local_map *map;
33035 struct drm_map_list *r_list;
33036
33037 - /* Hardcoded from _DRM_FRAME_BUFFER,
33038 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33039 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33040 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33041 + static const char * const types[] = {
33042 + [_DRM_FRAME_BUFFER] = "FB",
33043 + [_DRM_REGISTERS] = "REG",
33044 + [_DRM_SHM] = "SHM",
33045 + [_DRM_AGP] = "AGP",
33046 + [_DRM_SCATTER_GATHER] = "SG",
33047 + [_DRM_CONSISTENT] = "PCI",
33048 + [_DRM_GEM] = "GEM" };
33049 const char *type;
33050 int i;
33051
33052 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33053 map = r_list->map;
33054 if (!map)
33055 continue;
33056 - if (map->type < 0 || map->type > 5)
33057 + if (map->type >= ARRAY_SIZE(types))
33058 type = "??";
33059 else
33060 type = types[map->type];
33061 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33062 struct drm_device *dev = node->minor->dev;
33063
33064 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33065 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33066 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33067 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33068 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33069 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33070 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33071 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33072 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33073 return 0;
33074 }
33075 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33076 mutex_lock(&dev->struct_mutex);
33077 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33078 atomic_read(&dev->vma_count),
33079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33080 + NULL, 0);
33081 +#else
33082 high_memory, (u64)virt_to_phys(high_memory));
33083 +#endif
33084
33085 list_for_each_entry(pt, &dev->vmalist, head) {
33086 vma = pt->vma;
33087 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33088 continue;
33089 seq_printf(m,
33090 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33091 - pt->pid, vma->vm_start, vma->vm_end,
33092 + pt->pid,
33093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33094 + 0, 0,
33095 +#else
33096 + vma->vm_start, vma->vm_end,
33097 +#endif
33098 vma->vm_flags & VM_READ ? 'r' : '-',
33099 vma->vm_flags & VM_WRITE ? 'w' : '-',
33100 vma->vm_flags & VM_EXEC ? 'x' : '-',
33101 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33102 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33103 vma->vm_flags & VM_IO ? 'i' : '-',
33104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33105 + 0);
33106 +#else
33107 vma->vm_pgoff);
33108 +#endif
33109
33110 #if defined(__i386__)
33111 pgprot = pgprot_val(vma->vm_page_prot);
33112 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33113 index 282d9fd..71e5f11 100644
33114 --- a/drivers/gpu/drm/drm_ioc32.c
33115 +++ b/drivers/gpu/drm/drm_ioc32.c
33116 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33117 request = compat_alloc_user_space(nbytes);
33118 if (!access_ok(VERIFY_WRITE, request, nbytes))
33119 return -EFAULT;
33120 - list = (struct drm_buf_desc *) (request + 1);
33121 + list = (struct drm_buf_desc __user *) (request + 1);
33122
33123 if (__put_user(count, &request->count)
33124 || __put_user(list, &request->list))
33125 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33126 request = compat_alloc_user_space(nbytes);
33127 if (!access_ok(VERIFY_WRITE, request, nbytes))
33128 return -EFAULT;
33129 - list = (struct drm_buf_pub *) (request + 1);
33130 + list = (struct drm_buf_pub __user *) (request + 1);
33131
33132 if (__put_user(count, &request->count)
33133 || __put_user(list, &request->list))
33134 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33135 index 9b9ff46..4ea724c 100644
33136 --- a/drivers/gpu/drm/drm_ioctl.c
33137 +++ b/drivers/gpu/drm/drm_ioctl.c
33138 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33139 stats->data[i].value =
33140 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33141 else
33142 - stats->data[i].value = atomic_read(&dev->counts[i]);
33143 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33144 stats->data[i].type = dev->types[i];
33145 }
33146
33147 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33148 index e2f70a5..c703e86 100644
33149 --- a/drivers/gpu/drm/drm_lock.c
33150 +++ b/drivers/gpu/drm/drm_lock.c
33151 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33152 if (drm_lock_take(&master->lock, lock->context)) {
33153 master->lock.file_priv = file_priv;
33154 master->lock.lock_time = jiffies;
33155 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33156 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33157 break; /* Got lock */
33158 }
33159
33160 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33161 return -EINVAL;
33162 }
33163
33164 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33165 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33166
33167 /* kernel_context_switch isn't used by any of the x86 drm
33168 * modules but is required by the Sparc driver.
33169 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33170 index 7d1d88c..b9131b2 100644
33171 --- a/drivers/gpu/drm/i810/i810_dma.c
33172 +++ b/drivers/gpu/drm/i810/i810_dma.c
33173 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33174 dma->buflist[vertex->idx],
33175 vertex->discard, vertex->used);
33176
33177 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33178 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33179 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33180 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33181 sarea_priv->last_enqueue = dev_priv->counter - 1;
33182 sarea_priv->last_dispatch = (int)hw_status[5];
33183
33184 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33185 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33186 mc->last_render);
33187
33188 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33189 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33190 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33191 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33192 sarea_priv->last_enqueue = dev_priv->counter - 1;
33193 sarea_priv->last_dispatch = (int)hw_status[5];
33194
33195 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33196 index 21e2691..7321edd 100644
33197 --- a/drivers/gpu/drm/i810/i810_drv.h
33198 +++ b/drivers/gpu/drm/i810/i810_drv.h
33199 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33200 int page_flipping;
33201
33202 wait_queue_head_t irq_queue;
33203 - atomic_t irq_received;
33204 - atomic_t irq_emitted;
33205 + atomic_unchecked_t irq_received;
33206 + atomic_unchecked_t irq_emitted;
33207
33208 int front_offset;
33209 } drm_i810_private_t;
33210 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33211 index da82afe..48a45de 100644
33212 --- a/drivers/gpu/drm/i830/i830_drv.h
33213 +++ b/drivers/gpu/drm/i830/i830_drv.h
33214 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33215 int page_flipping;
33216
33217 wait_queue_head_t irq_queue;
33218 - atomic_t irq_received;
33219 - atomic_t irq_emitted;
33220 + atomic_unchecked_t irq_received;
33221 + atomic_unchecked_t irq_emitted;
33222
33223 int use_mi_batchbuffer_start;
33224
33225 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33226 index 91ec2bb..6f21fab 100644
33227 --- a/drivers/gpu/drm/i830/i830_irq.c
33228 +++ b/drivers/gpu/drm/i830/i830_irq.c
33229 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33230
33231 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33232
33233 - atomic_inc(&dev_priv->irq_received);
33234 + atomic_inc_unchecked(&dev_priv->irq_received);
33235 wake_up_interruptible(&dev_priv->irq_queue);
33236
33237 return IRQ_HANDLED;
33238 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33239
33240 DRM_DEBUG("%s\n", __func__);
33241
33242 - atomic_inc(&dev_priv->irq_emitted);
33243 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33244
33245 BEGIN_LP_RING(2);
33246 OUT_RING(0);
33247 OUT_RING(GFX_OP_USER_INTERRUPT);
33248 ADVANCE_LP_RING();
33249
33250 - return atomic_read(&dev_priv->irq_emitted);
33251 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33252 }
33253
33254 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33255 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33256
33257 DRM_DEBUG("%s\n", __func__);
33258
33259 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33260 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33261 return 0;
33262
33263 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33264 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33265
33266 for (;;) {
33267 __set_current_state(TASK_INTERRUPTIBLE);
33268 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33269 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33270 break;
33271 if ((signed)(end - jiffies) <= 0) {
33272 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33273 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33274 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33275 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33276 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33277 - atomic_set(&dev_priv->irq_received, 0);
33278 - atomic_set(&dev_priv->irq_emitted, 0);
33279 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33280 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33281 init_waitqueue_head(&dev_priv->irq_queue);
33282 }
33283
33284 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33285 index 288fc50..c6092055 100644
33286 --- a/drivers/gpu/drm/i915/dvo.h
33287 +++ b/drivers/gpu/drm/i915/dvo.h
33288 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33289 *
33290 * \return singly-linked list of modes or NULL if no modes found.
33291 */
33292 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33293 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33294
33295 /**
33296 * Clean up driver-specific bits of the output
33297 */
33298 - void (*destroy) (struct intel_dvo_device *dvo);
33299 + void (* const destroy) (struct intel_dvo_device *dvo);
33300
33301 /**
33302 * Debugging hook to dump device registers to log file
33303 */
33304 - void (*dump_regs)(struct intel_dvo_device *dvo);
33305 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33306 };
33307
33308 -extern struct intel_dvo_dev_ops sil164_ops;
33309 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33310 -extern struct intel_dvo_dev_ops ivch_ops;
33311 -extern struct intel_dvo_dev_ops tfp410_ops;
33312 -extern struct intel_dvo_dev_ops ch7017_ops;
33313 +extern const struct intel_dvo_dev_ops sil164_ops;
33314 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33315 +extern const struct intel_dvo_dev_ops ivch_ops;
33316 +extern const struct intel_dvo_dev_ops tfp410_ops;
33317 +extern const struct intel_dvo_dev_ops ch7017_ops;
33318
33319 #endif /* _INTEL_DVO_H */
33320 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33321 index 621815b..499d82e 100644
33322 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33323 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33324 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33325 }
33326 }
33327
33328 -struct intel_dvo_dev_ops ch7017_ops = {
33329 +const struct intel_dvo_dev_ops ch7017_ops = {
33330 .init = ch7017_init,
33331 .detect = ch7017_detect,
33332 .mode_valid = ch7017_mode_valid,
33333 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33334 index a9b8962..ac769ba 100644
33335 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33336 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33337 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33338 }
33339 }
33340
33341 -struct intel_dvo_dev_ops ch7xxx_ops = {
33342 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33343 .init = ch7xxx_init,
33344 .detect = ch7xxx_detect,
33345 .mode_valid = ch7xxx_mode_valid,
33346 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33347 index aa176f9..ed2930c 100644
33348 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33349 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33350 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33351 }
33352 }
33353
33354 -struct intel_dvo_dev_ops ivch_ops= {
33355 +const struct intel_dvo_dev_ops ivch_ops= {
33356 .init = ivch_init,
33357 .dpms = ivch_dpms,
33358 .save = ivch_save,
33359 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33360 index e1c1f73..7dbebcf 100644
33361 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33362 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33363 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33364 }
33365 }
33366
33367 -struct intel_dvo_dev_ops sil164_ops = {
33368 +const struct intel_dvo_dev_ops sil164_ops = {
33369 .init = sil164_init,
33370 .detect = sil164_detect,
33371 .mode_valid = sil164_mode_valid,
33372 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33373 index 16dce84..7e1b6f8 100644
33374 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33375 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33376 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33377 }
33378 }
33379
33380 -struct intel_dvo_dev_ops tfp410_ops = {
33381 +const struct intel_dvo_dev_ops tfp410_ops = {
33382 .init = tfp410_init,
33383 .detect = tfp410_detect,
33384 .mode_valid = tfp410_mode_valid,
33385 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33386 index 7e859d6..7d1cf2b 100644
33387 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33388 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33389 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33390 I915_READ(GTIMR));
33391 }
33392 seq_printf(m, "Interrupts received: %d\n",
33393 - atomic_read(&dev_priv->irq_received));
33394 + atomic_read_unchecked(&dev_priv->irq_received));
33395 if (dev_priv->hw_status_page != NULL) {
33396 seq_printf(m, "Current sequence: %d\n",
33397 i915_get_gem_seqno(dev));
33398 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33399 index 5449239..7e4f68d 100644
33400 --- a/drivers/gpu/drm/i915/i915_drv.c
33401 +++ b/drivers/gpu/drm/i915/i915_drv.c
33402 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33403 return i915_resume(dev);
33404 }
33405
33406 -static struct vm_operations_struct i915_gem_vm_ops = {
33407 +static const struct vm_operations_struct i915_gem_vm_ops = {
33408 .fault = i915_gem_fault,
33409 .open = drm_gem_vm_open,
33410 .close = drm_gem_vm_close,
33411 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33412 index 97163f7..c24c7c7 100644
33413 --- a/drivers/gpu/drm/i915/i915_drv.h
33414 +++ b/drivers/gpu/drm/i915/i915_drv.h
33415 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33416 /* display clock increase/decrease */
33417 /* pll clock increase/decrease */
33418 /* clock gating init */
33419 -};
33420 +} __no_const;
33421
33422 typedef struct drm_i915_private {
33423 struct drm_device *dev;
33424 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33425 int page_flipping;
33426
33427 wait_queue_head_t irq_queue;
33428 - atomic_t irq_received;
33429 + atomic_unchecked_t irq_received;
33430 /** Protects user_irq_refcount and irq_mask_reg */
33431 spinlock_t user_irq_lock;
33432 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33433 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33434 index 27a3074..eb3f959 100644
33435 --- a/drivers/gpu/drm/i915/i915_gem.c
33436 +++ b/drivers/gpu/drm/i915/i915_gem.c
33437 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33438
33439 args->aper_size = dev->gtt_total;
33440 args->aper_available_size = (args->aper_size -
33441 - atomic_read(&dev->pin_memory));
33442 + atomic_read_unchecked(&dev->pin_memory));
33443
33444 return 0;
33445 }
33446 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33447
33448 if (obj_priv->gtt_space) {
33449 atomic_dec(&dev->gtt_count);
33450 - atomic_sub(obj->size, &dev->gtt_memory);
33451 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33452
33453 drm_mm_put_block(obj_priv->gtt_space);
33454 obj_priv->gtt_space = NULL;
33455 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33456 goto search_free;
33457 }
33458 atomic_inc(&dev->gtt_count);
33459 - atomic_add(obj->size, &dev->gtt_memory);
33460 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33461
33462 /* Assert that the object is not currently in any GPU domain. As it
33463 * wasn't in the GTT, there shouldn't be any way it could have been in
33464 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33465 "%d/%d gtt bytes\n",
33466 atomic_read(&dev->object_count),
33467 atomic_read(&dev->pin_count),
33468 - atomic_read(&dev->object_memory),
33469 - atomic_read(&dev->pin_memory),
33470 - atomic_read(&dev->gtt_memory),
33471 + atomic_read_unchecked(&dev->object_memory),
33472 + atomic_read_unchecked(&dev->pin_memory),
33473 + atomic_read_unchecked(&dev->gtt_memory),
33474 dev->gtt_total);
33475 }
33476 goto err;
33477 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33478 */
33479 if (obj_priv->pin_count == 1) {
33480 atomic_inc(&dev->pin_count);
33481 - atomic_add(obj->size, &dev->pin_memory);
33482 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33483 if (!obj_priv->active &&
33484 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33485 !list_empty(&obj_priv->list))
33486 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33487 list_move_tail(&obj_priv->list,
33488 &dev_priv->mm.inactive_list);
33489 atomic_dec(&dev->pin_count);
33490 - atomic_sub(obj->size, &dev->pin_memory);
33491 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33492 }
33493 i915_verify_inactive(dev, __FILE__, __LINE__);
33494 }
33495 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33496 index 63f28ad..f5469da 100644
33497 --- a/drivers/gpu/drm/i915/i915_irq.c
33498 +++ b/drivers/gpu/drm/i915/i915_irq.c
33499 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33500 int irq_received;
33501 int ret = IRQ_NONE;
33502
33503 - atomic_inc(&dev_priv->irq_received);
33504 + atomic_inc_unchecked(&dev_priv->irq_received);
33505
33506 if (IS_IGDNG(dev))
33507 return igdng_irq_handler(dev);
33508 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33509 {
33510 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33511
33512 - atomic_set(&dev_priv->irq_received, 0);
33513 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33514
33515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33517 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33518 index 5d9c6a7..d1b0e29 100644
33519 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33520 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33521 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33522 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33523
33524 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33525 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33526 + pax_open_kernel();
33527 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33528 + pax_close_kernel();
33529
33530 /* Read the regs to test if we can talk to the device */
33531 for (i = 0; i < 0x40; i++) {
33532 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33533 index be6c6b9..8615d9c 100644
33534 --- a/drivers/gpu/drm/mga/mga_drv.h
33535 +++ b/drivers/gpu/drm/mga/mga_drv.h
33536 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33537 u32 clear_cmd;
33538 u32 maccess;
33539
33540 - atomic_t vbl_received; /**< Number of vblanks received. */
33541 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33542 wait_queue_head_t fence_queue;
33543 - atomic_t last_fence_retired;
33544 + atomic_unchecked_t last_fence_retired;
33545 u32 next_fence_to_post;
33546
33547 unsigned int fb_cpp;
33548 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33549 index daa6041..a28a5da 100644
33550 --- a/drivers/gpu/drm/mga/mga_irq.c
33551 +++ b/drivers/gpu/drm/mga/mga_irq.c
33552 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33553 if (crtc != 0)
33554 return 0;
33555
33556 - return atomic_read(&dev_priv->vbl_received);
33557 + return atomic_read_unchecked(&dev_priv->vbl_received);
33558 }
33559
33560
33561 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33562 /* VBLANK interrupt */
33563 if (status & MGA_VLINEPEN) {
33564 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33565 - atomic_inc(&dev_priv->vbl_received);
33566 + atomic_inc_unchecked(&dev_priv->vbl_received);
33567 drm_handle_vblank(dev, 0);
33568 handled = 1;
33569 }
33570 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33571 MGA_WRITE(MGA_PRIMEND, prim_end);
33572 }
33573
33574 - atomic_inc(&dev_priv->last_fence_retired);
33575 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33576 DRM_WAKEUP(&dev_priv->fence_queue);
33577 handled = 1;
33578 }
33579 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33580 * using fences.
33581 */
33582 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33583 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33584 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33585 - *sequence) <= (1 << 23)));
33586
33587 *sequence = cur_fence;
33588 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33589 index 4c39a40..b22a9ea 100644
33590 --- a/drivers/gpu/drm/r128/r128_cce.c
33591 +++ b/drivers/gpu/drm/r128/r128_cce.c
33592 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33593
33594 /* GH: Simple idle check.
33595 */
33596 - atomic_set(&dev_priv->idle_count, 0);
33597 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33598
33599 /* We don't support anything other than bus-mastering ring mode,
33600 * but the ring can be in either AGP or PCI space for the ring
33601 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33602 index 3c60829..4faf484 100644
33603 --- a/drivers/gpu/drm/r128/r128_drv.h
33604 +++ b/drivers/gpu/drm/r128/r128_drv.h
33605 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33606 int is_pci;
33607 unsigned long cce_buffers_offset;
33608
33609 - atomic_t idle_count;
33610 + atomic_unchecked_t idle_count;
33611
33612 int page_flipping;
33613 int current_page;
33614 u32 crtc_offset;
33615 u32 crtc_offset_cntl;
33616
33617 - atomic_t vbl_received;
33618 + atomic_unchecked_t vbl_received;
33619
33620 u32 color_fmt;
33621 unsigned int front_offset;
33622 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33623 index 69810fb..97bf17a 100644
33624 --- a/drivers/gpu/drm/r128/r128_irq.c
33625 +++ b/drivers/gpu/drm/r128/r128_irq.c
33626 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33627 if (crtc != 0)
33628 return 0;
33629
33630 - return atomic_read(&dev_priv->vbl_received);
33631 + return atomic_read_unchecked(&dev_priv->vbl_received);
33632 }
33633
33634 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33635 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33636 /* VBLANK interrupt */
33637 if (status & R128_CRTC_VBLANK_INT) {
33638 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33639 - atomic_inc(&dev_priv->vbl_received);
33640 + atomic_inc_unchecked(&dev_priv->vbl_received);
33641 drm_handle_vblank(dev, 0);
33642 return IRQ_HANDLED;
33643 }
33644 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33645 index af2665c..51922d2 100644
33646 --- a/drivers/gpu/drm/r128/r128_state.c
33647 +++ b/drivers/gpu/drm/r128/r128_state.c
33648 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33649
33650 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33651 {
33652 - if (atomic_read(&dev_priv->idle_count) == 0) {
33653 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33654 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33655 } else {
33656 - atomic_set(&dev_priv->idle_count, 0);
33657 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33658 }
33659 }
33660
33661 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33662 index dd72b91..8644b3c 100644
33663 --- a/drivers/gpu/drm/radeon/atom.c
33664 +++ b/drivers/gpu/drm/radeon/atom.c
33665 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33666 char name[512];
33667 int i;
33668
33669 + pax_track_stack();
33670 +
33671 ctx->card = card;
33672 ctx->bios = bios;
33673
33674 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33675 index 0d79577..efaa7a5 100644
33676 --- a/drivers/gpu/drm/radeon/mkregtable.c
33677 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33678 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33679 regex_t mask_rex;
33680 regmatch_t match[4];
33681 char buf[1024];
33682 - size_t end;
33683 + long end;
33684 int len;
33685 int done = 0;
33686 int r;
33687 unsigned o;
33688 struct offset *offset;
33689 char last_reg_s[10];
33690 - int last_reg;
33691 + unsigned long last_reg;
33692
33693 if (regcomp
33694 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33695 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33696 index 6735213..38c2c67 100644
33697 --- a/drivers/gpu/drm/radeon/radeon.h
33698 +++ b/drivers/gpu/drm/radeon/radeon.h
33699 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33700 */
33701 struct radeon_fence_driver {
33702 uint32_t scratch_reg;
33703 - atomic_t seq;
33704 + atomic_unchecked_t seq;
33705 uint32_t last_seq;
33706 unsigned long count_timeout;
33707 wait_queue_head_t queue;
33708 @@ -640,7 +640,7 @@ struct radeon_asic {
33709 uint32_t offset, uint32_t obj_size);
33710 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33711 void (*bandwidth_update)(struct radeon_device *rdev);
33712 -};
33713 +} __no_const;
33714
33715 /*
33716 * Asic structures
33717 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33718 index 4e928b9..d8b6008 100644
33719 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33720 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33721 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33722 bool linkb;
33723 struct radeon_i2c_bus_rec ddc_bus;
33724
33725 + pax_track_stack();
33726 +
33727 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33728
33729 if (data_offset == 0)
33730 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33731 }
33732 }
33733
33734 -struct bios_connector {
33735 +static struct bios_connector {
33736 bool valid;
33737 uint16_t line_mux;
33738 uint16_t devices;
33739 int connector_type;
33740 struct radeon_i2c_bus_rec ddc_bus;
33741 -};
33742 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33743
33744 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33745 drm_device
33746 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33747 uint8_t dac;
33748 union atom_supported_devices *supported_devices;
33749 int i, j;
33750 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33751
33752 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33753
33754 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33755 index 083a181..ccccae0 100644
33756 --- a/drivers/gpu/drm/radeon/radeon_display.c
33757 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33758 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33759
33760 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33761 error = freq - current_freq;
33762 - error = error < 0 ? 0xffffffff : error;
33763 + error = (int32_t)error < 0 ? 0xffffffff : error;
33764 } else
33765 error = abs(current_freq - freq);
33766 vco_diff = abs(vco - best_vco);
33767 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33768 index 76e4070..193fa7f 100644
33769 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33770 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33771 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33772
33773 /* SW interrupt */
33774 wait_queue_head_t swi_queue;
33775 - atomic_t swi_emitted;
33776 + atomic_unchecked_t swi_emitted;
33777 int vblank_crtc;
33778 uint32_t irq_enable_reg;
33779 uint32_t r500_disp_irq_reg;
33780 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33781 index 3beb26d..6ce9c4a 100644
33782 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33783 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33784 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33785 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33786 return 0;
33787 }
33788 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33789 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33790 if (!rdev->cp.ready) {
33791 /* FIXME: cp is not running assume everythings is done right
33792 * away
33793 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33794 return r;
33795 }
33796 WREG32(rdev->fence_drv.scratch_reg, 0);
33797 - atomic_set(&rdev->fence_drv.seq, 0);
33798 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33799 INIT_LIST_HEAD(&rdev->fence_drv.created);
33800 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33801 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33802 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33803 index a1bf11d..4a123c0 100644
33804 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33805 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33806 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33807 request = compat_alloc_user_space(sizeof(*request));
33808 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33809 || __put_user(req32.param, &request->param)
33810 - || __put_user((void __user *)(unsigned long)req32.value,
33811 + || __put_user((unsigned long)req32.value,
33812 &request->value))
33813 return -EFAULT;
33814
33815 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33816 index b79ecc4..8dab92d 100644
33817 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33818 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33819 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33820 unsigned int ret;
33821 RING_LOCALS;
33822
33823 - atomic_inc(&dev_priv->swi_emitted);
33824 - ret = atomic_read(&dev_priv->swi_emitted);
33825 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33826 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33827
33828 BEGIN_RING(4);
33829 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33830 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33831 drm_radeon_private_t *dev_priv =
33832 (drm_radeon_private_t *) dev->dev_private;
33833
33834 - atomic_set(&dev_priv->swi_emitted, 0);
33835 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33836 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33837
33838 dev->max_vblank_count = 0x001fffff;
33839 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33840 index 4747910..48ca4b3 100644
33841 --- a/drivers/gpu/drm/radeon/radeon_state.c
33842 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33843 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33844 {
33845 drm_radeon_private_t *dev_priv = dev->dev_private;
33846 drm_radeon_getparam_t *param = data;
33847 - int value;
33848 + int value = 0;
33849
33850 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33851
33852 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33853 index 1381e06..0e53b17 100644
33854 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33855 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33856 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33857 DRM_INFO("radeon: ttm finalized\n");
33858 }
33859
33860 -static struct vm_operations_struct radeon_ttm_vm_ops;
33861 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33862 -
33863 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33864 -{
33865 - struct ttm_buffer_object *bo;
33866 - int r;
33867 -
33868 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33869 - if (bo == NULL) {
33870 - return VM_FAULT_NOPAGE;
33871 - }
33872 - r = ttm_vm_ops->fault(vma, vmf);
33873 - return r;
33874 -}
33875 -
33876 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33877 {
33878 struct drm_file *file_priv;
33879 struct radeon_device *rdev;
33880 - int r;
33881
33882 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33883 return drm_mmap(filp, vma);
33884 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33885
33886 file_priv = (struct drm_file *)filp->private_data;
33887 rdev = file_priv->minor->dev->dev_private;
33888 - if (rdev == NULL) {
33889 + if (!rdev)
33890 return -EINVAL;
33891 - }
33892 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33893 - if (unlikely(r != 0)) {
33894 - return r;
33895 - }
33896 - if (unlikely(ttm_vm_ops == NULL)) {
33897 - ttm_vm_ops = vma->vm_ops;
33898 - radeon_ttm_vm_ops = *ttm_vm_ops;
33899 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33900 - }
33901 - vma->vm_ops = &radeon_ttm_vm_ops;
33902 - return 0;
33903 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33904 }
33905
33906
33907 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33908 index b12ff76..0bd0c6e 100644
33909 --- a/drivers/gpu/drm/radeon/rs690.c
33910 +++ b/drivers/gpu/drm/radeon/rs690.c
33911 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33912 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33913 rdev->pm.sideport_bandwidth.full)
33914 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33915 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33916 + read_delay_latency.full = rfixed_const(800 * 1000);
33917 read_delay_latency.full = rfixed_div(read_delay_latency,
33918 rdev->pm.igp_sideport_mclk);
33919 + a.full = rfixed_const(370);
33920 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33921 } else {
33922 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33923 rdev->pm.k8_bandwidth.full)
33924 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33925 index 0ed436e..e6e7ce3 100644
33926 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33927 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33928 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33929 NULL
33930 };
33931
33932 -static struct sysfs_ops ttm_bo_global_ops = {
33933 +static const struct sysfs_ops ttm_bo_global_ops = {
33934 .show = &ttm_bo_global_show
33935 };
33936
33937 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33938 index 1c040d0..f9e4af8 100644
33939 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33940 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33941 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33942 {
33943 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33944 vma->vm_private_data;
33945 - struct ttm_bo_device *bdev = bo->bdev;
33946 + struct ttm_bo_device *bdev;
33947 unsigned long bus_base;
33948 unsigned long bus_offset;
33949 unsigned long bus_size;
33950 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33951 unsigned long address = (unsigned long)vmf->virtual_address;
33952 int retval = VM_FAULT_NOPAGE;
33953
33954 + if (!bo)
33955 + return VM_FAULT_NOPAGE;
33956 + bdev = bo->bdev;
33957 +
33958 /*
33959 * Work around locking order reversal in fault / nopfn
33960 * between mmap_sem and bo_reserve: Perform a trylock operation
33961 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33962 index b170071..28ae90e 100644
33963 --- a/drivers/gpu/drm/ttm/ttm_global.c
33964 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33965 @@ -36,7 +36,7 @@
33966 struct ttm_global_item {
33967 struct mutex mutex;
33968 void *object;
33969 - int refcount;
33970 + atomic_t refcount;
33971 };
33972
33973 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33974 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33975 struct ttm_global_item *item = &glob[i];
33976 mutex_init(&item->mutex);
33977 item->object = NULL;
33978 - item->refcount = 0;
33979 + atomic_set(&item->refcount, 0);
33980 }
33981 }
33982
33983 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33984 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33985 struct ttm_global_item *item = &glob[i];
33986 BUG_ON(item->object != NULL);
33987 - BUG_ON(item->refcount != 0);
33988 + BUG_ON(atomic_read(&item->refcount) != 0);
33989 }
33990 }
33991
33992 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33993 void *object;
33994
33995 mutex_lock(&item->mutex);
33996 - if (item->refcount == 0) {
33997 + if (atomic_read(&item->refcount) == 0) {
33998 item->object = kzalloc(ref->size, GFP_KERNEL);
33999 if (unlikely(item->object == NULL)) {
34000 ret = -ENOMEM;
34001 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34002 goto out_err;
34003
34004 }
34005 - ++item->refcount;
34006 + atomic_inc(&item->refcount);
34007 ref->object = item->object;
34008 object = item->object;
34009 mutex_unlock(&item->mutex);
34010 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34011 struct ttm_global_item *item = &glob[ref->global_type];
34012
34013 mutex_lock(&item->mutex);
34014 - BUG_ON(item->refcount == 0);
34015 + BUG_ON(atomic_read(&item->refcount) == 0);
34016 BUG_ON(ref->object != item->object);
34017 - if (--item->refcount == 0) {
34018 + if (atomic_dec_and_test(&item->refcount)) {
34019 ref->release(ref);
34020 item->object = NULL;
34021 }
34022 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34023 index 072c281..d8ef483 100644
34024 --- a/drivers/gpu/drm/ttm/ttm_memory.c
34025 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
34026 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34027 NULL
34028 };
34029
34030 -static struct sysfs_ops ttm_mem_zone_ops = {
34031 +static const struct sysfs_ops ttm_mem_zone_ops = {
34032 .show = &ttm_mem_zone_show,
34033 .store = &ttm_mem_zone_store
34034 };
34035 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34036 index cafcb84..b8e66cc 100644
34037 --- a/drivers/gpu/drm/via/via_drv.h
34038 +++ b/drivers/gpu/drm/via/via_drv.h
34039 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34040 typedef uint32_t maskarray_t[5];
34041
34042 typedef struct drm_via_irq {
34043 - atomic_t irq_received;
34044 + atomic_unchecked_t irq_received;
34045 uint32_t pending_mask;
34046 uint32_t enable_mask;
34047 wait_queue_head_t irq_queue;
34048 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34049 struct timeval last_vblank;
34050 int last_vblank_valid;
34051 unsigned usec_per_vblank;
34052 - atomic_t vbl_received;
34053 + atomic_unchecked_t vbl_received;
34054 drm_via_state_t hc_state;
34055 char pci_buf[VIA_PCI_BUF_SIZE];
34056 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34057 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34058 index 5935b88..127a8a6 100644
34059 --- a/drivers/gpu/drm/via/via_irq.c
34060 +++ b/drivers/gpu/drm/via/via_irq.c
34061 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34062 if (crtc != 0)
34063 return 0;
34064
34065 - return atomic_read(&dev_priv->vbl_received);
34066 + return atomic_read_unchecked(&dev_priv->vbl_received);
34067 }
34068
34069 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34070 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34071
34072 status = VIA_READ(VIA_REG_INTERRUPT);
34073 if (status & VIA_IRQ_VBLANK_PENDING) {
34074 - atomic_inc(&dev_priv->vbl_received);
34075 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34076 + atomic_inc_unchecked(&dev_priv->vbl_received);
34077 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34078 do_gettimeofday(&cur_vblank);
34079 if (dev_priv->last_vblank_valid) {
34080 dev_priv->usec_per_vblank =
34081 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34082 dev_priv->last_vblank = cur_vblank;
34083 dev_priv->last_vblank_valid = 1;
34084 }
34085 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34086 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34087 DRM_DEBUG("US per vblank is: %u\n",
34088 dev_priv->usec_per_vblank);
34089 }
34090 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34091
34092 for (i = 0; i < dev_priv->num_irqs; ++i) {
34093 if (status & cur_irq->pending_mask) {
34094 - atomic_inc(&cur_irq->irq_received);
34095 + atomic_inc_unchecked(&cur_irq->irq_received);
34096 DRM_WAKEUP(&cur_irq->irq_queue);
34097 handled = 1;
34098 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34099 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34100 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34101 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34102 masks[irq][4]));
34103 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34104 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34105 } else {
34106 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34107 (((cur_irq_sequence =
34108 - atomic_read(&cur_irq->irq_received)) -
34109 + atomic_read_unchecked(&cur_irq->irq_received)) -
34110 *sequence) <= (1 << 23)));
34111 }
34112 *sequence = cur_irq_sequence;
34113 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34114 }
34115
34116 for (i = 0; i < dev_priv->num_irqs; ++i) {
34117 - atomic_set(&cur_irq->irq_received, 0);
34118 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34119 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34120 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34121 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34122 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34123 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34124 case VIA_IRQ_RELATIVE:
34125 irqwait->request.sequence +=
34126 - atomic_read(&cur_irq->irq_received);
34127 + atomic_read_unchecked(&cur_irq->irq_received);
34128 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34129 case VIA_IRQ_ABSOLUTE:
34130 break;
34131 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34132 index aa8688d..6a0140c 100644
34133 --- a/drivers/gpu/vga/vgaarb.c
34134 +++ b/drivers/gpu/vga/vgaarb.c
34135 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34136 uc = &priv->cards[i];
34137 }
34138
34139 - if (!uc)
34140 - return -EINVAL;
34141 + if (!uc) {
34142 + ret_val = -EINVAL;
34143 + goto done;
34144 + }
34145
34146 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34147 - return -EINVAL;
34148 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34149 + ret_val = -EINVAL;
34150 + goto done;
34151 + }
34152
34153 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34154 - return -EINVAL;
34155 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34156 + ret_val = -EINVAL;
34157 + goto done;
34158 + }
34159
34160 vga_put(pdev, io_state);
34161
34162 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34163 index 11f8069..4783396 100644
34164 --- a/drivers/hid/hid-core.c
34165 +++ b/drivers/hid/hid-core.c
34166 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34167
34168 int hid_add_device(struct hid_device *hdev)
34169 {
34170 - static atomic_t id = ATOMIC_INIT(0);
34171 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34172 int ret;
34173
34174 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34175 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34176 /* XXX hack, any other cleaner solution after the driver core
34177 * is converted to allow more than 20 bytes as the device name? */
34178 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34179 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34180 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34181
34182 ret = device_add(&hdev->dev);
34183 if (!ret)
34184 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34185 index 8b6ee24..70f657d 100644
34186 --- a/drivers/hid/usbhid/hiddev.c
34187 +++ b/drivers/hid/usbhid/hiddev.c
34188 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34189 return put_user(HID_VERSION, (int __user *)arg);
34190
34191 case HIDIOCAPPLICATION:
34192 - if (arg < 0 || arg >= hid->maxapplication)
34193 + if (arg >= hid->maxapplication)
34194 return -EINVAL;
34195
34196 for (i = 0; i < hid->maxcollection; i++)
34197 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34198 index 5d5ed69..f40533e 100644
34199 --- a/drivers/hwmon/lis3lv02d.c
34200 +++ b/drivers/hwmon/lis3lv02d.c
34201 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34202 * the lid is closed. This leads to interrupts as soon as a little move
34203 * is done.
34204 */
34205 - atomic_inc(&lis3_dev.count);
34206 + atomic_inc_unchecked(&lis3_dev.count);
34207
34208 wake_up_interruptible(&lis3_dev.misc_wait);
34209 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34210 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34211 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34212 return -EBUSY; /* already open */
34213
34214 - atomic_set(&lis3_dev.count, 0);
34215 + atomic_set_unchecked(&lis3_dev.count, 0);
34216
34217 /*
34218 * The sensor can generate interrupts for free-fall and direction
34219 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34220 add_wait_queue(&lis3_dev.misc_wait, &wait);
34221 while (true) {
34222 set_current_state(TASK_INTERRUPTIBLE);
34223 - data = atomic_xchg(&lis3_dev.count, 0);
34224 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34225 if (data)
34226 break;
34227
34228 @@ -244,7 +244,7 @@ out:
34229 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34230 {
34231 poll_wait(file, &lis3_dev.misc_wait, wait);
34232 - if (atomic_read(&lis3_dev.count))
34233 + if (atomic_read_unchecked(&lis3_dev.count))
34234 return POLLIN | POLLRDNORM;
34235 return 0;
34236 }
34237 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34238 index 7cdd76f..fe0efdf 100644
34239 --- a/drivers/hwmon/lis3lv02d.h
34240 +++ b/drivers/hwmon/lis3lv02d.h
34241 @@ -201,7 +201,7 @@ struct lis3lv02d {
34242
34243 struct input_polled_dev *idev; /* input device */
34244 struct platform_device *pdev; /* platform device */
34245 - atomic_t count; /* interrupt count after last read */
34246 + atomic_unchecked_t count; /* interrupt count after last read */
34247 int xcalib; /* calibrated null value for x */
34248 int ycalib; /* calibrated null value for y */
34249 int zcalib; /* calibrated null value for z */
34250 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34251 index 740785e..5a5c6c6 100644
34252 --- a/drivers/hwmon/sht15.c
34253 +++ b/drivers/hwmon/sht15.c
34254 @@ -112,7 +112,7 @@ struct sht15_data {
34255 int supply_uV;
34256 int supply_uV_valid;
34257 struct work_struct update_supply_work;
34258 - atomic_t interrupt_handled;
34259 + atomic_unchecked_t interrupt_handled;
34260 };
34261
34262 /**
34263 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34264 return ret;
34265
34266 gpio_direction_input(data->pdata->gpio_data);
34267 - atomic_set(&data->interrupt_handled, 0);
34268 + atomic_set_unchecked(&data->interrupt_handled, 0);
34269
34270 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34271 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34272 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34273 /* Only relevant if the interrupt hasn't occured. */
34274 - if (!atomic_read(&data->interrupt_handled))
34275 + if (!atomic_read_unchecked(&data->interrupt_handled))
34276 schedule_work(&data->read_work);
34277 }
34278 ret = wait_event_timeout(data->wait_queue,
34279 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34280 struct sht15_data *data = d;
34281 /* First disable the interrupt */
34282 disable_irq_nosync(irq);
34283 - atomic_inc(&data->interrupt_handled);
34284 + atomic_inc_unchecked(&data->interrupt_handled);
34285 /* Then schedule a reading work struct */
34286 if (data->flag != SHT15_READING_NOTHING)
34287 schedule_work(&data->read_work);
34288 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34289 here as could have gone low in meantime so verify
34290 it hasn't!
34291 */
34292 - atomic_set(&data->interrupt_handled, 0);
34293 + atomic_set_unchecked(&data->interrupt_handled, 0);
34294 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34295 /* If still not occured or another handler has been scheduled */
34296 if (gpio_get_value(data->pdata->gpio_data)
34297 - || atomic_read(&data->interrupt_handled))
34298 + || atomic_read_unchecked(&data->interrupt_handled))
34299 return;
34300 }
34301 /* Read the data back from the device */
34302 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34303 index 97851c5..cb40626 100644
34304 --- a/drivers/hwmon/w83791d.c
34305 +++ b/drivers/hwmon/w83791d.c
34306 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34307 struct i2c_board_info *info);
34308 static int w83791d_remove(struct i2c_client *client);
34309
34310 -static int w83791d_read(struct i2c_client *client, u8 register);
34311 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34312 +static int w83791d_read(struct i2c_client *client, u8 reg);
34313 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34314 static struct w83791d_data *w83791d_update_device(struct device *dev);
34315
34316 #ifdef DEBUG
34317 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34318 index 378fcb5..5e91fa8 100644
34319 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34320 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34321 @@ -43,7 +43,7 @@
34322 extern struct i2c_adapter amd756_smbus;
34323
34324 static struct i2c_adapter *s4882_adapter;
34325 -static struct i2c_algorithm *s4882_algo;
34326 +static i2c_algorithm_no_const *s4882_algo;
34327
34328 /* Wrapper access functions for multiplexed SMBus */
34329 static DEFINE_MUTEX(amd756_lock);
34330 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34331 index 29015eb..af2d8e9 100644
34332 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34333 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34334 @@ -41,7 +41,7 @@
34335 extern struct i2c_adapter *nforce2_smbus;
34336
34337 static struct i2c_adapter *s4985_adapter;
34338 -static struct i2c_algorithm *s4985_algo;
34339 +static i2c_algorithm_no_const *s4985_algo;
34340
34341 /* Wrapper access functions for multiplexed SMBus */
34342 static DEFINE_MUTEX(nforce2_lock);
34343 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34344 index 878f8ec..12376fc 100644
34345 --- a/drivers/ide/aec62xx.c
34346 +++ b/drivers/ide/aec62xx.c
34347 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34348 .cable_detect = atp86x_cable_detect,
34349 };
34350
34351 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34352 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34353 { /* 0: AEC6210 */
34354 .name = DRV_NAME,
34355 .init_chipset = init_chipset_aec62xx,
34356 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34357 index e59b6de..4b4fc65 100644
34358 --- a/drivers/ide/alim15x3.c
34359 +++ b/drivers/ide/alim15x3.c
34360 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34361 .dma_sff_read_status = ide_dma_sff_read_status,
34362 };
34363
34364 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34365 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34366 .name = DRV_NAME,
34367 .init_chipset = init_chipset_ali15x3,
34368 .init_hwif = init_hwif_ali15x3,
34369 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34370 index 628cd2e..087a414 100644
34371 --- a/drivers/ide/amd74xx.c
34372 +++ b/drivers/ide/amd74xx.c
34373 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34374 .udma_mask = udma, \
34375 }
34376
34377 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34378 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34379 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34380 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34381 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34382 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34383 index 837322b..837fd71 100644
34384 --- a/drivers/ide/atiixp.c
34385 +++ b/drivers/ide/atiixp.c
34386 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34387 .cable_detect = atiixp_cable_detect,
34388 };
34389
34390 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34391 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34392 { /* 0: IXP200/300/400/700 */
34393 .name = DRV_NAME,
34394 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34395 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34396 index ca0c46f..d55318a 100644
34397 --- a/drivers/ide/cmd64x.c
34398 +++ b/drivers/ide/cmd64x.c
34399 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34400 .dma_sff_read_status = ide_dma_sff_read_status,
34401 };
34402
34403 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34404 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34405 { /* 0: CMD643 */
34406 .name = DRV_NAME,
34407 .init_chipset = init_chipset_cmd64x,
34408 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34409 index 09f98ed..cebc5bc 100644
34410 --- a/drivers/ide/cs5520.c
34411 +++ b/drivers/ide/cs5520.c
34412 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34413 .set_dma_mode = cs5520_set_dma_mode,
34414 };
34415
34416 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34417 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34418 .name = DRV_NAME,
34419 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34420 .port_ops = &cs5520_port_ops,
34421 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34422 index 40bf05e..7d58ca0 100644
34423 --- a/drivers/ide/cs5530.c
34424 +++ b/drivers/ide/cs5530.c
34425 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34426 .udma_filter = cs5530_udma_filter,
34427 };
34428
34429 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34430 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34431 .name = DRV_NAME,
34432 .init_chipset = init_chipset_cs5530,
34433 .init_hwif = init_hwif_cs5530,
34434 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34435 index 983d957..53e6172 100644
34436 --- a/drivers/ide/cs5535.c
34437 +++ b/drivers/ide/cs5535.c
34438 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34439 .cable_detect = cs5535_cable_detect,
34440 };
34441
34442 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34443 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34444 .name = DRV_NAME,
34445 .port_ops = &cs5535_port_ops,
34446 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34447 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34448 index 74fc540..8e933d8 100644
34449 --- a/drivers/ide/cy82c693.c
34450 +++ b/drivers/ide/cy82c693.c
34451 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34452 .set_dma_mode = cy82c693_set_dma_mode,
34453 };
34454
34455 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34456 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34457 .name = DRV_NAME,
34458 .init_iops = init_iops_cy82c693,
34459 .port_ops = &cy82c693_port_ops,
34460 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34461 index 7ce68ef..e78197d 100644
34462 --- a/drivers/ide/hpt366.c
34463 +++ b/drivers/ide/hpt366.c
34464 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34465 }
34466 };
34467
34468 -static const struct hpt_info hpt36x __devinitdata = {
34469 +static const struct hpt_info hpt36x __devinitconst = {
34470 .chip_name = "HPT36x",
34471 .chip_type = HPT36x,
34472 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34473 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34474 .timings = &hpt36x_timings
34475 };
34476
34477 -static const struct hpt_info hpt370 __devinitdata = {
34478 +static const struct hpt_info hpt370 __devinitconst = {
34479 .chip_name = "HPT370",
34480 .chip_type = HPT370,
34481 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34482 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34483 .timings = &hpt37x_timings
34484 };
34485
34486 -static const struct hpt_info hpt370a __devinitdata = {
34487 +static const struct hpt_info hpt370a __devinitconst = {
34488 .chip_name = "HPT370A",
34489 .chip_type = HPT370A,
34490 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34491 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34492 .timings = &hpt37x_timings
34493 };
34494
34495 -static const struct hpt_info hpt374 __devinitdata = {
34496 +static const struct hpt_info hpt374 __devinitconst = {
34497 .chip_name = "HPT374",
34498 .chip_type = HPT374,
34499 .udma_mask = ATA_UDMA5,
34500 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34501 .timings = &hpt37x_timings
34502 };
34503
34504 -static const struct hpt_info hpt372 __devinitdata = {
34505 +static const struct hpt_info hpt372 __devinitconst = {
34506 .chip_name = "HPT372",
34507 .chip_type = HPT372,
34508 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34509 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34510 .timings = &hpt37x_timings
34511 };
34512
34513 -static const struct hpt_info hpt372a __devinitdata = {
34514 +static const struct hpt_info hpt372a __devinitconst = {
34515 .chip_name = "HPT372A",
34516 .chip_type = HPT372A,
34517 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34518 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34519 .timings = &hpt37x_timings
34520 };
34521
34522 -static const struct hpt_info hpt302 __devinitdata = {
34523 +static const struct hpt_info hpt302 __devinitconst = {
34524 .chip_name = "HPT302",
34525 .chip_type = HPT302,
34526 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34527 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34528 .timings = &hpt37x_timings
34529 };
34530
34531 -static const struct hpt_info hpt371 __devinitdata = {
34532 +static const struct hpt_info hpt371 __devinitconst = {
34533 .chip_name = "HPT371",
34534 .chip_type = HPT371,
34535 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34536 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34537 .timings = &hpt37x_timings
34538 };
34539
34540 -static const struct hpt_info hpt372n __devinitdata = {
34541 +static const struct hpt_info hpt372n __devinitconst = {
34542 .chip_name = "HPT372N",
34543 .chip_type = HPT372N,
34544 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34545 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34546 .timings = &hpt37x_timings
34547 };
34548
34549 -static const struct hpt_info hpt302n __devinitdata = {
34550 +static const struct hpt_info hpt302n __devinitconst = {
34551 .chip_name = "HPT302N",
34552 .chip_type = HPT302N,
34553 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34554 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34555 .timings = &hpt37x_timings
34556 };
34557
34558 -static const struct hpt_info hpt371n __devinitdata = {
34559 +static const struct hpt_info hpt371n __devinitconst = {
34560 .chip_name = "HPT371N",
34561 .chip_type = HPT371N,
34562 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34563 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34564 .dma_sff_read_status = ide_dma_sff_read_status,
34565 };
34566
34567 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34568 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34569 { /* 0: HPT36x */
34570 .name = DRV_NAME,
34571 .init_chipset = init_chipset_hpt366,
34572 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34573 index 2de76cc..74186a1 100644
34574 --- a/drivers/ide/ide-cd.c
34575 +++ b/drivers/ide/ide-cd.c
34576 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34577 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34578 if ((unsigned long)buf & alignment
34579 || blk_rq_bytes(rq) & q->dma_pad_mask
34580 - || object_is_on_stack(buf))
34581 + || object_starts_on_stack(buf))
34582 drive->dma = 0;
34583 }
34584 }
34585 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34586 index fefbdfc..62ff465 100644
34587 --- a/drivers/ide/ide-floppy.c
34588 +++ b/drivers/ide/ide-floppy.c
34589 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34590 u8 pc_buf[256], header_len, desc_cnt;
34591 int i, rc = 1, blocks, length;
34592
34593 + pax_track_stack();
34594 +
34595 ide_debug_log(IDE_DBG_FUNC, "enter");
34596
34597 drive->bios_cyl = 0;
34598 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34599 index 39d4e01..11538ce 100644
34600 --- a/drivers/ide/ide-pci-generic.c
34601 +++ b/drivers/ide/ide-pci-generic.c
34602 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34603 .udma_mask = ATA_UDMA6, \
34604 }
34605
34606 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34607 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34608 /* 0: Unknown */
34609 DECLARE_GENERIC_PCI_DEV(0),
34610
34611 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34612 index 0d266a5..aaca790 100644
34613 --- a/drivers/ide/it8172.c
34614 +++ b/drivers/ide/it8172.c
34615 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34616 .set_dma_mode = it8172_set_dma_mode,
34617 };
34618
34619 -static const struct ide_port_info it8172_port_info __devinitdata = {
34620 +static const struct ide_port_info it8172_port_info __devinitconst = {
34621 .name = DRV_NAME,
34622 .port_ops = &it8172_port_ops,
34623 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34624 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34625 index 4797616..4be488a 100644
34626 --- a/drivers/ide/it8213.c
34627 +++ b/drivers/ide/it8213.c
34628 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34629 .cable_detect = it8213_cable_detect,
34630 };
34631
34632 -static const struct ide_port_info it8213_chipset __devinitdata = {
34633 +static const struct ide_port_info it8213_chipset __devinitconst = {
34634 .name = DRV_NAME,
34635 .enablebits = { {0x41, 0x80, 0x80} },
34636 .port_ops = &it8213_port_ops,
34637 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34638 index 51aa745..146ee60 100644
34639 --- a/drivers/ide/it821x.c
34640 +++ b/drivers/ide/it821x.c
34641 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34642 .cable_detect = it821x_cable_detect,
34643 };
34644
34645 -static const struct ide_port_info it821x_chipset __devinitdata = {
34646 +static const struct ide_port_info it821x_chipset __devinitconst = {
34647 .name = DRV_NAME,
34648 .init_chipset = init_chipset_it821x,
34649 .init_hwif = init_hwif_it821x,
34650 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34651 index bf2be64..9270098 100644
34652 --- a/drivers/ide/jmicron.c
34653 +++ b/drivers/ide/jmicron.c
34654 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34655 .cable_detect = jmicron_cable_detect,
34656 };
34657
34658 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34659 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34660 .name = DRV_NAME,
34661 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34662 .port_ops = &jmicron_port_ops,
34663 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34664 index 95327a2..73f78d8 100644
34665 --- a/drivers/ide/ns87415.c
34666 +++ b/drivers/ide/ns87415.c
34667 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34668 .dma_sff_read_status = superio_dma_sff_read_status,
34669 };
34670
34671 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34672 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34673 .name = DRV_NAME,
34674 .init_hwif = init_hwif_ns87415,
34675 .tp_ops = &ns87415_tp_ops,
34676 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34677 index f1d70d6..e1de05b 100644
34678 --- a/drivers/ide/opti621.c
34679 +++ b/drivers/ide/opti621.c
34680 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34681 .set_pio_mode = opti621_set_pio_mode,
34682 };
34683
34684 -static const struct ide_port_info opti621_chipset __devinitdata = {
34685 +static const struct ide_port_info opti621_chipset __devinitconst = {
34686 .name = DRV_NAME,
34687 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34688 .port_ops = &opti621_port_ops,
34689 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34690 index 65ba823..7311f4d 100644
34691 --- a/drivers/ide/pdc202xx_new.c
34692 +++ b/drivers/ide/pdc202xx_new.c
34693 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34694 .udma_mask = udma, \
34695 }
34696
34697 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34698 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34699 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34700 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34701 };
34702 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34703 index cb812f3..af816ef 100644
34704 --- a/drivers/ide/pdc202xx_old.c
34705 +++ b/drivers/ide/pdc202xx_old.c
34706 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34707 .max_sectors = sectors, \
34708 }
34709
34710 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34711 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34712 { /* 0: PDC20246 */
34713 .name = DRV_NAME,
34714 .init_chipset = init_chipset_pdc202xx,
34715 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34716 index bf14f39..15c4b98 100644
34717 --- a/drivers/ide/piix.c
34718 +++ b/drivers/ide/piix.c
34719 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34720 .udma_mask = udma, \
34721 }
34722
34723 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34724 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34725 /* 0: MPIIX */
34726 { /*
34727 * MPIIX actually has only a single IDE channel mapped to
34728 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34729 index a6414a8..c04173e 100644
34730 --- a/drivers/ide/rz1000.c
34731 +++ b/drivers/ide/rz1000.c
34732 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34733 }
34734 }
34735
34736 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34737 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34738 .name = DRV_NAME,
34739 .host_flags = IDE_HFLAG_NO_DMA,
34740 };
34741 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34742 index d467478..9203942 100644
34743 --- a/drivers/ide/sc1200.c
34744 +++ b/drivers/ide/sc1200.c
34745 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34746 .dma_sff_read_status = ide_dma_sff_read_status,
34747 };
34748
34749 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34750 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34751 .name = DRV_NAME,
34752 .port_ops = &sc1200_port_ops,
34753 .dma_ops = &sc1200_dma_ops,
34754 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34755 index 1104bb3..59c5194 100644
34756 --- a/drivers/ide/scc_pata.c
34757 +++ b/drivers/ide/scc_pata.c
34758 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34759 .dma_sff_read_status = scc_dma_sff_read_status,
34760 };
34761
34762 -static const struct ide_port_info scc_chipset __devinitdata = {
34763 +static const struct ide_port_info scc_chipset __devinitconst = {
34764 .name = "sccIDE",
34765 .init_iops = init_iops_scc,
34766 .init_dma = scc_init_dma,
34767 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34768 index b6554ef..6cc2cc3 100644
34769 --- a/drivers/ide/serverworks.c
34770 +++ b/drivers/ide/serverworks.c
34771 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34772 .cable_detect = svwks_cable_detect,
34773 };
34774
34775 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34776 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34777 { /* 0: OSB4 */
34778 .name = DRV_NAME,
34779 .init_chipset = init_chipset_svwks,
34780 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34781 index ab3db61..afed580 100644
34782 --- a/drivers/ide/setup-pci.c
34783 +++ b/drivers/ide/setup-pci.c
34784 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34785 int ret, i, n_ports = dev2 ? 4 : 2;
34786 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34787
34788 + pax_track_stack();
34789 +
34790 for (i = 0; i < n_ports / 2; i++) {
34791 ret = ide_setup_pci_controller(pdev[i], d, !i);
34792 if (ret < 0)
34793 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34794 index d95df52..0b03a39 100644
34795 --- a/drivers/ide/siimage.c
34796 +++ b/drivers/ide/siimage.c
34797 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34798 .udma_mask = ATA_UDMA6, \
34799 }
34800
34801 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34802 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34803 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34804 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34805 };
34806 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34807 index 3b88eba..ca8699d 100644
34808 --- a/drivers/ide/sis5513.c
34809 +++ b/drivers/ide/sis5513.c
34810 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34811 .cable_detect = sis_cable_detect,
34812 };
34813
34814 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34815 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34816 .name = DRV_NAME,
34817 .init_chipset = init_chipset_sis5513,
34818 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34819 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34820 index d698da4..fca42a4 100644
34821 --- a/drivers/ide/sl82c105.c
34822 +++ b/drivers/ide/sl82c105.c
34823 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34824 .dma_sff_read_status = ide_dma_sff_read_status,
34825 };
34826
34827 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34828 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34829 .name = DRV_NAME,
34830 .init_chipset = init_chipset_sl82c105,
34831 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34832 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34833 index 1ccfb40..83d5779 100644
34834 --- a/drivers/ide/slc90e66.c
34835 +++ b/drivers/ide/slc90e66.c
34836 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34837 .cable_detect = slc90e66_cable_detect,
34838 };
34839
34840 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34841 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34842 .name = DRV_NAME,
34843 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34844 .port_ops = &slc90e66_port_ops,
34845 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34846 index 05a93d6..5f9e325 100644
34847 --- a/drivers/ide/tc86c001.c
34848 +++ b/drivers/ide/tc86c001.c
34849 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34850 .dma_sff_read_status = ide_dma_sff_read_status,
34851 };
34852
34853 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34854 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34855 .name = DRV_NAME,
34856 .init_hwif = init_hwif_tc86c001,
34857 .port_ops = &tc86c001_port_ops,
34858 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34859 index 8773c3b..7907d6c 100644
34860 --- a/drivers/ide/triflex.c
34861 +++ b/drivers/ide/triflex.c
34862 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34863 .set_dma_mode = triflex_set_mode,
34864 };
34865
34866 -static const struct ide_port_info triflex_device __devinitdata = {
34867 +static const struct ide_port_info triflex_device __devinitconst = {
34868 .name = DRV_NAME,
34869 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34870 .port_ops = &triflex_port_ops,
34871 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34872 index 4b42ca0..e494a98 100644
34873 --- a/drivers/ide/trm290.c
34874 +++ b/drivers/ide/trm290.c
34875 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34876 .dma_check = trm290_dma_check,
34877 };
34878
34879 -static const struct ide_port_info trm290_chipset __devinitdata = {
34880 +static const struct ide_port_info trm290_chipset __devinitconst = {
34881 .name = DRV_NAME,
34882 .init_hwif = init_hwif_trm290,
34883 .tp_ops = &trm290_tp_ops,
34884 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34885 index 028de26..520d5d5 100644
34886 --- a/drivers/ide/via82cxxx.c
34887 +++ b/drivers/ide/via82cxxx.c
34888 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34889 .cable_detect = via82cxxx_cable_detect,
34890 };
34891
34892 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34893 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34894 .name = DRV_NAME,
34895 .init_chipset = init_chipset_via82cxxx,
34896 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34897 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34898 index 2cd00b5..14de699 100644
34899 --- a/drivers/ieee1394/dv1394.c
34900 +++ b/drivers/ieee1394/dv1394.c
34901 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34902 based upon DIF section and sequence
34903 */
34904
34905 -static void inline
34906 +static inline void
34907 frame_put_packet (struct frame *f, struct packet *p)
34908 {
34909 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34910 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34911 index e947d8f..6a966b9 100644
34912 --- a/drivers/ieee1394/hosts.c
34913 +++ b/drivers/ieee1394/hosts.c
34914 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34915 }
34916
34917 static struct hpsb_host_driver dummy_driver = {
34918 + .name = "dummy",
34919 .transmit_packet = dummy_transmit_packet,
34920 .devctl = dummy_devctl,
34921 .isoctl = dummy_isoctl
34922 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34923 index ddaab6e..8d37435 100644
34924 --- a/drivers/ieee1394/init_ohci1394_dma.c
34925 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34926 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34927 for (func = 0; func < 8; func++) {
34928 u32 class = read_pci_config(num,slot,func,
34929 PCI_CLASS_REVISION);
34930 - if ((class == 0xffffffff))
34931 + if (class == 0xffffffff)
34932 continue; /* No device at this func */
34933
34934 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34935 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34936 index 65c1429..5d8c11f 100644
34937 --- a/drivers/ieee1394/ohci1394.c
34938 +++ b/drivers/ieee1394/ohci1394.c
34939 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34940 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34941
34942 /* Module Parameters */
34943 -static int phys_dma = 1;
34944 +static int phys_dma;
34945 module_param(phys_dma, int, 0444);
34946 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34947 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34948
34949 static void dma_trm_tasklet(unsigned long data);
34950 static void dma_trm_reset(struct dma_trm_ctx *d);
34951 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34952 index f199896..78c9fc8 100644
34953 --- a/drivers/ieee1394/sbp2.c
34954 +++ b/drivers/ieee1394/sbp2.c
34955 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34956 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34957 MODULE_LICENSE("GPL");
34958
34959 -static int sbp2_module_init(void)
34960 +static int __init sbp2_module_init(void)
34961 {
34962 int ret;
34963
34964 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34965 index a5dea6b..0cefe8f 100644
34966 --- a/drivers/infiniband/core/cm.c
34967 +++ b/drivers/infiniband/core/cm.c
34968 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34969
34970 struct cm_counter_group {
34971 struct kobject obj;
34972 - atomic_long_t counter[CM_ATTR_COUNT];
34973 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34974 };
34975
34976 struct cm_counter_attribute {
34977 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34978 struct ib_mad_send_buf *msg = NULL;
34979 int ret;
34980
34981 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34982 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34983 counter[CM_REQ_COUNTER]);
34984
34985 /* Quick state check to discard duplicate REQs. */
34986 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34987 if (!cm_id_priv)
34988 return;
34989
34990 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34991 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34992 counter[CM_REP_COUNTER]);
34993 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34994 if (ret)
34995 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34996 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34997 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34998 spin_unlock_irq(&cm_id_priv->lock);
34999 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35000 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35001 counter[CM_RTU_COUNTER]);
35002 goto out;
35003 }
35004 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35005 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35006 dreq_msg->local_comm_id);
35007 if (!cm_id_priv) {
35008 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35009 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35010 counter[CM_DREQ_COUNTER]);
35011 cm_issue_drep(work->port, work->mad_recv_wc);
35012 return -EINVAL;
35013 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35014 case IB_CM_MRA_REP_RCVD:
35015 break;
35016 case IB_CM_TIMEWAIT:
35017 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35018 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35019 counter[CM_DREQ_COUNTER]);
35020 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35021 goto unlock;
35022 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35023 cm_free_msg(msg);
35024 goto deref;
35025 case IB_CM_DREQ_RCVD:
35026 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35027 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35028 counter[CM_DREQ_COUNTER]);
35029 goto unlock;
35030 default:
35031 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35032 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35033 cm_id_priv->msg, timeout)) {
35034 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35035 - atomic_long_inc(&work->port->
35036 + atomic_long_inc_unchecked(&work->port->
35037 counter_group[CM_RECV_DUPLICATES].
35038 counter[CM_MRA_COUNTER]);
35039 goto out;
35040 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35041 break;
35042 case IB_CM_MRA_REQ_RCVD:
35043 case IB_CM_MRA_REP_RCVD:
35044 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35045 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35046 counter[CM_MRA_COUNTER]);
35047 /* fall through */
35048 default:
35049 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35050 case IB_CM_LAP_IDLE:
35051 break;
35052 case IB_CM_MRA_LAP_SENT:
35053 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35054 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35055 counter[CM_LAP_COUNTER]);
35056 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35057 goto unlock;
35058 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35059 cm_free_msg(msg);
35060 goto deref;
35061 case IB_CM_LAP_RCVD:
35062 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35063 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35064 counter[CM_LAP_COUNTER]);
35065 goto unlock;
35066 default:
35067 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35068 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35069 if (cur_cm_id_priv) {
35070 spin_unlock_irq(&cm.lock);
35071 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35072 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35073 counter[CM_SIDR_REQ_COUNTER]);
35074 goto out; /* Duplicate message. */
35075 }
35076 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35077 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35078 msg->retries = 1;
35079
35080 - atomic_long_add(1 + msg->retries,
35081 + atomic_long_add_unchecked(1 + msg->retries,
35082 &port->counter_group[CM_XMIT].counter[attr_index]);
35083 if (msg->retries)
35084 - atomic_long_add(msg->retries,
35085 + atomic_long_add_unchecked(msg->retries,
35086 &port->counter_group[CM_XMIT_RETRIES].
35087 counter[attr_index]);
35088
35089 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35090 }
35091
35092 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35093 - atomic_long_inc(&port->counter_group[CM_RECV].
35094 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35095 counter[attr_id - CM_ATTR_ID_OFFSET]);
35096
35097 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35098 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35099 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35100
35101 return sprintf(buf, "%ld\n",
35102 - atomic_long_read(&group->counter[cm_attr->index]));
35103 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35104 }
35105
35106 -static struct sysfs_ops cm_counter_ops = {
35107 +static const struct sysfs_ops cm_counter_ops = {
35108 .show = cm_show_counter
35109 };
35110
35111 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35112 index 8fd3a6f..61d8075 100644
35113 --- a/drivers/infiniband/core/cma.c
35114 +++ b/drivers/infiniband/core/cma.c
35115 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35116
35117 req.private_data_len = sizeof(struct cma_hdr) +
35118 conn_param->private_data_len;
35119 + if (req.private_data_len < conn_param->private_data_len)
35120 + return -EINVAL;
35121 +
35122 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35123 if (!req.private_data)
35124 return -ENOMEM;
35125 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35126 memset(&req, 0, sizeof req);
35127 offset = cma_user_data_offset(id_priv->id.ps);
35128 req.private_data_len = offset + conn_param->private_data_len;
35129 + if (req.private_data_len < conn_param->private_data_len)
35130 + return -EINVAL;
35131 +
35132 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35133 if (!private_data)
35134 return -ENOMEM;
35135 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35136 index 4507043..14ad522 100644
35137 --- a/drivers/infiniband/core/fmr_pool.c
35138 +++ b/drivers/infiniband/core/fmr_pool.c
35139 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
35140
35141 struct task_struct *thread;
35142
35143 - atomic_t req_ser;
35144 - atomic_t flush_ser;
35145 + atomic_unchecked_t req_ser;
35146 + atomic_unchecked_t flush_ser;
35147
35148 wait_queue_head_t force_wait;
35149 };
35150 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35151 struct ib_fmr_pool *pool = pool_ptr;
35152
35153 do {
35154 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35155 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35156 ib_fmr_batch_release(pool);
35157
35158 - atomic_inc(&pool->flush_ser);
35159 + atomic_inc_unchecked(&pool->flush_ser);
35160 wake_up_interruptible(&pool->force_wait);
35161
35162 if (pool->flush_function)
35163 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35164 }
35165
35166 set_current_state(TASK_INTERRUPTIBLE);
35167 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35168 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35169 !kthread_should_stop())
35170 schedule();
35171 __set_current_state(TASK_RUNNING);
35172 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35173 pool->dirty_watermark = params->dirty_watermark;
35174 pool->dirty_len = 0;
35175 spin_lock_init(&pool->pool_lock);
35176 - atomic_set(&pool->req_ser, 0);
35177 - atomic_set(&pool->flush_ser, 0);
35178 + atomic_set_unchecked(&pool->req_ser, 0);
35179 + atomic_set_unchecked(&pool->flush_ser, 0);
35180 init_waitqueue_head(&pool->force_wait);
35181
35182 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35183 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35184 }
35185 spin_unlock_irq(&pool->pool_lock);
35186
35187 - serial = atomic_inc_return(&pool->req_ser);
35188 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35189 wake_up_process(pool->thread);
35190
35191 if (wait_event_interruptible(pool->force_wait,
35192 - atomic_read(&pool->flush_ser) - serial >= 0))
35193 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35194 return -EINTR;
35195
35196 return 0;
35197 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35198 } else {
35199 list_add_tail(&fmr->list, &pool->dirty_list);
35200 if (++pool->dirty_len >= pool->dirty_watermark) {
35201 - atomic_inc(&pool->req_ser);
35202 + atomic_inc_unchecked(&pool->req_ser);
35203 wake_up_process(pool->thread);
35204 }
35205 }
35206 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35207 index 158a214..1558bb7 100644
35208 --- a/drivers/infiniband/core/sysfs.c
35209 +++ b/drivers/infiniband/core/sysfs.c
35210 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35211 return port_attr->show(p, port_attr, buf);
35212 }
35213
35214 -static struct sysfs_ops port_sysfs_ops = {
35215 +static const struct sysfs_ops port_sysfs_ops = {
35216 .show = port_attr_show
35217 };
35218
35219 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35220 index 5440da0..1194ecb 100644
35221 --- a/drivers/infiniband/core/uverbs_marshall.c
35222 +++ b/drivers/infiniband/core/uverbs_marshall.c
35223 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35224 dst->grh.sgid_index = src->grh.sgid_index;
35225 dst->grh.hop_limit = src->grh.hop_limit;
35226 dst->grh.traffic_class = src->grh.traffic_class;
35227 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35228 dst->dlid = src->dlid;
35229 dst->sl = src->sl;
35230 dst->src_path_bits = src->src_path_bits;
35231 dst->static_rate = src->static_rate;
35232 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35233 dst->port_num = src->port_num;
35234 + dst->reserved = 0;
35235 }
35236 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35237
35238 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35239 struct ib_qp_attr *src)
35240 {
35241 + dst->qp_state = src->qp_state;
35242 dst->cur_qp_state = src->cur_qp_state;
35243 dst->path_mtu = src->path_mtu;
35244 dst->path_mig_state = src->path_mig_state;
35245 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35246 dst->rnr_retry = src->rnr_retry;
35247 dst->alt_port_num = src->alt_port_num;
35248 dst->alt_timeout = src->alt_timeout;
35249 + memset(dst->reserved, 0, sizeof(dst->reserved));
35250 }
35251 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35252
35253 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35254 index 100da85..62e6b88 100644
35255 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35256 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35257 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35258 struct infinipath_counters counters;
35259 struct ipath_devdata *dd;
35260
35261 + pax_track_stack();
35262 +
35263 dd = file->f_path.dentry->d_inode->i_private;
35264 dd->ipath_f_read_counters(dd, &counters);
35265
35266 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35267 index cbde0cf..afaf55c 100644
35268 --- a/drivers/infiniband/hw/nes/nes.c
35269 +++ b/drivers/infiniband/hw/nes/nes.c
35270 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35271 LIST_HEAD(nes_adapter_list);
35272 static LIST_HEAD(nes_dev_list);
35273
35274 -atomic_t qps_destroyed;
35275 +atomic_unchecked_t qps_destroyed;
35276
35277 static unsigned int ee_flsh_adapter;
35278 static unsigned int sysfs_nonidx_addr;
35279 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35280 struct nes_adapter *nesadapter = nesdev->nesadapter;
35281 u32 qp_id;
35282
35283 - atomic_inc(&qps_destroyed);
35284 + atomic_inc_unchecked(&qps_destroyed);
35285
35286 /* Free the control structures */
35287
35288 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35289 index bcc6abc..9c76b2f 100644
35290 --- a/drivers/infiniband/hw/nes/nes.h
35291 +++ b/drivers/infiniband/hw/nes/nes.h
35292 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35293 extern unsigned int wqm_quanta;
35294 extern struct list_head nes_adapter_list;
35295
35296 -extern atomic_t cm_connects;
35297 -extern atomic_t cm_accepts;
35298 -extern atomic_t cm_disconnects;
35299 -extern atomic_t cm_closes;
35300 -extern atomic_t cm_connecteds;
35301 -extern atomic_t cm_connect_reqs;
35302 -extern atomic_t cm_rejects;
35303 -extern atomic_t mod_qp_timouts;
35304 -extern atomic_t qps_created;
35305 -extern atomic_t qps_destroyed;
35306 -extern atomic_t sw_qps_destroyed;
35307 +extern atomic_unchecked_t cm_connects;
35308 +extern atomic_unchecked_t cm_accepts;
35309 +extern atomic_unchecked_t cm_disconnects;
35310 +extern atomic_unchecked_t cm_closes;
35311 +extern atomic_unchecked_t cm_connecteds;
35312 +extern atomic_unchecked_t cm_connect_reqs;
35313 +extern atomic_unchecked_t cm_rejects;
35314 +extern atomic_unchecked_t mod_qp_timouts;
35315 +extern atomic_unchecked_t qps_created;
35316 +extern atomic_unchecked_t qps_destroyed;
35317 +extern atomic_unchecked_t sw_qps_destroyed;
35318 extern u32 mh_detected;
35319 extern u32 mh_pauses_sent;
35320 extern u32 cm_packets_sent;
35321 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35322 extern u32 cm_listens_created;
35323 extern u32 cm_listens_destroyed;
35324 extern u32 cm_backlog_drops;
35325 -extern atomic_t cm_loopbacks;
35326 -extern atomic_t cm_nodes_created;
35327 -extern atomic_t cm_nodes_destroyed;
35328 -extern atomic_t cm_accel_dropped_pkts;
35329 -extern atomic_t cm_resets_recvd;
35330 +extern atomic_unchecked_t cm_loopbacks;
35331 +extern atomic_unchecked_t cm_nodes_created;
35332 +extern atomic_unchecked_t cm_nodes_destroyed;
35333 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35334 +extern atomic_unchecked_t cm_resets_recvd;
35335
35336 extern u32 int_mod_timer_init;
35337 extern u32 int_mod_cq_depth_256;
35338 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35339 index 73473db..5ed06e8 100644
35340 --- a/drivers/infiniband/hw/nes/nes_cm.c
35341 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35342 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35343 u32 cm_listens_created;
35344 u32 cm_listens_destroyed;
35345 u32 cm_backlog_drops;
35346 -atomic_t cm_loopbacks;
35347 -atomic_t cm_nodes_created;
35348 -atomic_t cm_nodes_destroyed;
35349 -atomic_t cm_accel_dropped_pkts;
35350 -atomic_t cm_resets_recvd;
35351 +atomic_unchecked_t cm_loopbacks;
35352 +atomic_unchecked_t cm_nodes_created;
35353 +atomic_unchecked_t cm_nodes_destroyed;
35354 +atomic_unchecked_t cm_accel_dropped_pkts;
35355 +atomic_unchecked_t cm_resets_recvd;
35356
35357 static inline int mini_cm_accelerated(struct nes_cm_core *,
35358 struct nes_cm_node *);
35359 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35360
35361 static struct nes_cm_core *g_cm_core;
35362
35363 -atomic_t cm_connects;
35364 -atomic_t cm_accepts;
35365 -atomic_t cm_disconnects;
35366 -atomic_t cm_closes;
35367 -atomic_t cm_connecteds;
35368 -atomic_t cm_connect_reqs;
35369 -atomic_t cm_rejects;
35370 +atomic_unchecked_t cm_connects;
35371 +atomic_unchecked_t cm_accepts;
35372 +atomic_unchecked_t cm_disconnects;
35373 +atomic_unchecked_t cm_closes;
35374 +atomic_unchecked_t cm_connecteds;
35375 +atomic_unchecked_t cm_connect_reqs;
35376 +atomic_unchecked_t cm_rejects;
35377
35378
35379 /**
35380 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35381 cm_node->rem_mac);
35382
35383 add_hte_node(cm_core, cm_node);
35384 - atomic_inc(&cm_nodes_created);
35385 + atomic_inc_unchecked(&cm_nodes_created);
35386
35387 return cm_node;
35388 }
35389 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35390 }
35391
35392 atomic_dec(&cm_core->node_cnt);
35393 - atomic_inc(&cm_nodes_destroyed);
35394 + atomic_inc_unchecked(&cm_nodes_destroyed);
35395 nesqp = cm_node->nesqp;
35396 if (nesqp) {
35397 nesqp->cm_node = NULL;
35398 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35399
35400 static void drop_packet(struct sk_buff *skb)
35401 {
35402 - atomic_inc(&cm_accel_dropped_pkts);
35403 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35404 dev_kfree_skb_any(skb);
35405 }
35406
35407 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35408
35409 int reset = 0; /* whether to send reset in case of err.. */
35410 int passive_state;
35411 - atomic_inc(&cm_resets_recvd);
35412 + atomic_inc_unchecked(&cm_resets_recvd);
35413 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35414 " refcnt=%d\n", cm_node, cm_node->state,
35415 atomic_read(&cm_node->ref_count));
35416 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35417 rem_ref_cm_node(cm_node->cm_core, cm_node);
35418 return NULL;
35419 }
35420 - atomic_inc(&cm_loopbacks);
35421 + atomic_inc_unchecked(&cm_loopbacks);
35422 loopbackremotenode->loopbackpartner = cm_node;
35423 loopbackremotenode->tcp_cntxt.rcv_wscale =
35424 NES_CM_DEFAULT_RCV_WND_SCALE;
35425 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35426 add_ref_cm_node(cm_node);
35427 } else if (cm_node->state == NES_CM_STATE_TSA) {
35428 rem_ref_cm_node(cm_core, cm_node);
35429 - atomic_inc(&cm_accel_dropped_pkts);
35430 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35431 dev_kfree_skb_any(skb);
35432 break;
35433 }
35434 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35435
35436 if ((cm_id) && (cm_id->event_handler)) {
35437 if (issue_disconn) {
35438 - atomic_inc(&cm_disconnects);
35439 + atomic_inc_unchecked(&cm_disconnects);
35440 cm_event.event = IW_CM_EVENT_DISCONNECT;
35441 cm_event.status = disconn_status;
35442 cm_event.local_addr = cm_id->local_addr;
35443 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35444 }
35445
35446 if (issue_close) {
35447 - atomic_inc(&cm_closes);
35448 + atomic_inc_unchecked(&cm_closes);
35449 nes_disconnect(nesqp, 1);
35450
35451 cm_id->provider_data = nesqp;
35452 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35453
35454 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35455 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35456 - atomic_inc(&cm_accepts);
35457 + atomic_inc_unchecked(&cm_accepts);
35458
35459 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35460 atomic_read(&nesvnic->netdev->refcnt));
35461 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35462
35463 struct nes_cm_core *cm_core;
35464
35465 - atomic_inc(&cm_rejects);
35466 + atomic_inc_unchecked(&cm_rejects);
35467 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35468 loopback = cm_node->loopbackpartner;
35469 cm_core = cm_node->cm_core;
35470 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35471 ntohl(cm_id->local_addr.sin_addr.s_addr),
35472 ntohs(cm_id->local_addr.sin_port));
35473
35474 - atomic_inc(&cm_connects);
35475 + atomic_inc_unchecked(&cm_connects);
35476 nesqp->active_conn = 1;
35477
35478 /* cache the cm_id in the qp */
35479 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35480 if (nesqp->destroyed) {
35481 return;
35482 }
35483 - atomic_inc(&cm_connecteds);
35484 + atomic_inc_unchecked(&cm_connecteds);
35485 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35486 " local port 0x%04X. jiffies = %lu.\n",
35487 nesqp->hwqp.qp_id,
35488 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35489
35490 ret = cm_id->event_handler(cm_id, &cm_event);
35491 cm_id->add_ref(cm_id);
35492 - atomic_inc(&cm_closes);
35493 + atomic_inc_unchecked(&cm_closes);
35494 cm_event.event = IW_CM_EVENT_CLOSE;
35495 cm_event.status = IW_CM_EVENT_STATUS_OK;
35496 cm_event.provider_data = cm_id->provider_data;
35497 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35498 return;
35499 cm_id = cm_node->cm_id;
35500
35501 - atomic_inc(&cm_connect_reqs);
35502 + atomic_inc_unchecked(&cm_connect_reqs);
35503 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35504 cm_node, cm_id, jiffies);
35505
35506 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35507 return;
35508 cm_id = cm_node->cm_id;
35509
35510 - atomic_inc(&cm_connect_reqs);
35511 + atomic_inc_unchecked(&cm_connect_reqs);
35512 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35513 cm_node, cm_id, jiffies);
35514
35515 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35516 index e593af3..870694a 100644
35517 --- a/drivers/infiniband/hw/nes/nes_nic.c
35518 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35519 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35520 target_stat_values[++index] = mh_detected;
35521 target_stat_values[++index] = mh_pauses_sent;
35522 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35523 - target_stat_values[++index] = atomic_read(&cm_connects);
35524 - target_stat_values[++index] = atomic_read(&cm_accepts);
35525 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35526 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35527 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35528 - target_stat_values[++index] = atomic_read(&cm_rejects);
35529 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35530 - target_stat_values[++index] = atomic_read(&qps_created);
35531 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35532 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35533 - target_stat_values[++index] = atomic_read(&cm_closes);
35534 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35535 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35536 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35537 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35538 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35539 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35540 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35541 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35542 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35543 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35544 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35545 target_stat_values[++index] = cm_packets_sent;
35546 target_stat_values[++index] = cm_packets_bounced;
35547 target_stat_values[++index] = cm_packets_created;
35548 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35549 target_stat_values[++index] = cm_listens_created;
35550 target_stat_values[++index] = cm_listens_destroyed;
35551 target_stat_values[++index] = cm_backlog_drops;
35552 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35553 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35554 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35555 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35556 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35557 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35558 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35559 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35560 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35561 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35562 target_stat_values[++index] = int_mod_timer_init;
35563 target_stat_values[++index] = int_mod_cq_depth_1;
35564 target_stat_values[++index] = int_mod_cq_depth_4;
35565 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35566 index a680c42..f914deb 100644
35567 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35568 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35569 @@ -45,9 +45,9 @@
35570
35571 #include <rdma/ib_umem.h>
35572
35573 -atomic_t mod_qp_timouts;
35574 -atomic_t qps_created;
35575 -atomic_t sw_qps_destroyed;
35576 +atomic_unchecked_t mod_qp_timouts;
35577 +atomic_unchecked_t qps_created;
35578 +atomic_unchecked_t sw_qps_destroyed;
35579
35580 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35581
35582 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35583 if (init_attr->create_flags)
35584 return ERR_PTR(-EINVAL);
35585
35586 - atomic_inc(&qps_created);
35587 + atomic_inc_unchecked(&qps_created);
35588 switch (init_attr->qp_type) {
35589 case IB_QPT_RC:
35590 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35591 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35592 struct iw_cm_event cm_event;
35593 int ret;
35594
35595 - atomic_inc(&sw_qps_destroyed);
35596 + atomic_inc_unchecked(&sw_qps_destroyed);
35597 nesqp->destroyed = 1;
35598
35599 /* Blow away the connection if it exists. */
35600 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35601 index ac11be0..3883c04 100644
35602 --- a/drivers/input/gameport/gameport.c
35603 +++ b/drivers/input/gameport/gameport.c
35604 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35605 */
35606 static void gameport_init_port(struct gameport *gameport)
35607 {
35608 - static atomic_t gameport_no = ATOMIC_INIT(0);
35609 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35610
35611 __module_get(THIS_MODULE);
35612
35613 mutex_init(&gameport->drv_mutex);
35614 device_initialize(&gameport->dev);
35615 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35616 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35617 gameport->dev.bus = &gameport_bus;
35618 gameport->dev.release = gameport_release_port;
35619 if (gameport->parent)
35620 diff --git a/drivers/input/input.c b/drivers/input/input.c
35621 index c82ae82..8cfb9cb 100644
35622 --- a/drivers/input/input.c
35623 +++ b/drivers/input/input.c
35624 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35625 */
35626 int input_register_device(struct input_dev *dev)
35627 {
35628 - static atomic_t input_no = ATOMIC_INIT(0);
35629 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35630 struct input_handler *handler;
35631 const char *path;
35632 int error;
35633 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35634 dev->setkeycode = input_default_setkeycode;
35635
35636 dev_set_name(&dev->dev, "input%ld",
35637 - (unsigned long) atomic_inc_return(&input_no) - 1);
35638 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35639
35640 error = device_add(&dev->dev);
35641 if (error)
35642 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35643 index ca13a6b..b032b0c 100644
35644 --- a/drivers/input/joystick/sidewinder.c
35645 +++ b/drivers/input/joystick/sidewinder.c
35646 @@ -30,6 +30,7 @@
35647 #include <linux/kernel.h>
35648 #include <linux/module.h>
35649 #include <linux/slab.h>
35650 +#include <linux/sched.h>
35651 #include <linux/init.h>
35652 #include <linux/input.h>
35653 #include <linux/gameport.h>
35654 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35655 unsigned char buf[SW_LENGTH];
35656 int i;
35657
35658 + pax_track_stack();
35659 +
35660 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35661
35662 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35663 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35664 index 79e3edc..01412b9 100644
35665 --- a/drivers/input/joystick/xpad.c
35666 +++ b/drivers/input/joystick/xpad.c
35667 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35668
35669 static int xpad_led_probe(struct usb_xpad *xpad)
35670 {
35671 - static atomic_t led_seq = ATOMIC_INIT(0);
35672 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35673 long led_no;
35674 struct xpad_led *led;
35675 struct led_classdev *led_cdev;
35676 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35677 if (!led)
35678 return -ENOMEM;
35679
35680 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35681 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35682
35683 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35684 led->xpad = xpad;
35685 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35686 index 0236f0d..c7327f1 100644
35687 --- a/drivers/input/serio/serio.c
35688 +++ b/drivers/input/serio/serio.c
35689 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35690 */
35691 static void serio_init_port(struct serio *serio)
35692 {
35693 - static atomic_t serio_no = ATOMIC_INIT(0);
35694 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35695
35696 __module_get(THIS_MODULE);
35697
35698 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35699 mutex_init(&serio->drv_mutex);
35700 device_initialize(&serio->dev);
35701 dev_set_name(&serio->dev, "serio%ld",
35702 - (long)atomic_inc_return(&serio_no) - 1);
35703 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35704 serio->dev.bus = &serio_bus;
35705 serio->dev.release = serio_release_port;
35706 if (serio->parent) {
35707 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35708 index 33dcd8d..2783d25 100644
35709 --- a/drivers/isdn/gigaset/common.c
35710 +++ b/drivers/isdn/gigaset/common.c
35711 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35712 cs->commands_pending = 0;
35713 cs->cur_at_seq = 0;
35714 cs->gotfwver = -1;
35715 - cs->open_count = 0;
35716 + local_set(&cs->open_count, 0);
35717 cs->dev = NULL;
35718 cs->tty = NULL;
35719 cs->tty_dev = NULL;
35720 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35721 index a2f6125..6a70677 100644
35722 --- a/drivers/isdn/gigaset/gigaset.h
35723 +++ b/drivers/isdn/gigaset/gigaset.h
35724 @@ -34,6 +34,7 @@
35725 #include <linux/tty_driver.h>
35726 #include <linux/list.h>
35727 #include <asm/atomic.h>
35728 +#include <asm/local.h>
35729
35730 #define GIG_VERSION {0,5,0,0}
35731 #define GIG_COMPAT {0,4,0,0}
35732 @@ -446,7 +447,7 @@ struct cardstate {
35733 spinlock_t cmdlock;
35734 unsigned curlen, cmdbytes;
35735
35736 - unsigned open_count;
35737 + local_t open_count;
35738 struct tty_struct *tty;
35739 struct tasklet_struct if_wake_tasklet;
35740 unsigned control_state;
35741 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35742 index b3065b8..c7e8cc9 100644
35743 --- a/drivers/isdn/gigaset/interface.c
35744 +++ b/drivers/isdn/gigaset/interface.c
35745 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35746 return -ERESTARTSYS; // FIXME -EINTR?
35747 tty->driver_data = cs;
35748
35749 - ++cs->open_count;
35750 -
35751 - if (cs->open_count == 1) {
35752 + if (local_inc_return(&cs->open_count) == 1) {
35753 spin_lock_irqsave(&cs->lock, flags);
35754 cs->tty = tty;
35755 spin_unlock_irqrestore(&cs->lock, flags);
35756 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35757
35758 if (!cs->connected)
35759 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35760 - else if (!cs->open_count)
35761 + else if (!local_read(&cs->open_count))
35762 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35763 else {
35764 - if (!--cs->open_count) {
35765 + if (!local_dec_return(&cs->open_count)) {
35766 spin_lock_irqsave(&cs->lock, flags);
35767 cs->tty = NULL;
35768 spin_unlock_irqrestore(&cs->lock, flags);
35769 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35770 if (!cs->connected) {
35771 gig_dbg(DEBUG_IF, "not connected");
35772 retval = -ENODEV;
35773 - } else if (!cs->open_count)
35774 + } else if (!local_read(&cs->open_count))
35775 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35776 else {
35777 retval = 0;
35778 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35779 if (!cs->connected) {
35780 gig_dbg(DEBUG_IF, "not connected");
35781 retval = -ENODEV;
35782 - } else if (!cs->open_count)
35783 + } else if (!local_read(&cs->open_count))
35784 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35785 else if (cs->mstate != MS_LOCKED) {
35786 dev_warn(cs->dev, "can't write to unlocked device\n");
35787 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35788 if (!cs->connected) {
35789 gig_dbg(DEBUG_IF, "not connected");
35790 retval = -ENODEV;
35791 - } else if (!cs->open_count)
35792 + } else if (!local_read(&cs->open_count))
35793 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35794 else if (cs->mstate != MS_LOCKED) {
35795 dev_warn(cs->dev, "can't write to unlocked device\n");
35796 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35797
35798 if (!cs->connected)
35799 gig_dbg(DEBUG_IF, "not connected");
35800 - else if (!cs->open_count)
35801 + else if (!local_read(&cs->open_count))
35802 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35803 else if (cs->mstate != MS_LOCKED)
35804 dev_warn(cs->dev, "can't write to unlocked device\n");
35805 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35806
35807 if (!cs->connected)
35808 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35809 - else if (!cs->open_count)
35810 + else if (!local_read(&cs->open_count))
35811 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35812 else {
35813 //FIXME
35814 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35815
35816 if (!cs->connected)
35817 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35818 - else if (!cs->open_count)
35819 + else if (!local_read(&cs->open_count))
35820 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35821 else {
35822 //FIXME
35823 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35824 goto out;
35825 }
35826
35827 - if (!cs->open_count) {
35828 + if (!local_read(&cs->open_count)) {
35829 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35830 goto out;
35831 }
35832 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35833 index a7c0083..62a7cb6 100644
35834 --- a/drivers/isdn/hardware/avm/b1.c
35835 +++ b/drivers/isdn/hardware/avm/b1.c
35836 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35837 }
35838 if (left) {
35839 if (t4file->user) {
35840 - if (copy_from_user(buf, dp, left))
35841 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35842 return -EFAULT;
35843 } else {
35844 memcpy(buf, dp, left);
35845 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35846 }
35847 if (left) {
35848 if (config->user) {
35849 - if (copy_from_user(buf, dp, left))
35850 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35851 return -EFAULT;
35852 } else {
35853 memcpy(buf, dp, left);
35854 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35855 index f130724..c373c68 100644
35856 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35857 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35858 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35859 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35860 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35861
35862 + pax_track_stack();
35863
35864 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35865 {
35866 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35867 index 4d425c6..a9be6c4 100644
35868 --- a/drivers/isdn/hardware/eicon/capifunc.c
35869 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35870 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35871 IDI_SYNC_REQ req;
35872 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35873
35874 + pax_track_stack();
35875 +
35876 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35877
35878 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35879 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35880 index 3029234..ef0d9e2 100644
35881 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35882 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35883 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35884 IDI_SYNC_REQ req;
35885 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35886
35887 + pax_track_stack();
35888 +
35889 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35890
35891 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35892 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35893 index d36a4c0..11e7d1a 100644
35894 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35895 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35896 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35897 IDI_SYNC_REQ req;
35898 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35899
35900 + pax_track_stack();
35901 +
35902 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35903
35904 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35905 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35906 index 85784a7..a19ca98 100644
35907 --- a/drivers/isdn/hardware/eicon/divasync.h
35908 +++ b/drivers/isdn/hardware/eicon/divasync.h
35909 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35910 } diva_didd_add_adapter_t;
35911 typedef struct _diva_didd_remove_adapter {
35912 IDI_CALL p_request;
35913 -} diva_didd_remove_adapter_t;
35914 +} __no_const diva_didd_remove_adapter_t;
35915 typedef struct _diva_didd_read_adapter_array {
35916 void * buffer;
35917 dword length;
35918 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35919 index db87d51..7d09acf 100644
35920 --- a/drivers/isdn/hardware/eicon/idifunc.c
35921 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35922 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35923 IDI_SYNC_REQ req;
35924 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35925
35926 + pax_track_stack();
35927 +
35928 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35929
35930 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35931 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35932 index ae89fb8..0fab299 100644
35933 --- a/drivers/isdn/hardware/eicon/message.c
35934 +++ b/drivers/isdn/hardware/eicon/message.c
35935 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35936 dword d;
35937 word w;
35938
35939 + pax_track_stack();
35940 +
35941 a = plci->adapter;
35942 Id = ((word)plci->Id<<8)|a->Id;
35943 PUT_WORD(&SS_Ind[4],0x0000);
35944 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35945 word j, n, w;
35946 dword d;
35947
35948 + pax_track_stack();
35949 +
35950
35951 for(i=0;i<8;i++) bp_parms[i].length = 0;
35952 for(i=0;i<2;i++) global_config[i].length = 0;
35953 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35954 const byte llc3[] = {4,3,2,2,6,6,0};
35955 const byte header[] = {0,2,3,3,0,0,0};
35956
35957 + pax_track_stack();
35958 +
35959 for(i=0;i<8;i++) bp_parms[i].length = 0;
35960 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35961 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35962 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35963 word appl_number_group_type[MAX_APPL];
35964 PLCI *auxplci;
35965
35966 + pax_track_stack();
35967 +
35968 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35969
35970 if(!a->group_optimization_enabled)
35971 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35972 index a564b75..f3cf8b5 100644
35973 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35974 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35975 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35976 IDI_SYNC_REQ req;
35977 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35978
35979 + pax_track_stack();
35980 +
35981 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35982
35983 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35984 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35985 index a3bd163..8956575 100644
35986 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35987 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35988 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35989 typedef struct _diva_os_idi_adapter_interface {
35990 diva_init_card_proc_t cleanup_adapter_proc;
35991 diva_cmd_card_proc_t cmd_proc;
35992 -} diva_os_idi_adapter_interface_t;
35993 +} __no_const diva_os_idi_adapter_interface_t;
35994
35995 typedef struct _diva_os_xdi_adapter {
35996 struct list_head link;
35997 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35998 index adb1e8c..21b590b 100644
35999 --- a/drivers/isdn/i4l/isdn_common.c
36000 +++ b/drivers/isdn/i4l/isdn_common.c
36001 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36002 } iocpar;
36003 void __user *argp = (void __user *)arg;
36004
36005 + pax_track_stack();
36006 +
36007 #define name iocpar.name
36008 #define bname iocpar.bname
36009 #define iocts iocpar.iocts
36010 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36011 index 90b56ed..5ed3305 100644
36012 --- a/drivers/isdn/i4l/isdn_net.c
36013 +++ b/drivers/isdn/i4l/isdn_net.c
36014 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36015 {
36016 isdn_net_local *lp = netdev_priv(dev);
36017 unsigned char *p;
36018 - ushort len = 0;
36019 + int len = 0;
36020
36021 switch (lp->p_encap) {
36022 case ISDN_NET_ENCAP_ETHER:
36023 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36024 index bf7997a..cf091db 100644
36025 --- a/drivers/isdn/icn/icn.c
36026 +++ b/drivers/isdn/icn/icn.c
36027 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36028 if (count > len)
36029 count = len;
36030 if (user) {
36031 - if (copy_from_user(msg, buf, count))
36032 + if (count > sizeof msg || copy_from_user(msg, buf, count))
36033 return -EFAULT;
36034 } else
36035 memcpy(msg, buf, count);
36036 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36037 index feb0fa4..f76f830 100644
36038 --- a/drivers/isdn/mISDN/socket.c
36039 +++ b/drivers/isdn/mISDN/socket.c
36040 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36041 if (dev) {
36042 struct mISDN_devinfo di;
36043
36044 + memset(&di, 0, sizeof(di));
36045 di.id = dev->id;
36046 di.Dprotocols = dev->Dprotocols;
36047 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36048 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36049 if (dev) {
36050 struct mISDN_devinfo di;
36051
36052 + memset(&di, 0, sizeof(di));
36053 di.id = dev->id;
36054 di.Dprotocols = dev->Dprotocols;
36055 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36056 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36057 index 485be8b..f0225bc 100644
36058 --- a/drivers/isdn/sc/interrupt.c
36059 +++ b/drivers/isdn/sc/interrupt.c
36060 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36061 }
36062 else if(callid>=0x0000 && callid<=0x7FFF)
36063 {
36064 + int len;
36065 +
36066 pr_debug("%s: Got Incoming Call\n",
36067 sc_adapter[card]->devicename);
36068 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36069 - strcpy(setup.eazmsn,
36070 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36071 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36072 + sizeof(setup.phone));
36073 + if (len >= sizeof(setup.phone))
36074 + continue;
36075 + len = strlcpy(setup.eazmsn,
36076 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36077 + sizeof(setup.eazmsn));
36078 + if (len >= sizeof(setup.eazmsn))
36079 + continue;
36080 setup.si1 = 7;
36081 setup.si2 = 0;
36082 setup.plan = 0;
36083 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36084 * Handle a GetMyNumber Rsp
36085 */
36086 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36087 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36088 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36089 + rcvmsg.msg_data.byte_array,
36090 + sizeof(rcvmsg.msg_data.byte_array));
36091 continue;
36092 }
36093
36094 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36095 index 8744d24..d1f9a9a 100644
36096 --- a/drivers/lguest/core.c
36097 +++ b/drivers/lguest/core.c
36098 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
36099 * it's worked so far. The end address needs +1 because __get_vm_area
36100 * allocates an extra guard page, so we need space for that.
36101 */
36102 +
36103 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36104 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36105 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36106 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36107 +#else
36108 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36109 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36110 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36111 +#endif
36112 +
36113 if (!switcher_vma) {
36114 err = -ENOMEM;
36115 printk("lguest: could not map switcher pages high\n");
36116 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
36117 * Now the Switcher is mapped at the right address, we can't fail!
36118 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36119 */
36120 - memcpy(switcher_vma->addr, start_switcher_text,
36121 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36122 end_switcher_text - start_switcher_text);
36123
36124 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36125 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36126 index 6ae3888..8b38145 100644
36127 --- a/drivers/lguest/x86/core.c
36128 +++ b/drivers/lguest/x86/core.c
36129 @@ -59,7 +59,7 @@ static struct {
36130 /* Offset from where switcher.S was compiled to where we've copied it */
36131 static unsigned long switcher_offset(void)
36132 {
36133 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36134 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36135 }
36136
36137 /* This cpu's struct lguest_pages. */
36138 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36139 * These copies are pretty cheap, so we do them unconditionally: */
36140 /* Save the current Host top-level page directory.
36141 */
36142 +
36143 +#ifdef CONFIG_PAX_PER_CPU_PGD
36144 + pages->state.host_cr3 = read_cr3();
36145 +#else
36146 pages->state.host_cr3 = __pa(current->mm->pgd);
36147 +#endif
36148 +
36149 /*
36150 * Set up the Guest's page tables to see this CPU's pages (and no
36151 * other CPU's pages).
36152 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36153 * compiled-in switcher code and the high-mapped copy we just made.
36154 */
36155 for (i = 0; i < IDT_ENTRIES; i++)
36156 - default_idt_entries[i] += switcher_offset();
36157 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36158
36159 /*
36160 * Set up the Switcher's per-cpu areas.
36161 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36162 * it will be undisturbed when we switch. To change %cs and jump we
36163 * need this structure to feed to Intel's "lcall" instruction.
36164 */
36165 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36166 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36167 lguest_entry.segment = LGUEST_CS;
36168
36169 /*
36170 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36171 index 40634b0..4f5855e 100644
36172 --- a/drivers/lguest/x86/switcher_32.S
36173 +++ b/drivers/lguest/x86/switcher_32.S
36174 @@ -87,6 +87,7 @@
36175 #include <asm/page.h>
36176 #include <asm/segment.h>
36177 #include <asm/lguest.h>
36178 +#include <asm/processor-flags.h>
36179
36180 // We mark the start of the code to copy
36181 // It's placed in .text tho it's never run here
36182 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36183 // Changes type when we load it: damn Intel!
36184 // For after we switch over our page tables
36185 // That entry will be read-only: we'd crash.
36186 +
36187 +#ifdef CONFIG_PAX_KERNEXEC
36188 + mov %cr0, %edx
36189 + xor $X86_CR0_WP, %edx
36190 + mov %edx, %cr0
36191 +#endif
36192 +
36193 movl $(GDT_ENTRY_TSS*8), %edx
36194 ltr %dx
36195
36196 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36197 // Let's clear it again for our return.
36198 // The GDT descriptor of the Host
36199 // Points to the table after two "size" bytes
36200 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36201 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36202 // Clear "used" from type field (byte 5, bit 2)
36203 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36204 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36205 +
36206 +#ifdef CONFIG_PAX_KERNEXEC
36207 + mov %cr0, %eax
36208 + xor $X86_CR0_WP, %eax
36209 + mov %eax, %cr0
36210 +#endif
36211
36212 // Once our page table's switched, the Guest is live!
36213 // The Host fades as we run this final step.
36214 @@ -295,13 +309,12 @@ deliver_to_host:
36215 // I consulted gcc, and it gave
36216 // These instructions, which I gladly credit:
36217 leal (%edx,%ebx,8), %eax
36218 - movzwl (%eax),%edx
36219 - movl 4(%eax), %eax
36220 - xorw %ax, %ax
36221 - orl %eax, %edx
36222 + movl 4(%eax), %edx
36223 + movw (%eax), %dx
36224 // Now the address of the handler's in %edx
36225 // We call it now: its "iret" drops us home.
36226 - jmp *%edx
36227 + ljmp $__KERNEL_CS, $1f
36228 +1: jmp *%edx
36229
36230 // Every interrupt can come to us here
36231 // But we must truly tell each apart.
36232 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36233 index 588a5b0..b71db89 100644
36234 --- a/drivers/macintosh/macio_asic.c
36235 +++ b/drivers/macintosh/macio_asic.c
36236 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36237 * MacIO is matched against any Apple ID, it's probe() function
36238 * will then decide wether it applies or not
36239 */
36240 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36241 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36242 .vendor = PCI_VENDOR_ID_APPLE,
36243 .device = PCI_ANY_ID,
36244 .subvendor = PCI_ANY_ID,
36245 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36246 index a348bb0..ecd9b3f 100644
36247 --- a/drivers/macintosh/via-pmu-backlight.c
36248 +++ b/drivers/macintosh/via-pmu-backlight.c
36249 @@ -15,7 +15,7 @@
36250
36251 #define MAX_PMU_LEVEL 0xFF
36252
36253 -static struct backlight_ops pmu_backlight_data;
36254 +static const struct backlight_ops pmu_backlight_data;
36255 static DEFINE_SPINLOCK(pmu_backlight_lock);
36256 static int sleeping, uses_pmu_bl;
36257 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36258 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36259 return bd->props.brightness;
36260 }
36261
36262 -static struct backlight_ops pmu_backlight_data = {
36263 +static const struct backlight_ops pmu_backlight_data = {
36264 .get_brightness = pmu_backlight_get_brightness,
36265 .update_status = pmu_backlight_update_status,
36266
36267 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36268 index 6f308a4..b5f7ff7 100644
36269 --- a/drivers/macintosh/via-pmu.c
36270 +++ b/drivers/macintosh/via-pmu.c
36271 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36272 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36273 }
36274
36275 -static struct platform_suspend_ops pmu_pm_ops = {
36276 +static const struct platform_suspend_ops pmu_pm_ops = {
36277 .enter = powerbook_sleep,
36278 .valid = pmu_sleep_valid,
36279 };
36280 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36281 index 818b617..4656e38 100644
36282 --- a/drivers/md/dm-ioctl.c
36283 +++ b/drivers/md/dm-ioctl.c
36284 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36285 cmd == DM_LIST_VERSIONS_CMD)
36286 return 0;
36287
36288 - if ((cmd == DM_DEV_CREATE_CMD)) {
36289 + if (cmd == DM_DEV_CREATE_CMD) {
36290 if (!*param->name) {
36291 DMWARN("name not supplied when creating device");
36292 return -EINVAL;
36293 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36294 index 6021d0a..a878643 100644
36295 --- a/drivers/md/dm-raid1.c
36296 +++ b/drivers/md/dm-raid1.c
36297 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36298
36299 struct mirror {
36300 struct mirror_set *ms;
36301 - atomic_t error_count;
36302 + atomic_unchecked_t error_count;
36303 unsigned long error_type;
36304 struct dm_dev *dev;
36305 sector_t offset;
36306 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36307 * simple way to tell if a device has encountered
36308 * errors.
36309 */
36310 - atomic_inc(&m->error_count);
36311 + atomic_inc_unchecked(&m->error_count);
36312
36313 if (test_and_set_bit(error_type, &m->error_type))
36314 return;
36315 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36316 }
36317
36318 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36319 - if (!atomic_read(&new->error_count)) {
36320 + if (!atomic_read_unchecked(&new->error_count)) {
36321 set_default_mirror(new);
36322 break;
36323 }
36324 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36325 struct mirror *m = get_default_mirror(ms);
36326
36327 do {
36328 - if (likely(!atomic_read(&m->error_count)))
36329 + if (likely(!atomic_read_unchecked(&m->error_count)))
36330 return m;
36331
36332 if (m-- == ms->mirror)
36333 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36334 {
36335 struct mirror *default_mirror = get_default_mirror(m->ms);
36336
36337 - return !atomic_read(&default_mirror->error_count);
36338 + return !atomic_read_unchecked(&default_mirror->error_count);
36339 }
36340
36341 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36342 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36343 */
36344 if (likely(region_in_sync(ms, region, 1)))
36345 m = choose_mirror(ms, bio->bi_sector);
36346 - else if (m && atomic_read(&m->error_count))
36347 + else if (m && atomic_read_unchecked(&m->error_count))
36348 m = NULL;
36349
36350 if (likely(m))
36351 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36352 }
36353
36354 ms->mirror[mirror].ms = ms;
36355 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36356 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36357 ms->mirror[mirror].error_type = 0;
36358 ms->mirror[mirror].offset = offset;
36359
36360 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36361 */
36362 static char device_status_char(struct mirror *m)
36363 {
36364 - if (!atomic_read(&(m->error_count)))
36365 + if (!atomic_read_unchecked(&(m->error_count)))
36366 return 'A';
36367
36368 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36369 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36370 index bd58703..9f26571 100644
36371 --- a/drivers/md/dm-stripe.c
36372 +++ b/drivers/md/dm-stripe.c
36373 @@ -20,7 +20,7 @@ struct stripe {
36374 struct dm_dev *dev;
36375 sector_t physical_start;
36376
36377 - atomic_t error_count;
36378 + atomic_unchecked_t error_count;
36379 };
36380
36381 struct stripe_c {
36382 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36383 kfree(sc);
36384 return r;
36385 }
36386 - atomic_set(&(sc->stripe[i].error_count), 0);
36387 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36388 }
36389
36390 ti->private = sc;
36391 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36392 DMEMIT("%d ", sc->stripes);
36393 for (i = 0; i < sc->stripes; i++) {
36394 DMEMIT("%s ", sc->stripe[i].dev->name);
36395 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36396 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36397 'D' : 'A';
36398 }
36399 buffer[i] = '\0';
36400 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36401 */
36402 for (i = 0; i < sc->stripes; i++)
36403 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36404 - atomic_inc(&(sc->stripe[i].error_count));
36405 - if (atomic_read(&(sc->stripe[i].error_count)) <
36406 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36407 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36408 DM_IO_ERROR_THRESHOLD)
36409 queue_work(kstriped, &sc->kstriped_ws);
36410 }
36411 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36412 index 4b04590..13a77b2 100644
36413 --- a/drivers/md/dm-sysfs.c
36414 +++ b/drivers/md/dm-sysfs.c
36415 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36416 NULL,
36417 };
36418
36419 -static struct sysfs_ops dm_sysfs_ops = {
36420 +static const struct sysfs_ops dm_sysfs_ops = {
36421 .show = dm_attr_show,
36422 };
36423
36424 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36425 index 03345bb..332250d 100644
36426 --- a/drivers/md/dm-table.c
36427 +++ b/drivers/md/dm-table.c
36428 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36429 if (!dev_size)
36430 return 0;
36431
36432 - if ((start >= dev_size) || (start + len > dev_size)) {
36433 + if ((start >= dev_size) || (len > dev_size - start)) {
36434 DMWARN("%s: %s too small for target: "
36435 "start=%llu, len=%llu, dev_size=%llu",
36436 dm_device_name(ti->table->md), bdevname(bdev, b),
36437 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36438 index c988ac2..c418141 100644
36439 --- a/drivers/md/dm.c
36440 +++ b/drivers/md/dm.c
36441 @@ -165,9 +165,9 @@ struct mapped_device {
36442 /*
36443 * Event handling.
36444 */
36445 - atomic_t event_nr;
36446 + atomic_unchecked_t event_nr;
36447 wait_queue_head_t eventq;
36448 - atomic_t uevent_seq;
36449 + atomic_unchecked_t uevent_seq;
36450 struct list_head uevent_list;
36451 spinlock_t uevent_lock; /* Protect access to uevent_list */
36452
36453 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36454 rwlock_init(&md->map_lock);
36455 atomic_set(&md->holders, 1);
36456 atomic_set(&md->open_count, 0);
36457 - atomic_set(&md->event_nr, 0);
36458 - atomic_set(&md->uevent_seq, 0);
36459 + atomic_set_unchecked(&md->event_nr, 0);
36460 + atomic_set_unchecked(&md->uevent_seq, 0);
36461 INIT_LIST_HEAD(&md->uevent_list);
36462 spin_lock_init(&md->uevent_lock);
36463
36464 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36465
36466 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36467
36468 - atomic_inc(&md->event_nr);
36469 + atomic_inc_unchecked(&md->event_nr);
36470 wake_up(&md->eventq);
36471 }
36472
36473 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36474
36475 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36476 {
36477 - return atomic_add_return(1, &md->uevent_seq);
36478 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36479 }
36480
36481 uint32_t dm_get_event_nr(struct mapped_device *md)
36482 {
36483 - return atomic_read(&md->event_nr);
36484 + return atomic_read_unchecked(&md->event_nr);
36485 }
36486
36487 int dm_wait_event(struct mapped_device *md, int event_nr)
36488 {
36489 return wait_event_interruptible(md->eventq,
36490 - (event_nr != atomic_read(&md->event_nr)));
36491 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36492 }
36493
36494 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36495 diff --git a/drivers/md/md.c b/drivers/md/md.c
36496 index 4ce6e2f..7a9530a 100644
36497 --- a/drivers/md/md.c
36498 +++ b/drivers/md/md.c
36499 @@ -153,10 +153,10 @@ static int start_readonly;
36500 * start build, activate spare
36501 */
36502 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36503 -static atomic_t md_event_count;
36504 +static atomic_unchecked_t md_event_count;
36505 void md_new_event(mddev_t *mddev)
36506 {
36507 - atomic_inc(&md_event_count);
36508 + atomic_inc_unchecked(&md_event_count);
36509 wake_up(&md_event_waiters);
36510 }
36511 EXPORT_SYMBOL_GPL(md_new_event);
36512 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36513 */
36514 static void md_new_event_inintr(mddev_t *mddev)
36515 {
36516 - atomic_inc(&md_event_count);
36517 + atomic_inc_unchecked(&md_event_count);
36518 wake_up(&md_event_waiters);
36519 }
36520
36521 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36522
36523 rdev->preferred_minor = 0xffff;
36524 rdev->data_offset = le64_to_cpu(sb->data_offset);
36525 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36526 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36527
36528 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36529 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36530 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36531 else
36532 sb->resync_offset = cpu_to_le64(0);
36533
36534 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36535 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36536
36537 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36538 sb->size = cpu_to_le64(mddev->dev_sectors);
36539 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36540 static ssize_t
36541 errors_show(mdk_rdev_t *rdev, char *page)
36542 {
36543 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36544 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36545 }
36546
36547 static ssize_t
36548 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36549 char *e;
36550 unsigned long n = simple_strtoul(buf, &e, 10);
36551 if (*buf && (*e == 0 || *e == '\n')) {
36552 - atomic_set(&rdev->corrected_errors, n);
36553 + atomic_set_unchecked(&rdev->corrected_errors, n);
36554 return len;
36555 }
36556 return -EINVAL;
36557 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36558 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36559 kfree(rdev);
36560 }
36561 -static struct sysfs_ops rdev_sysfs_ops = {
36562 +static const struct sysfs_ops rdev_sysfs_ops = {
36563 .show = rdev_attr_show,
36564 .store = rdev_attr_store,
36565 };
36566 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36567 rdev->data_offset = 0;
36568 rdev->sb_events = 0;
36569 atomic_set(&rdev->nr_pending, 0);
36570 - atomic_set(&rdev->read_errors, 0);
36571 - atomic_set(&rdev->corrected_errors, 0);
36572 + atomic_set_unchecked(&rdev->read_errors, 0);
36573 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36574
36575 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36576 if (!size) {
36577 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36578 kfree(mddev);
36579 }
36580
36581 -static struct sysfs_ops md_sysfs_ops = {
36582 +static const struct sysfs_ops md_sysfs_ops = {
36583 .show = md_attr_show,
36584 .store = md_attr_store,
36585 };
36586 @@ -4482,7 +4482,8 @@ out:
36587 err = 0;
36588 blk_integrity_unregister(disk);
36589 md_new_event(mddev);
36590 - sysfs_notify_dirent(mddev->sysfs_state);
36591 + if (mddev->sysfs_state)
36592 + sysfs_notify_dirent(mddev->sysfs_state);
36593 return err;
36594 }
36595
36596 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36597
36598 spin_unlock(&pers_lock);
36599 seq_printf(seq, "\n");
36600 - mi->event = atomic_read(&md_event_count);
36601 + mi->event = atomic_read_unchecked(&md_event_count);
36602 return 0;
36603 }
36604 if (v == (void*)2) {
36605 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36606 chunk_kb ? "KB" : "B");
36607 if (bitmap->file) {
36608 seq_printf(seq, ", file: ");
36609 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36610 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36611 }
36612
36613 seq_printf(seq, "\n");
36614 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36615 else {
36616 struct seq_file *p = file->private_data;
36617 p->private = mi;
36618 - mi->event = atomic_read(&md_event_count);
36619 + mi->event = atomic_read_unchecked(&md_event_count);
36620 }
36621 return error;
36622 }
36623 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36624 /* always allow read */
36625 mask = POLLIN | POLLRDNORM;
36626
36627 - if (mi->event != atomic_read(&md_event_count))
36628 + if (mi->event != atomic_read_unchecked(&md_event_count))
36629 mask |= POLLERR | POLLPRI;
36630 return mask;
36631 }
36632 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36633 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36634 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36635 (int)part_stat_read(&disk->part0, sectors[1]) -
36636 - atomic_read(&disk->sync_io);
36637 + atomic_read_unchecked(&disk->sync_io);
36638 /* sync IO will cause sync_io to increase before the disk_stats
36639 * as sync_io is counted when a request starts, and
36640 * disk_stats is counted when it completes.
36641 diff --git a/drivers/md/md.h b/drivers/md/md.h
36642 index 87430fe..0024a4c 100644
36643 --- a/drivers/md/md.h
36644 +++ b/drivers/md/md.h
36645 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36646 * only maintained for arrays that
36647 * support hot removal
36648 */
36649 - atomic_t read_errors; /* number of consecutive read errors that
36650 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36651 * we have tried to ignore.
36652 */
36653 - atomic_t corrected_errors; /* number of corrected read errors,
36654 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36655 * for reporting to userspace and storing
36656 * in superblock.
36657 */
36658 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36659
36660 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36661 {
36662 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36663 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36664 }
36665
36666 struct mdk_personality
36667 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36668 index 968cb14..f0ad2e4 100644
36669 --- a/drivers/md/raid1.c
36670 +++ b/drivers/md/raid1.c
36671 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36672 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36673 continue;
36674 rdev = conf->mirrors[d].rdev;
36675 - atomic_add(s, &rdev->corrected_errors);
36676 + atomic_add_unchecked(s, &rdev->corrected_errors);
36677 if (sync_page_io(rdev->bdev,
36678 sect + rdev->data_offset,
36679 s<<9,
36680 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36681 /* Well, this device is dead */
36682 md_error(mddev, rdev);
36683 else {
36684 - atomic_add(s, &rdev->corrected_errors);
36685 + atomic_add_unchecked(s, &rdev->corrected_errors);
36686 printk(KERN_INFO
36687 "raid1:%s: read error corrected "
36688 "(%d sectors at %llu on %s)\n",
36689 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36690 index 1b4e232..cf0f534 100644
36691 --- a/drivers/md/raid10.c
36692 +++ b/drivers/md/raid10.c
36693 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36694 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36695 set_bit(R10BIO_Uptodate, &r10_bio->state);
36696 else {
36697 - atomic_add(r10_bio->sectors,
36698 + atomic_add_unchecked(r10_bio->sectors,
36699 &conf->mirrors[d].rdev->corrected_errors);
36700 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36701 md_error(r10_bio->mddev,
36702 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36703 test_bit(In_sync, &rdev->flags)) {
36704 atomic_inc(&rdev->nr_pending);
36705 rcu_read_unlock();
36706 - atomic_add(s, &rdev->corrected_errors);
36707 + atomic_add_unchecked(s, &rdev->corrected_errors);
36708 if (sync_page_io(rdev->bdev,
36709 r10_bio->devs[sl].addr +
36710 sect + rdev->data_offset,
36711 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36712 index 883215d..675bf47 100644
36713 --- a/drivers/md/raid5.c
36714 +++ b/drivers/md/raid5.c
36715 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36716 bi->bi_next = NULL;
36717 if ((rw & WRITE) &&
36718 test_bit(R5_ReWrite, &sh->dev[i].flags))
36719 - atomic_add(STRIPE_SECTORS,
36720 + atomic_add_unchecked(STRIPE_SECTORS,
36721 &rdev->corrected_errors);
36722 generic_make_request(bi);
36723 } else {
36724 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36725 clear_bit(R5_ReadError, &sh->dev[i].flags);
36726 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36727 }
36728 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36729 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36730 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36731 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36732 } else {
36733 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36734 int retry = 0;
36735 rdev = conf->disks[i].rdev;
36736
36737 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36738 - atomic_inc(&rdev->read_errors);
36739 + atomic_inc_unchecked(&rdev->read_errors);
36740 if (conf->mddev->degraded >= conf->max_degraded)
36741 printk_rl(KERN_WARNING
36742 "raid5:%s: read error not correctable "
36743 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36744 (unsigned long long)(sh->sector
36745 + rdev->data_offset),
36746 bdn);
36747 - else if (atomic_read(&rdev->read_errors)
36748 + else if (atomic_read_unchecked(&rdev->read_errors)
36749 > conf->max_nr_stripes)
36750 printk(KERN_WARNING
36751 "raid5:%s: Too many read errors, failing device %s.\n",
36752 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36753 sector_t r_sector;
36754 struct stripe_head sh2;
36755
36756 + pax_track_stack();
36757
36758 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36759 stripe = new_sector;
36760 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36761 index 05bde9c..2f31d40 100644
36762 --- a/drivers/media/common/saa7146_hlp.c
36763 +++ b/drivers/media/common/saa7146_hlp.c
36764 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36765
36766 int x[32], y[32], w[32], h[32];
36767
36768 + pax_track_stack();
36769 +
36770 /* clear out memory */
36771 memset(&line_list[0], 0x00, sizeof(u32)*32);
36772 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36773 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36774 index cb22da5..82b686e 100644
36775 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36776 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36777 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36778 u8 buf[HOST_LINK_BUF_SIZE];
36779 int i;
36780
36781 + pax_track_stack();
36782 +
36783 dprintk("%s\n", __func__);
36784
36785 /* check if we have space for a link buf in the rx_buffer */
36786 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36787 unsigned long timeout;
36788 int written;
36789
36790 + pax_track_stack();
36791 +
36792 dprintk("%s\n", __func__);
36793
36794 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36795 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36796 index 2fe05d0..a3289c4 100644
36797 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36798 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36799 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36800 union {
36801 dmx_ts_cb ts;
36802 dmx_section_cb sec;
36803 - } cb;
36804 + } __no_const cb;
36805
36806 struct dvb_demux *demux;
36807 void *priv;
36808 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36809 index 94159b9..376bd8e 100644
36810 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36811 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36812 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36813 const struct dvb_device *template, void *priv, int type)
36814 {
36815 struct dvb_device *dvbdev;
36816 - struct file_operations *dvbdevfops;
36817 + file_operations_no_const *dvbdevfops;
36818 struct device *clsdev;
36819 int minor;
36820 int id;
36821 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36822 index 2a53dd0..db8c07a 100644
36823 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36824 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36825 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36826 struct dib0700_adapter_state {
36827 int (*set_param_save) (struct dvb_frontend *,
36828 struct dvb_frontend_parameters *);
36829 -};
36830 +} __no_const;
36831
36832 static int dib7070_set_param_override(struct dvb_frontend *fe,
36833 struct dvb_frontend_parameters *fep)
36834 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36835 index db7f7f7..f55e96f 100644
36836 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36837 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36838 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36839
36840 u8 buf[260];
36841
36842 + pax_track_stack();
36843 +
36844 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36845 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36846
36847 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36848 index 524acf5..5ffc403 100644
36849 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36850 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36851 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36852
36853 struct dib0700_adapter_state {
36854 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36855 -};
36856 +} __no_const;
36857
36858 /* Hauppauge Nova-T 500 (aka Bristol)
36859 * has a LNA on GPIO0 which is enabled by setting 1 */
36860 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36861 index ba91735..4261d84 100644
36862 --- a/drivers/media/dvb/frontends/dib3000.h
36863 +++ b/drivers/media/dvb/frontends/dib3000.h
36864 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36865 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36866 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36867 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36868 -};
36869 +} __no_const;
36870
36871 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36872 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36873 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36874 index c709ce6..b3fe620 100644
36875 --- a/drivers/media/dvb/frontends/or51211.c
36876 +++ b/drivers/media/dvb/frontends/or51211.c
36877 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36878 u8 tudata[585];
36879 int i;
36880
36881 + pax_track_stack();
36882 +
36883 dprintk("Firmware is %zd bytes\n",fw->size);
36884
36885 /* Get eprom data */
36886 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36887 index 482d0f3..ee1e202 100644
36888 --- a/drivers/media/radio/radio-cadet.c
36889 +++ b/drivers/media/radio/radio-cadet.c
36890 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36891 while (i < count && dev->rdsin != dev->rdsout)
36892 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36893
36894 - if (copy_to_user(data, readbuf, i))
36895 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36896 return -EFAULT;
36897 return i;
36898 }
36899 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36900 index 6dd51e2..0359b92 100644
36901 --- a/drivers/media/video/cx18/cx18-driver.c
36902 +++ b/drivers/media/video/cx18/cx18-driver.c
36903 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36904
36905 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36906
36907 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36908 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36909
36910 /* Parameter declarations */
36911 static int cardtype[CX18_MAX_CARDS];
36912 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36913 struct i2c_client c;
36914 u8 eedata[256];
36915
36916 + pax_track_stack();
36917 +
36918 memset(&c, 0, sizeof(c));
36919 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36920 c.adapter = &cx->i2c_adap[0];
36921 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36922 struct cx18 *cx;
36923
36924 /* FIXME - module parameter arrays constrain max instances */
36925 - i = atomic_inc_return(&cx18_instance) - 1;
36926 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36927 if (i >= CX18_MAX_CARDS) {
36928 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36929 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36930 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36931 index 463ec34..2f4625a 100644
36932 --- a/drivers/media/video/ivtv/ivtv-driver.c
36933 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36934 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36935 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36936
36937 /* ivtv instance counter */
36938 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36939 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36940
36941 /* Parameter declarations */
36942 static int cardtype[IVTV_MAX_CARDS];
36943 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36944 index 5fc4ac0..652a54a 100644
36945 --- a/drivers/media/video/omap24xxcam.c
36946 +++ b/drivers/media/video/omap24xxcam.c
36947 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36948 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36949
36950 do_gettimeofday(&vb->ts);
36951 - vb->field_count = atomic_add_return(2, &fh->field_count);
36952 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36953 if (csr & csr_error) {
36954 vb->state = VIDEOBUF_ERROR;
36955 if (!atomic_read(&fh->cam->in_reset)) {
36956 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36957 index 2ce67f5..cf26a5b 100644
36958 --- a/drivers/media/video/omap24xxcam.h
36959 +++ b/drivers/media/video/omap24xxcam.h
36960 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36961 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36962 struct videobuf_queue vbq;
36963 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36964 - atomic_t field_count; /* field counter for videobuf_buffer */
36965 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36966 /* accessing cam here doesn't need serialisation: it's constant */
36967 struct omap24xxcam_device *cam;
36968 };
36969 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36970 index 299afa4..eb47459 100644
36971 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36972 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36973 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36974 u8 *eeprom;
36975 struct tveeprom tvdata;
36976
36977 + pax_track_stack();
36978 +
36979 memset(&tvdata,0,sizeof(tvdata));
36980
36981 eeprom = pvr2_eeprom_fetch(hdw);
36982 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36983 index 5b152ff..3320638 100644
36984 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36985 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36986 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36987
36988 /* I2C stuff */
36989 struct i2c_adapter i2c_adap;
36990 - struct i2c_algorithm i2c_algo;
36991 + i2c_algorithm_no_const i2c_algo;
36992 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36993 int i2c_cx25840_hack_state;
36994 int i2c_linked;
36995 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36996 index 1eabff6..8e2313a 100644
36997 --- a/drivers/media/video/saa7134/saa6752hs.c
36998 +++ b/drivers/media/video/saa7134/saa6752hs.c
36999 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37000 unsigned char localPAT[256];
37001 unsigned char localPMT[256];
37002
37003 + pax_track_stack();
37004 +
37005 /* Set video format - must be done first as it resets other settings */
37006 set_reg8(client, 0x41, h->video_format);
37007
37008 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37009 index 9c1d3ac..b1b49e9 100644
37010 --- a/drivers/media/video/saa7164/saa7164-cmd.c
37011 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
37012 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37013 wait_queue_head_t *q = 0;
37014 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37015
37016 + pax_track_stack();
37017 +
37018 /* While any outstand message on the bus exists... */
37019 do {
37020
37021 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37022 u8 tmp[512];
37023 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37024
37025 + pax_track_stack();
37026 +
37027 while (loop) {
37028
37029 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37030 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37031 index b085496..cde0270 100644
37032 --- a/drivers/media/video/usbvideo/ibmcam.c
37033 +++ b/drivers/media/video/usbvideo/ibmcam.c
37034 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37035 static int __init ibmcam_init(void)
37036 {
37037 struct usbvideo_cb cbTbl;
37038 - memset(&cbTbl, 0, sizeof(cbTbl));
37039 - cbTbl.probe = ibmcam_probe;
37040 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
37041 - cbTbl.videoStart = ibmcam_video_start;
37042 - cbTbl.videoStop = ibmcam_video_stop;
37043 - cbTbl.processData = ibmcam_ProcessIsocData;
37044 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37045 - cbTbl.adjustPicture = ibmcam_adjust_picture;
37046 - cbTbl.getFPS = ibmcam_calculate_fps;
37047 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
37048 + *(void **)&cbTbl.probe = ibmcam_probe;
37049 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37050 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
37051 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37052 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37053 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37054 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37055 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37056 return usbvideo_register(
37057 &cams,
37058 MAX_IBMCAM,
37059 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37060 index 31d57f2..600b735 100644
37061 --- a/drivers/media/video/usbvideo/konicawc.c
37062 +++ b/drivers/media/video/usbvideo/konicawc.c
37063 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37064 int error;
37065
37066 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37067 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37068 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37069
37070 cam->input = input_dev = input_allocate_device();
37071 if (!input_dev) {
37072 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37073 struct usbvideo_cb cbTbl;
37074 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37075 DRIVER_DESC "\n");
37076 - memset(&cbTbl, 0, sizeof(cbTbl));
37077 - cbTbl.probe = konicawc_probe;
37078 - cbTbl.setupOnOpen = konicawc_setup_on_open;
37079 - cbTbl.processData = konicawc_process_isoc;
37080 - cbTbl.getFPS = konicawc_calculate_fps;
37081 - cbTbl.setVideoMode = konicawc_set_video_mode;
37082 - cbTbl.startDataPump = konicawc_start_data;
37083 - cbTbl.stopDataPump = konicawc_stop_data;
37084 - cbTbl.adjustPicture = konicawc_adjust_picture;
37085 - cbTbl.userFree = konicawc_free_uvd;
37086 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
37087 + *(void **)&cbTbl.probe = konicawc_probe;
37088 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37089 + *(void **)&cbTbl.processData = konicawc_process_isoc;
37090 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37091 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37092 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
37093 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37094 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37095 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
37096 return usbvideo_register(
37097 &cams,
37098 MAX_CAMERAS,
37099 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37100 index 803d3e4..c4d1b96 100644
37101 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
37102 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37103 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37104 int error;
37105
37106 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37107 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37108 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37109
37110 cam->input = input_dev = input_allocate_device();
37111 if (!input_dev) {
37112 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37113 index fbd1b63..292f9f0 100644
37114 --- a/drivers/media/video/usbvideo/ultracam.c
37115 +++ b/drivers/media/video/usbvideo/ultracam.c
37116 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37117 {
37118 struct usbvideo_cb cbTbl;
37119 memset(&cbTbl, 0, sizeof(cbTbl));
37120 - cbTbl.probe = ultracam_probe;
37121 - cbTbl.setupOnOpen = ultracam_setup_on_open;
37122 - cbTbl.videoStart = ultracam_video_start;
37123 - cbTbl.videoStop = ultracam_video_stop;
37124 - cbTbl.processData = ultracam_ProcessIsocData;
37125 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37126 - cbTbl.adjustPicture = ultracam_adjust_picture;
37127 - cbTbl.getFPS = ultracam_calculate_fps;
37128 + *(void **)&cbTbl.probe = ultracam_probe;
37129 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37130 + *(void **)&cbTbl.videoStart = ultracam_video_start;
37131 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
37132 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37133 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37134 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37135 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37136 return usbvideo_register(
37137 &cams,
37138 MAX_CAMERAS,
37139 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37140 index dea8b32..34f6878 100644
37141 --- a/drivers/media/video/usbvideo/usbvideo.c
37142 +++ b/drivers/media/video/usbvideo/usbvideo.c
37143 @@ -697,15 +697,15 @@ int usbvideo_register(
37144 __func__, cams, base_size, num_cams);
37145
37146 /* Copy callbacks, apply defaults for those that are not set */
37147 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37148 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37149 if (cams->cb.getFrame == NULL)
37150 - cams->cb.getFrame = usbvideo_GetFrame;
37151 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37152 if (cams->cb.disconnect == NULL)
37153 - cams->cb.disconnect = usbvideo_Disconnect;
37154 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37155 if (cams->cb.startDataPump == NULL)
37156 - cams->cb.startDataPump = usbvideo_StartDataPump;
37157 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37158 if (cams->cb.stopDataPump == NULL)
37159 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37160 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37161
37162 cams->num_cameras = num_cams;
37163 cams->cam = (struct uvd *) &cams[1];
37164 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37165 index c66985b..7fa143a 100644
37166 --- a/drivers/media/video/usbvideo/usbvideo.h
37167 +++ b/drivers/media/video/usbvideo/usbvideo.h
37168 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37169 int (*startDataPump)(struct uvd *uvd);
37170 void (*stopDataPump)(struct uvd *uvd);
37171 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37172 -};
37173 +} __no_const;
37174
37175 struct usbvideo {
37176 int num_cameras; /* As allocated */
37177 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37178 index e0f91e4..37554ea 100644
37179 --- a/drivers/media/video/usbvision/usbvision-core.c
37180 +++ b/drivers/media/video/usbvision/usbvision-core.c
37181 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37182 unsigned char rv, gv, bv;
37183 static unsigned char *Y, *U, *V;
37184
37185 + pax_track_stack();
37186 +
37187 frame = usbvision->curFrame;
37188 imageSize = frame->frmwidth * frame->frmheight;
37189 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37190 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37191 index 0d06e7c..3d17d24 100644
37192 --- a/drivers/media/video/v4l2-device.c
37193 +++ b/drivers/media/video/v4l2-device.c
37194 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37195 EXPORT_SYMBOL_GPL(v4l2_device_register);
37196
37197 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37198 - atomic_t *instance)
37199 + atomic_unchecked_t *instance)
37200 {
37201 - int num = atomic_inc_return(instance) - 1;
37202 + int num = atomic_inc_return_unchecked(instance) - 1;
37203 int len = strlen(basename);
37204
37205 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37206 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37207 index 032ebae..6a3532c 100644
37208 --- a/drivers/media/video/videobuf-dma-sg.c
37209 +++ b/drivers/media/video/videobuf-dma-sg.c
37210 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37211 {
37212 struct videobuf_queue q;
37213
37214 + pax_track_stack();
37215 +
37216 /* Required to make generic handler to call __videobuf_alloc */
37217 q.int_ops = &sg_ops;
37218
37219 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37220 index b6992b7..9fa7547 100644
37221 --- a/drivers/message/fusion/mptbase.c
37222 +++ b/drivers/message/fusion/mptbase.c
37223 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37224 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37225 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37226
37227 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37228 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37229 + NULL, NULL);
37230 +#else
37231 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37232 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37233 +#endif
37234 +
37235 /*
37236 * Rounding UP to nearest 4-kB boundary here...
37237 */
37238 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37239 index 83873e3..e360e9a 100644
37240 --- a/drivers/message/fusion/mptsas.c
37241 +++ b/drivers/message/fusion/mptsas.c
37242 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37243 return 0;
37244 }
37245
37246 +static inline void
37247 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37248 +{
37249 + if (phy_info->port_details) {
37250 + phy_info->port_details->rphy = rphy;
37251 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37252 + ioc->name, rphy));
37253 + }
37254 +
37255 + if (rphy) {
37256 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37257 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37258 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37259 + ioc->name, rphy, rphy->dev.release));
37260 + }
37261 +}
37262 +
37263 /* no mutex */
37264 static void
37265 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37266 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37267 return NULL;
37268 }
37269
37270 -static inline void
37271 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37272 -{
37273 - if (phy_info->port_details) {
37274 - phy_info->port_details->rphy = rphy;
37275 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37276 - ioc->name, rphy));
37277 - }
37278 -
37279 - if (rphy) {
37280 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37281 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37282 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37283 - ioc->name, rphy, rphy->dev.release));
37284 - }
37285 -}
37286 -
37287 static inline struct sas_port *
37288 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37289 {
37290 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37291 index bd096ca..332cf76 100644
37292 --- a/drivers/message/fusion/mptscsih.c
37293 +++ b/drivers/message/fusion/mptscsih.c
37294 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37295
37296 h = shost_priv(SChost);
37297
37298 - if (h) {
37299 - if (h->info_kbuf == NULL)
37300 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37301 - return h->info_kbuf;
37302 - h->info_kbuf[0] = '\0';
37303 + if (!h)
37304 + return NULL;
37305
37306 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37307 - h->info_kbuf[size-1] = '\0';
37308 - }
37309 + if (h->info_kbuf == NULL)
37310 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37311 + return h->info_kbuf;
37312 + h->info_kbuf[0] = '\0';
37313 +
37314 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37315 + h->info_kbuf[size-1] = '\0';
37316
37317 return h->info_kbuf;
37318 }
37319 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37320 index efba702..59b2c0f 100644
37321 --- a/drivers/message/i2o/i2o_config.c
37322 +++ b/drivers/message/i2o/i2o_config.c
37323 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37324 struct i2o_message *msg;
37325 unsigned int iop;
37326
37327 + pax_track_stack();
37328 +
37329 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37330 return -EFAULT;
37331
37332 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37333 index 7045c45..c07b170 100644
37334 --- a/drivers/message/i2o/i2o_proc.c
37335 +++ b/drivers/message/i2o/i2o_proc.c
37336 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37337 "Array Controller Device"
37338 };
37339
37340 -static char *chtostr(u8 * chars, int n)
37341 -{
37342 - char tmp[256];
37343 - tmp[0] = 0;
37344 - return strncat(tmp, (char *)chars, n);
37345 -}
37346 -
37347 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37348 char *group)
37349 {
37350 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37351
37352 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37353 seq_printf(seq, "%-#8x", ddm_table.module_id);
37354 - seq_printf(seq, "%-29s",
37355 - chtostr(ddm_table.module_name_version, 28));
37356 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37357 seq_printf(seq, "%9d ", ddm_table.data_size);
37358 seq_printf(seq, "%8d", ddm_table.code_size);
37359
37360 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37361
37362 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37363 seq_printf(seq, "%-#8x", dst->module_id);
37364 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37365 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37366 + seq_printf(seq, "%-.28s", dst->module_name_version);
37367 + seq_printf(seq, "%-.8s", dst->date);
37368 seq_printf(seq, "%8d ", dst->module_size);
37369 seq_printf(seq, "%8d ", dst->mpb_size);
37370 seq_printf(seq, "0x%04x", dst->module_flags);
37371 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37372 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37373 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37374 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37375 - seq_printf(seq, "Vendor info : %s\n",
37376 - chtostr((u8 *) (work32 + 2), 16));
37377 - seq_printf(seq, "Product info : %s\n",
37378 - chtostr((u8 *) (work32 + 6), 16));
37379 - seq_printf(seq, "Description : %s\n",
37380 - chtostr((u8 *) (work32 + 10), 16));
37381 - seq_printf(seq, "Product rev. : %s\n",
37382 - chtostr((u8 *) (work32 + 14), 8));
37383 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37384 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37385 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37386 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37387
37388 seq_printf(seq, "Serial number : ");
37389 print_serial_number(seq, (u8 *) (work32 + 16),
37390 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37391 }
37392
37393 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37394 - seq_printf(seq, "Module name : %s\n",
37395 - chtostr(result.module_name, 24));
37396 - seq_printf(seq, "Module revision : %s\n",
37397 - chtostr(result.module_rev, 8));
37398 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37399 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37400
37401 seq_printf(seq, "Serial number : ");
37402 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37403 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37404 return 0;
37405 }
37406
37407 - seq_printf(seq, "Device name : %s\n",
37408 - chtostr(result.device_name, 64));
37409 - seq_printf(seq, "Service name : %s\n",
37410 - chtostr(result.service_name, 64));
37411 - seq_printf(seq, "Physical name : %s\n",
37412 - chtostr(result.physical_location, 64));
37413 - seq_printf(seq, "Instance number : %s\n",
37414 - chtostr(result.instance_number, 4));
37415 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37416 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37417 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37418 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37419
37420 return 0;
37421 }
37422 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37423 index 27cf4af..b1205b8 100644
37424 --- a/drivers/message/i2o/iop.c
37425 +++ b/drivers/message/i2o/iop.c
37426 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37427
37428 spin_lock_irqsave(&c->context_list_lock, flags);
37429
37430 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37431 - atomic_inc(&c->context_list_counter);
37432 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37433 + atomic_inc_unchecked(&c->context_list_counter);
37434
37435 - entry->context = atomic_read(&c->context_list_counter);
37436 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37437
37438 list_add(&entry->list, &c->context_list);
37439
37440 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37441
37442 #if BITS_PER_LONG == 64
37443 spin_lock_init(&c->context_list_lock);
37444 - atomic_set(&c->context_list_counter, 0);
37445 + atomic_set_unchecked(&c->context_list_counter, 0);
37446 INIT_LIST_HEAD(&c->context_list);
37447 #endif
37448
37449 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37450 index 78e3e85..66c9a0d 100644
37451 --- a/drivers/mfd/ab3100-core.c
37452 +++ b/drivers/mfd/ab3100-core.c
37453 @@ -777,7 +777,7 @@ struct ab_family_id {
37454 char *name;
37455 };
37456
37457 -static const struct ab_family_id ids[] __initdata = {
37458 +static const struct ab_family_id ids[] __initconst = {
37459 /* AB3100 */
37460 {
37461 .id = 0xc0,
37462 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37463 index 8d8c932..8104515 100644
37464 --- a/drivers/mfd/wm8350-i2c.c
37465 +++ b/drivers/mfd/wm8350-i2c.c
37466 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37467 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37468 int ret;
37469
37470 + pax_track_stack();
37471 +
37472 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37473 return -EINVAL;
37474
37475 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37476 index e4ff50b..4cc3f04 100644
37477 --- a/drivers/misc/kgdbts.c
37478 +++ b/drivers/misc/kgdbts.c
37479 @@ -118,7 +118,7 @@
37480 } while (0)
37481 #define MAX_CONFIG_LEN 40
37482
37483 -static struct kgdb_io kgdbts_io_ops;
37484 +static const struct kgdb_io kgdbts_io_ops;
37485 static char get_buf[BUFMAX];
37486 static int get_buf_cnt;
37487 static char put_buf[BUFMAX];
37488 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37489 module_put(THIS_MODULE);
37490 }
37491
37492 -static struct kgdb_io kgdbts_io_ops = {
37493 +static const struct kgdb_io kgdbts_io_ops = {
37494 .name = "kgdbts",
37495 .read_char = kgdbts_get_char,
37496 .write_char = kgdbts_put_char,
37497 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37498 index 37e7cfc..67cfb76 100644
37499 --- a/drivers/misc/sgi-gru/gruhandles.c
37500 +++ b/drivers/misc/sgi-gru/gruhandles.c
37501 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37502
37503 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37504 {
37505 - atomic_long_inc(&mcs_op_statistics[op].count);
37506 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37507 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37508 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37509 if (mcs_op_statistics[op].max < clks)
37510 mcs_op_statistics[op].max = clks;
37511 }
37512 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37513 index 3f2375c..467c6e6 100644
37514 --- a/drivers/misc/sgi-gru/gruprocfs.c
37515 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37516 @@ -32,9 +32,9 @@
37517
37518 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37519
37520 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37521 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37522 {
37523 - unsigned long val = atomic_long_read(v);
37524 + unsigned long val = atomic_long_read_unchecked(v);
37525
37526 if (val)
37527 seq_printf(s, "%16lu %s\n", val, id);
37528 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37529 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37530
37531 for (op = 0; op < mcsop_last; op++) {
37532 - count = atomic_long_read(&mcs_op_statistics[op].count);
37533 - total = atomic_long_read(&mcs_op_statistics[op].total);
37534 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37535 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37536 max = mcs_op_statistics[op].max;
37537 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37538 count ? total / count : 0, max);
37539 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37540 index 46990bc..4a251b5 100644
37541 --- a/drivers/misc/sgi-gru/grutables.h
37542 +++ b/drivers/misc/sgi-gru/grutables.h
37543 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37544 * GRU statistics.
37545 */
37546 struct gru_stats_s {
37547 - atomic_long_t vdata_alloc;
37548 - atomic_long_t vdata_free;
37549 - atomic_long_t gts_alloc;
37550 - atomic_long_t gts_free;
37551 - atomic_long_t vdata_double_alloc;
37552 - atomic_long_t gts_double_allocate;
37553 - atomic_long_t assign_context;
37554 - atomic_long_t assign_context_failed;
37555 - atomic_long_t free_context;
37556 - atomic_long_t load_user_context;
37557 - atomic_long_t load_kernel_context;
37558 - atomic_long_t lock_kernel_context;
37559 - atomic_long_t unlock_kernel_context;
37560 - atomic_long_t steal_user_context;
37561 - atomic_long_t steal_kernel_context;
37562 - atomic_long_t steal_context_failed;
37563 - atomic_long_t nopfn;
37564 - atomic_long_t break_cow;
37565 - atomic_long_t asid_new;
37566 - atomic_long_t asid_next;
37567 - atomic_long_t asid_wrap;
37568 - atomic_long_t asid_reuse;
37569 - atomic_long_t intr;
37570 - atomic_long_t intr_mm_lock_failed;
37571 - atomic_long_t call_os;
37572 - atomic_long_t call_os_offnode_reference;
37573 - atomic_long_t call_os_check_for_bug;
37574 - atomic_long_t call_os_wait_queue;
37575 - atomic_long_t user_flush_tlb;
37576 - atomic_long_t user_unload_context;
37577 - atomic_long_t user_exception;
37578 - atomic_long_t set_context_option;
37579 - atomic_long_t migrate_check;
37580 - atomic_long_t migrated_retarget;
37581 - atomic_long_t migrated_unload;
37582 - atomic_long_t migrated_unload_delay;
37583 - atomic_long_t migrated_nopfn_retarget;
37584 - atomic_long_t migrated_nopfn_unload;
37585 - atomic_long_t tlb_dropin;
37586 - atomic_long_t tlb_dropin_fail_no_asid;
37587 - atomic_long_t tlb_dropin_fail_upm;
37588 - atomic_long_t tlb_dropin_fail_invalid;
37589 - atomic_long_t tlb_dropin_fail_range_active;
37590 - atomic_long_t tlb_dropin_fail_idle;
37591 - atomic_long_t tlb_dropin_fail_fmm;
37592 - atomic_long_t tlb_dropin_fail_no_exception;
37593 - atomic_long_t tlb_dropin_fail_no_exception_war;
37594 - atomic_long_t tfh_stale_on_fault;
37595 - atomic_long_t mmu_invalidate_range;
37596 - atomic_long_t mmu_invalidate_page;
37597 - atomic_long_t mmu_clear_flush_young;
37598 - atomic_long_t flush_tlb;
37599 - atomic_long_t flush_tlb_gru;
37600 - atomic_long_t flush_tlb_gru_tgh;
37601 - atomic_long_t flush_tlb_gru_zero_asid;
37602 + atomic_long_unchecked_t vdata_alloc;
37603 + atomic_long_unchecked_t vdata_free;
37604 + atomic_long_unchecked_t gts_alloc;
37605 + atomic_long_unchecked_t gts_free;
37606 + atomic_long_unchecked_t vdata_double_alloc;
37607 + atomic_long_unchecked_t gts_double_allocate;
37608 + atomic_long_unchecked_t assign_context;
37609 + atomic_long_unchecked_t assign_context_failed;
37610 + atomic_long_unchecked_t free_context;
37611 + atomic_long_unchecked_t load_user_context;
37612 + atomic_long_unchecked_t load_kernel_context;
37613 + atomic_long_unchecked_t lock_kernel_context;
37614 + atomic_long_unchecked_t unlock_kernel_context;
37615 + atomic_long_unchecked_t steal_user_context;
37616 + atomic_long_unchecked_t steal_kernel_context;
37617 + atomic_long_unchecked_t steal_context_failed;
37618 + atomic_long_unchecked_t nopfn;
37619 + atomic_long_unchecked_t break_cow;
37620 + atomic_long_unchecked_t asid_new;
37621 + atomic_long_unchecked_t asid_next;
37622 + atomic_long_unchecked_t asid_wrap;
37623 + atomic_long_unchecked_t asid_reuse;
37624 + atomic_long_unchecked_t intr;
37625 + atomic_long_unchecked_t intr_mm_lock_failed;
37626 + atomic_long_unchecked_t call_os;
37627 + atomic_long_unchecked_t call_os_offnode_reference;
37628 + atomic_long_unchecked_t call_os_check_for_bug;
37629 + atomic_long_unchecked_t call_os_wait_queue;
37630 + atomic_long_unchecked_t user_flush_tlb;
37631 + atomic_long_unchecked_t user_unload_context;
37632 + atomic_long_unchecked_t user_exception;
37633 + atomic_long_unchecked_t set_context_option;
37634 + atomic_long_unchecked_t migrate_check;
37635 + atomic_long_unchecked_t migrated_retarget;
37636 + atomic_long_unchecked_t migrated_unload;
37637 + atomic_long_unchecked_t migrated_unload_delay;
37638 + atomic_long_unchecked_t migrated_nopfn_retarget;
37639 + atomic_long_unchecked_t migrated_nopfn_unload;
37640 + atomic_long_unchecked_t tlb_dropin;
37641 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37642 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37643 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37644 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37645 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37646 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37647 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37648 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37649 + atomic_long_unchecked_t tfh_stale_on_fault;
37650 + atomic_long_unchecked_t mmu_invalidate_range;
37651 + atomic_long_unchecked_t mmu_invalidate_page;
37652 + atomic_long_unchecked_t mmu_clear_flush_young;
37653 + atomic_long_unchecked_t flush_tlb;
37654 + atomic_long_unchecked_t flush_tlb_gru;
37655 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37656 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37657
37658 - atomic_long_t copy_gpa;
37659 + atomic_long_unchecked_t copy_gpa;
37660
37661 - atomic_long_t mesq_receive;
37662 - atomic_long_t mesq_receive_none;
37663 - atomic_long_t mesq_send;
37664 - atomic_long_t mesq_send_failed;
37665 - atomic_long_t mesq_noop;
37666 - atomic_long_t mesq_send_unexpected_error;
37667 - atomic_long_t mesq_send_lb_overflow;
37668 - atomic_long_t mesq_send_qlimit_reached;
37669 - atomic_long_t mesq_send_amo_nacked;
37670 - atomic_long_t mesq_send_put_nacked;
37671 - atomic_long_t mesq_qf_not_full;
37672 - atomic_long_t mesq_qf_locked;
37673 - atomic_long_t mesq_qf_noop_not_full;
37674 - atomic_long_t mesq_qf_switch_head_failed;
37675 - atomic_long_t mesq_qf_unexpected_error;
37676 - atomic_long_t mesq_noop_unexpected_error;
37677 - atomic_long_t mesq_noop_lb_overflow;
37678 - atomic_long_t mesq_noop_qlimit_reached;
37679 - atomic_long_t mesq_noop_amo_nacked;
37680 - atomic_long_t mesq_noop_put_nacked;
37681 + atomic_long_unchecked_t mesq_receive;
37682 + atomic_long_unchecked_t mesq_receive_none;
37683 + atomic_long_unchecked_t mesq_send;
37684 + atomic_long_unchecked_t mesq_send_failed;
37685 + atomic_long_unchecked_t mesq_noop;
37686 + atomic_long_unchecked_t mesq_send_unexpected_error;
37687 + atomic_long_unchecked_t mesq_send_lb_overflow;
37688 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37689 + atomic_long_unchecked_t mesq_send_amo_nacked;
37690 + atomic_long_unchecked_t mesq_send_put_nacked;
37691 + atomic_long_unchecked_t mesq_qf_not_full;
37692 + atomic_long_unchecked_t mesq_qf_locked;
37693 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37694 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37695 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37696 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37697 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37698 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37699 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37700 + atomic_long_unchecked_t mesq_noop_put_nacked;
37701
37702 };
37703
37704 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37705 cchop_deallocate, tghop_invalidate, mcsop_last};
37706
37707 struct mcs_op_statistic {
37708 - atomic_long_t count;
37709 - atomic_long_t total;
37710 + atomic_long_unchecked_t count;
37711 + atomic_long_unchecked_t total;
37712 unsigned long max;
37713 };
37714
37715 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37716
37717 #define STAT(id) do { \
37718 if (gru_options & OPT_STATS) \
37719 - atomic_long_inc(&gru_stats.id); \
37720 + atomic_long_inc_unchecked(&gru_stats.id); \
37721 } while (0)
37722
37723 #ifdef CONFIG_SGI_GRU_DEBUG
37724 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37725 index 2275126..12a9dbfb 100644
37726 --- a/drivers/misc/sgi-xp/xp.h
37727 +++ b/drivers/misc/sgi-xp/xp.h
37728 @@ -289,7 +289,7 @@ struct xpc_interface {
37729 xpc_notify_func, void *);
37730 void (*received) (short, int, void *);
37731 enum xp_retval (*partid_to_nasids) (short, void *);
37732 -};
37733 +} __no_const;
37734
37735 extern struct xpc_interface xpc_interface;
37736
37737 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37738 index b94d5f7..7f494c5 100644
37739 --- a/drivers/misc/sgi-xp/xpc.h
37740 +++ b/drivers/misc/sgi-xp/xpc.h
37741 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37742 void (*received_payload) (struct xpc_channel *, void *);
37743 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37744 };
37745 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37746
37747 /* struct xpc_partition act_state values (for XPC HB) */
37748
37749 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37750 /* found in xpc_main.c */
37751 extern struct device *xpc_part;
37752 extern struct device *xpc_chan;
37753 -extern struct xpc_arch_operations xpc_arch_ops;
37754 +extern xpc_arch_operations_no_const xpc_arch_ops;
37755 extern int xpc_disengage_timelimit;
37756 extern int xpc_disengage_timedout;
37757 extern int xpc_activate_IRQ_rcvd;
37758 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37759 index fd3688a..7e211a4 100644
37760 --- a/drivers/misc/sgi-xp/xpc_main.c
37761 +++ b/drivers/misc/sgi-xp/xpc_main.c
37762 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37763 .notifier_call = xpc_system_die,
37764 };
37765
37766 -struct xpc_arch_operations xpc_arch_ops;
37767 +xpc_arch_operations_no_const xpc_arch_ops;
37768
37769 /*
37770 * Timer function to enforce the timelimit on the partition disengage.
37771 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37772 index 8b70e03..700bda6 100644
37773 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37774 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37775 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37776 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37777 }
37778
37779 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37780 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37781 .setup_partitions = xpc_setup_partitions_sn2,
37782 .teardown_partitions = xpc_teardown_partitions_sn2,
37783 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37784 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37785 int ret;
37786 size_t buf_size;
37787
37788 - xpc_arch_ops = xpc_arch_ops_sn2;
37789 + pax_open_kernel();
37790 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37791 + pax_close_kernel();
37792
37793 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37794 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37795 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37796 index 8e08d71..7cb8c9b 100644
37797 --- a/drivers/misc/sgi-xp/xpc_uv.c
37798 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37799 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37800 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37801 }
37802
37803 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37804 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37805 .setup_partitions = xpc_setup_partitions_uv,
37806 .teardown_partitions = xpc_teardown_partitions_uv,
37807 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37808 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37809 int
37810 xpc_init_uv(void)
37811 {
37812 - xpc_arch_ops = xpc_arch_ops_uv;
37813 + pax_open_kernel();
37814 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37815 + pax_close_kernel();
37816
37817 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37818 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37819 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37820 index 6fd20b42..650efe3 100644
37821 --- a/drivers/mmc/host/sdhci-pci.c
37822 +++ b/drivers/mmc/host/sdhci-pci.c
37823 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37824 .probe = via_probe,
37825 };
37826
37827 -static const struct pci_device_id pci_ids[] __devinitdata = {
37828 +static const struct pci_device_id pci_ids[] __devinitconst = {
37829 {
37830 .vendor = PCI_VENDOR_ID_RICOH,
37831 .device = PCI_DEVICE_ID_RICOH_R5C822,
37832 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37833 index e7563a9..5f90ce5 100644
37834 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37835 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37836 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37837 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37838 unsigned long timeo = jiffies + HZ;
37839
37840 + pax_track_stack();
37841 +
37842 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37843 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37844 goto sleep;
37845 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37846 unsigned long initial_adr;
37847 int initial_len = len;
37848
37849 + pax_track_stack();
37850 +
37851 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37852 adr += chip->start;
37853 initial_adr = adr;
37854 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37855 int retries = 3;
37856 int ret;
37857
37858 + pax_track_stack();
37859 +
37860 adr += chip->start;
37861
37862 retry:
37863 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37864 index 0667a67..3ab97ed 100644
37865 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37866 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37867 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37868 unsigned long cmd_addr;
37869 struct cfi_private *cfi = map->fldrv_priv;
37870
37871 + pax_track_stack();
37872 +
37873 adr += chip->start;
37874
37875 /* Ensure cmd read/writes are aligned. */
37876 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37877 DECLARE_WAITQUEUE(wait, current);
37878 int wbufsize, z;
37879
37880 + pax_track_stack();
37881 +
37882 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37883 if (adr & (map_bankwidth(map)-1))
37884 return -EINVAL;
37885 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37886 DECLARE_WAITQUEUE(wait, current);
37887 int ret = 0;
37888
37889 + pax_track_stack();
37890 +
37891 adr += chip->start;
37892
37893 /* Let's determine this according to the interleave only once */
37894 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37895 unsigned long timeo = jiffies + HZ;
37896 DECLARE_WAITQUEUE(wait, current);
37897
37898 + pax_track_stack();
37899 +
37900 adr += chip->start;
37901
37902 /* Let's determine this according to the interleave only once */
37903 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37904 unsigned long timeo = jiffies + HZ;
37905 DECLARE_WAITQUEUE(wait, current);
37906
37907 + pax_track_stack();
37908 +
37909 adr += chip->start;
37910
37911 /* Let's determine this according to the interleave only once */
37912 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37913 index 5bf5f46..c5de373 100644
37914 --- a/drivers/mtd/devices/doc2000.c
37915 +++ b/drivers/mtd/devices/doc2000.c
37916 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37917
37918 /* The ECC will not be calculated correctly if less than 512 is written */
37919 /* DBB-
37920 - if (len != 0x200 && eccbuf)
37921 + if (len != 0x200)
37922 printk(KERN_WARNING
37923 "ECC needs a full sector write (adr: %lx size %lx)\n",
37924 (long) to, (long) len);
37925 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37926 index 0990f78..bb4e8a4 100644
37927 --- a/drivers/mtd/devices/doc2001.c
37928 +++ b/drivers/mtd/devices/doc2001.c
37929 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37930 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37931
37932 /* Don't allow read past end of device */
37933 - if (from >= this->totlen)
37934 + if (from >= this->totlen || !len)
37935 return -EINVAL;
37936
37937 /* Don't allow a single read to cross a 512-byte block boundary */
37938 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37939 index e56d6b4..f07e6cf 100644
37940 --- a/drivers/mtd/ftl.c
37941 +++ b/drivers/mtd/ftl.c
37942 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37943 loff_t offset;
37944 uint16_t srcunitswap = cpu_to_le16(srcunit);
37945
37946 + pax_track_stack();
37947 +
37948 eun = &part->EUNInfo[srcunit];
37949 xfer = &part->XferInfo[xferunit];
37950 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37951 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37952 index 8aca552..146446e 100755
37953 --- a/drivers/mtd/inftlcore.c
37954 +++ b/drivers/mtd/inftlcore.c
37955 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37956 struct inftl_oob oob;
37957 size_t retlen;
37958
37959 + pax_track_stack();
37960 +
37961 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37962 "pending=%d)\n", inftl, thisVUC, pendingblock);
37963
37964 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37965 index 32e82ae..ed50953 100644
37966 --- a/drivers/mtd/inftlmount.c
37967 +++ b/drivers/mtd/inftlmount.c
37968 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37969 struct INFTLPartition *ip;
37970 size_t retlen;
37971
37972 + pax_track_stack();
37973 +
37974 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37975
37976 /*
37977 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37978 index 79bf40f..fe5f8fd 100644
37979 --- a/drivers/mtd/lpddr/qinfo_probe.c
37980 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37981 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37982 {
37983 map_word pfow_val[4];
37984
37985 + pax_track_stack();
37986 +
37987 /* Check identification string */
37988 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37989 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37990 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37991 index 726a1b8..f46b460 100644
37992 --- a/drivers/mtd/mtdchar.c
37993 +++ b/drivers/mtd/mtdchar.c
37994 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37995 u_long size;
37996 struct mtd_info_user info;
37997
37998 + pax_track_stack();
37999 +
38000 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38001
38002 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38003 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38004 index 1002e18..26d82d5 100644
38005 --- a/drivers/mtd/nftlcore.c
38006 +++ b/drivers/mtd/nftlcore.c
38007 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38008 int inplace = 1;
38009 size_t retlen;
38010
38011 + pax_track_stack();
38012 +
38013 memset(BlockMap, 0xff, sizeof(BlockMap));
38014 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38015
38016 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38017 index 8b22b18..6fada85 100644
38018 --- a/drivers/mtd/nftlmount.c
38019 +++ b/drivers/mtd/nftlmount.c
38020 @@ -23,6 +23,7 @@
38021 #include <asm/errno.h>
38022 #include <linux/delay.h>
38023 #include <linux/slab.h>
38024 +#include <linux/sched.h>
38025 #include <linux/mtd/mtd.h>
38026 #include <linux/mtd/nand.h>
38027 #include <linux/mtd/nftl.h>
38028 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38029 struct mtd_info *mtd = nftl->mbd.mtd;
38030 unsigned int i;
38031
38032 + pax_track_stack();
38033 +
38034 /* Assume logical EraseSize == physical erasesize for starting the scan.
38035 We'll sort it out later if we find a MediaHeader which says otherwise */
38036 /* Actually, we won't. The new DiskOnChip driver has already scanned
38037 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38038 index 14cec04..d775b87 100644
38039 --- a/drivers/mtd/ubi/build.c
38040 +++ b/drivers/mtd/ubi/build.c
38041 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38042 static int __init bytes_str_to_int(const char *str)
38043 {
38044 char *endp;
38045 - unsigned long result;
38046 + unsigned long result, scale = 1;
38047
38048 result = simple_strtoul(str, &endp, 0);
38049 if (str == endp || result >= INT_MAX) {
38050 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38051
38052 switch (*endp) {
38053 case 'G':
38054 - result *= 1024;
38055 + scale *= 1024;
38056 case 'M':
38057 - result *= 1024;
38058 + scale *= 1024;
38059 case 'K':
38060 - result *= 1024;
38061 + scale *= 1024;
38062 if (endp[1] == 'i' && endp[2] == 'B')
38063 endp += 2;
38064 case '\0':
38065 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38066 return -EINVAL;
38067 }
38068
38069 - return result;
38070 + if ((intoverflow_t)result*scale >= INT_MAX) {
38071 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38072 + str);
38073 + return -EINVAL;
38074 + }
38075 +
38076 + return result*scale;
38077 }
38078
38079 /**
38080 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38081 index ab68886..ca405e8 100644
38082 --- a/drivers/net/atlx/atl2.c
38083 +++ b/drivers/net/atlx/atl2.c
38084 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38085 */
38086
38087 #define ATL2_PARAM(X, desc) \
38088 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38089 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38090 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38091 MODULE_PARM_DESC(X, desc);
38092 #else
38093 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38094 index 4874b2b..67f8526 100644
38095 --- a/drivers/net/bnx2.c
38096 +++ b/drivers/net/bnx2.c
38097 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38098 int rc = 0;
38099 u32 magic, csum;
38100
38101 + pax_track_stack();
38102 +
38103 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38104 goto test_nvram_done;
38105
38106 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38107 index fd3eb07..8a6978d 100644
38108 --- a/drivers/net/cxgb3/l2t.h
38109 +++ b/drivers/net/cxgb3/l2t.h
38110 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38111 */
38112 struct l2t_skb_cb {
38113 arp_failure_handler_func arp_failure_handler;
38114 -};
38115 +} __no_const;
38116
38117 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38118
38119 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38120 index 032cfe0..411af379 100644
38121 --- a/drivers/net/cxgb3/t3_hw.c
38122 +++ b/drivers/net/cxgb3/t3_hw.c
38123 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38124 int i, addr, ret;
38125 struct t3_vpd vpd;
38126
38127 + pax_track_stack();
38128 +
38129 /*
38130 * Card information is normally at VPD_BASE but some early cards had
38131 * it at 0.
38132 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38133 index d1e0563..b9e129c 100644
38134 --- a/drivers/net/e1000e/82571.c
38135 +++ b/drivers/net/e1000e/82571.c
38136 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38137 {
38138 struct e1000_hw *hw = &adapter->hw;
38139 struct e1000_mac_info *mac = &hw->mac;
38140 - struct e1000_mac_operations *func = &mac->ops;
38141 + e1000_mac_operations_no_const *func = &mac->ops;
38142 u32 swsm = 0;
38143 u32 swsm2 = 0;
38144 bool force_clear_smbi = false;
38145 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38146 temp = er32(ICRXDMTC);
38147 }
38148
38149 -static struct e1000_mac_operations e82571_mac_ops = {
38150 +static const struct e1000_mac_operations e82571_mac_ops = {
38151 /* .check_mng_mode: mac type dependent */
38152 /* .check_for_link: media type dependent */
38153 .id_led_init = e1000e_id_led_init,
38154 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38155 .setup_led = e1000e_setup_led_generic,
38156 };
38157
38158 -static struct e1000_phy_operations e82_phy_ops_igp = {
38159 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38160 .acquire_phy = e1000_get_hw_semaphore_82571,
38161 .check_reset_block = e1000e_check_reset_block_generic,
38162 .commit_phy = NULL,
38163 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38164 .cfg_on_link_up = NULL,
38165 };
38166
38167 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38168 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38169 .acquire_phy = e1000_get_hw_semaphore_82571,
38170 .check_reset_block = e1000e_check_reset_block_generic,
38171 .commit_phy = e1000e_phy_sw_reset,
38172 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38173 .cfg_on_link_up = NULL,
38174 };
38175
38176 -static struct e1000_phy_operations e82_phy_ops_bm = {
38177 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38178 .acquire_phy = e1000_get_hw_semaphore_82571,
38179 .check_reset_block = e1000e_check_reset_block_generic,
38180 .commit_phy = e1000e_phy_sw_reset,
38181 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38182 .cfg_on_link_up = NULL,
38183 };
38184
38185 -static struct e1000_nvm_operations e82571_nvm_ops = {
38186 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38187 .acquire_nvm = e1000_acquire_nvm_82571,
38188 .read_nvm = e1000e_read_nvm_eerd,
38189 .release_nvm = e1000_release_nvm_82571,
38190 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38191 index 47db9bd..fa58ccd 100644
38192 --- a/drivers/net/e1000e/e1000.h
38193 +++ b/drivers/net/e1000e/e1000.h
38194 @@ -375,9 +375,9 @@ struct e1000_info {
38195 u32 pba;
38196 u32 max_hw_frame_size;
38197 s32 (*get_variants)(struct e1000_adapter *);
38198 - struct e1000_mac_operations *mac_ops;
38199 - struct e1000_phy_operations *phy_ops;
38200 - struct e1000_nvm_operations *nvm_ops;
38201 + const struct e1000_mac_operations *mac_ops;
38202 + const struct e1000_phy_operations *phy_ops;
38203 + const struct e1000_nvm_operations *nvm_ops;
38204 };
38205
38206 /* hardware capability, feature, and workaround flags */
38207 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38208 index ae5d736..e9a93a1 100644
38209 --- a/drivers/net/e1000e/es2lan.c
38210 +++ b/drivers/net/e1000e/es2lan.c
38211 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38212 {
38213 struct e1000_hw *hw = &adapter->hw;
38214 struct e1000_mac_info *mac = &hw->mac;
38215 - struct e1000_mac_operations *func = &mac->ops;
38216 + e1000_mac_operations_no_const *func = &mac->ops;
38217
38218 /* Set media type */
38219 switch (adapter->pdev->device) {
38220 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38221 temp = er32(ICRXDMTC);
38222 }
38223
38224 -static struct e1000_mac_operations es2_mac_ops = {
38225 +static const struct e1000_mac_operations es2_mac_ops = {
38226 .id_led_init = e1000e_id_led_init,
38227 .check_mng_mode = e1000e_check_mng_mode_generic,
38228 /* check_for_link dependent on media type */
38229 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38230 .setup_led = e1000e_setup_led_generic,
38231 };
38232
38233 -static struct e1000_phy_operations es2_phy_ops = {
38234 +static const struct e1000_phy_operations es2_phy_ops = {
38235 .acquire_phy = e1000_acquire_phy_80003es2lan,
38236 .check_reset_block = e1000e_check_reset_block_generic,
38237 .commit_phy = e1000e_phy_sw_reset,
38238 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38239 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38240 };
38241
38242 -static struct e1000_nvm_operations es2_nvm_ops = {
38243 +static const struct e1000_nvm_operations es2_nvm_ops = {
38244 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38245 .read_nvm = e1000e_read_nvm_eerd,
38246 .release_nvm = e1000_release_nvm_80003es2lan,
38247 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38248 index 11f3b7c..6381887 100644
38249 --- a/drivers/net/e1000e/hw.h
38250 +++ b/drivers/net/e1000e/hw.h
38251 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38252 s32 (*setup_physical_interface)(struct e1000_hw *);
38253 s32 (*setup_led)(struct e1000_hw *);
38254 };
38255 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38256
38257 /* Function pointers for the PHY. */
38258 struct e1000_phy_operations {
38259 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38260 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38261 s32 (*cfg_on_link_up)(struct e1000_hw *);
38262 };
38263 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38264
38265 /* Function pointers for the NVM. */
38266 struct e1000_nvm_operations {
38267 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38268 s32 (*validate_nvm)(struct e1000_hw *);
38269 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38270 };
38271 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38272
38273 struct e1000_mac_info {
38274 - struct e1000_mac_operations ops;
38275 + e1000_mac_operations_no_const ops;
38276
38277 u8 addr[6];
38278 u8 perm_addr[6];
38279 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38280 };
38281
38282 struct e1000_phy_info {
38283 - struct e1000_phy_operations ops;
38284 + e1000_phy_operations_no_const ops;
38285
38286 enum e1000_phy_type type;
38287
38288 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38289 };
38290
38291 struct e1000_nvm_info {
38292 - struct e1000_nvm_operations ops;
38293 + e1000_nvm_operations_no_const ops;
38294
38295 enum e1000_nvm_type type;
38296 enum e1000_nvm_override override;
38297 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38298 index de39f9a..e28d3e0 100644
38299 --- a/drivers/net/e1000e/ich8lan.c
38300 +++ b/drivers/net/e1000e/ich8lan.c
38301 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38302 }
38303 }
38304
38305 -static struct e1000_mac_operations ich8_mac_ops = {
38306 +static const struct e1000_mac_operations ich8_mac_ops = {
38307 .id_led_init = e1000e_id_led_init,
38308 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38309 .check_for_link = e1000_check_for_copper_link_ich8lan,
38310 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38311 /* id_led_init dependent on mac type */
38312 };
38313
38314 -static struct e1000_phy_operations ich8_phy_ops = {
38315 +static const struct e1000_phy_operations ich8_phy_ops = {
38316 .acquire_phy = e1000_acquire_swflag_ich8lan,
38317 .check_reset_block = e1000_check_reset_block_ich8lan,
38318 .commit_phy = NULL,
38319 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38320 .write_phy_reg = e1000e_write_phy_reg_igp,
38321 };
38322
38323 -static struct e1000_nvm_operations ich8_nvm_ops = {
38324 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38325 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38326 .read_nvm = e1000_read_nvm_ich8lan,
38327 .release_nvm = e1000_release_nvm_ich8lan,
38328 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38329 index 18d5fbb..542d96d 100644
38330 --- a/drivers/net/fealnx.c
38331 +++ b/drivers/net/fealnx.c
38332 @@ -151,7 +151,7 @@ struct chip_info {
38333 int flags;
38334 };
38335
38336 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38337 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38338 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38339 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38340 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38341 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38342 index 0e5b54b..b503f82 100644
38343 --- a/drivers/net/hamradio/6pack.c
38344 +++ b/drivers/net/hamradio/6pack.c
38345 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38346 unsigned char buf[512];
38347 int count1;
38348
38349 + pax_track_stack();
38350 +
38351 if (!count)
38352 return;
38353
38354 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38355 index 5862282..7cce8cb 100644
38356 --- a/drivers/net/ibmveth.c
38357 +++ b/drivers/net/ibmveth.c
38358 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38359 NULL,
38360 };
38361
38362 -static struct sysfs_ops veth_pool_ops = {
38363 +static const struct sysfs_ops veth_pool_ops = {
38364 .show = veth_pool_show,
38365 .store = veth_pool_store,
38366 };
38367 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38368 index d617f2d..57b5309 100644
38369 --- a/drivers/net/igb/e1000_82575.c
38370 +++ b/drivers/net/igb/e1000_82575.c
38371 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38372 wr32(E1000_VT_CTL, vt_ctl);
38373 }
38374
38375 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38376 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38377 .reset_hw = igb_reset_hw_82575,
38378 .init_hw = igb_init_hw_82575,
38379 .check_for_link = igb_check_for_link_82575,
38380 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38381 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38382 };
38383
38384 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38385 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38386 .acquire = igb_acquire_phy_82575,
38387 .get_cfg_done = igb_get_cfg_done_82575,
38388 .release = igb_release_phy_82575,
38389 };
38390
38391 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38392 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38393 .acquire = igb_acquire_nvm_82575,
38394 .read = igb_read_nvm_eerd,
38395 .release = igb_release_nvm_82575,
38396 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38397 index 72081df..d855cf5 100644
38398 --- a/drivers/net/igb/e1000_hw.h
38399 +++ b/drivers/net/igb/e1000_hw.h
38400 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38401 s32 (*read_mac_addr)(struct e1000_hw *);
38402 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38403 };
38404 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38405
38406 struct e1000_phy_operations {
38407 s32 (*acquire)(struct e1000_hw *);
38408 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38409 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38410 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38411 };
38412 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38413
38414 struct e1000_nvm_operations {
38415 s32 (*acquire)(struct e1000_hw *);
38416 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38417 void (*release)(struct e1000_hw *);
38418 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38419 };
38420 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38421
38422 struct e1000_info {
38423 s32 (*get_invariants)(struct e1000_hw *);
38424 @@ -321,7 +324,7 @@ struct e1000_info {
38425 extern const struct e1000_info e1000_82575_info;
38426
38427 struct e1000_mac_info {
38428 - struct e1000_mac_operations ops;
38429 + e1000_mac_operations_no_const ops;
38430
38431 u8 addr[6];
38432 u8 perm_addr[6];
38433 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38434 };
38435
38436 struct e1000_phy_info {
38437 - struct e1000_phy_operations ops;
38438 + e1000_phy_operations_no_const ops;
38439
38440 enum e1000_phy_type type;
38441
38442 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38443 };
38444
38445 struct e1000_nvm_info {
38446 - struct e1000_nvm_operations ops;
38447 + e1000_nvm_operations_no_const ops;
38448
38449 enum e1000_nvm_type type;
38450 enum e1000_nvm_override override;
38451 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38452 s32 (*check_for_ack)(struct e1000_hw *, u16);
38453 s32 (*check_for_rst)(struct e1000_hw *, u16);
38454 };
38455 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38456
38457 struct e1000_mbx_stats {
38458 u32 msgs_tx;
38459 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38460 };
38461
38462 struct e1000_mbx_info {
38463 - struct e1000_mbx_operations ops;
38464 + e1000_mbx_operations_no_const ops;
38465 struct e1000_mbx_stats stats;
38466 u32 timeout;
38467 u32 usec_delay;
38468 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38469 index 1e8ce37..549c453 100644
38470 --- a/drivers/net/igbvf/vf.h
38471 +++ b/drivers/net/igbvf/vf.h
38472 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38473 s32 (*read_mac_addr)(struct e1000_hw *);
38474 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38475 };
38476 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38477
38478 struct e1000_mac_info {
38479 - struct e1000_mac_operations ops;
38480 + e1000_mac_operations_no_const ops;
38481 u8 addr[6];
38482 u8 perm_addr[6];
38483
38484 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38485 s32 (*check_for_ack)(struct e1000_hw *);
38486 s32 (*check_for_rst)(struct e1000_hw *);
38487 };
38488 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38489
38490 struct e1000_mbx_stats {
38491 u32 msgs_tx;
38492 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38493 };
38494
38495 struct e1000_mbx_info {
38496 - struct e1000_mbx_operations ops;
38497 + e1000_mbx_operations_no_const ops;
38498 struct e1000_mbx_stats stats;
38499 u32 timeout;
38500 u32 usec_delay;
38501 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38502 index aa7286b..a61394f 100644
38503 --- a/drivers/net/iseries_veth.c
38504 +++ b/drivers/net/iseries_veth.c
38505 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38506 NULL
38507 };
38508
38509 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38510 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38511 .show = veth_cnx_attribute_show
38512 };
38513
38514 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38515 NULL
38516 };
38517
38518 -static struct sysfs_ops veth_port_sysfs_ops = {
38519 +static const struct sysfs_ops veth_port_sysfs_ops = {
38520 .show = veth_port_attribute_show
38521 };
38522
38523 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38524 index 8aa44dc..fa1e797 100644
38525 --- a/drivers/net/ixgb/ixgb_main.c
38526 +++ b/drivers/net/ixgb/ixgb_main.c
38527 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38528 u32 rctl;
38529 int i;
38530
38531 + pax_track_stack();
38532 +
38533 /* Check for Promiscuous and All Multicast modes */
38534
38535 rctl = IXGB_READ_REG(hw, RCTL);
38536 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38537 index af35e1d..8781785 100644
38538 --- a/drivers/net/ixgb/ixgb_param.c
38539 +++ b/drivers/net/ixgb/ixgb_param.c
38540 @@ -260,6 +260,9 @@ void __devinit
38541 ixgb_check_options(struct ixgb_adapter *adapter)
38542 {
38543 int bd = adapter->bd_number;
38544 +
38545 + pax_track_stack();
38546 +
38547 if (bd >= IXGB_MAX_NIC) {
38548 printk(KERN_NOTICE
38549 "Warning: no configuration for board #%i\n", bd);
38550 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38551 index b17aa73..ed74540 100644
38552 --- a/drivers/net/ixgbe/ixgbe_type.h
38553 +++ b/drivers/net/ixgbe/ixgbe_type.h
38554 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38555 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38556 s32 (*update_checksum)(struct ixgbe_hw *);
38557 };
38558 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38559
38560 struct ixgbe_mac_operations {
38561 s32 (*init_hw)(struct ixgbe_hw *);
38562 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38563 /* Flow Control */
38564 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38565 };
38566 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38567
38568 struct ixgbe_phy_operations {
38569 s32 (*identify)(struct ixgbe_hw *);
38570 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38571 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38572 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38573 };
38574 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38575
38576 struct ixgbe_eeprom_info {
38577 - struct ixgbe_eeprom_operations ops;
38578 + ixgbe_eeprom_operations_no_const ops;
38579 enum ixgbe_eeprom_type type;
38580 u32 semaphore_delay;
38581 u16 word_size;
38582 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38583 };
38584
38585 struct ixgbe_mac_info {
38586 - struct ixgbe_mac_operations ops;
38587 + ixgbe_mac_operations_no_const ops;
38588 enum ixgbe_mac_type type;
38589 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38590 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38591 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38592 };
38593
38594 struct ixgbe_phy_info {
38595 - struct ixgbe_phy_operations ops;
38596 + ixgbe_phy_operations_no_const ops;
38597 struct mdio_if_info mdio;
38598 enum ixgbe_phy_type type;
38599 u32 id;
38600 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38601 index 291a505..2543756 100644
38602 --- a/drivers/net/mlx4/main.c
38603 +++ b/drivers/net/mlx4/main.c
38604 @@ -38,6 +38,7 @@
38605 #include <linux/errno.h>
38606 #include <linux/pci.h>
38607 #include <linux/dma-mapping.h>
38608 +#include <linux/sched.h>
38609
38610 #include <linux/mlx4/device.h>
38611 #include <linux/mlx4/doorbell.h>
38612 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38613 u64 icm_size;
38614 int err;
38615
38616 + pax_track_stack();
38617 +
38618 err = mlx4_QUERY_FW(dev);
38619 if (err) {
38620 if (err == -EACCES)
38621 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38622 index 2dce134..fa5ce75 100644
38623 --- a/drivers/net/niu.c
38624 +++ b/drivers/net/niu.c
38625 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38626 int i, num_irqs, err;
38627 u8 first_ldg;
38628
38629 + pax_track_stack();
38630 +
38631 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38632 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38633 ldg_num_map[i] = first_ldg + i;
38634 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38635 index c1b3f09..97cd8c4 100644
38636 --- a/drivers/net/pcnet32.c
38637 +++ b/drivers/net/pcnet32.c
38638 @@ -79,7 +79,7 @@ static int cards_found;
38639 /*
38640 * VLB I/O addresses
38641 */
38642 -static unsigned int pcnet32_portlist[] __initdata =
38643 +static unsigned int pcnet32_portlist[] __devinitdata =
38644 { 0x300, 0x320, 0x340, 0x360, 0 };
38645
38646 static int pcnet32_debug = 0;
38647 @@ -267,7 +267,7 @@ struct pcnet32_private {
38648 struct sk_buff **rx_skbuff;
38649 dma_addr_t *tx_dma_addr;
38650 dma_addr_t *rx_dma_addr;
38651 - struct pcnet32_access a;
38652 + struct pcnet32_access *a;
38653 spinlock_t lock; /* Guard lock */
38654 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38655 unsigned int rx_ring_size; /* current rx ring size */
38656 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38657 u16 val;
38658
38659 netif_wake_queue(dev);
38660 - val = lp->a.read_csr(ioaddr, CSR3);
38661 + val = lp->a->read_csr(ioaddr, CSR3);
38662 val &= 0x00ff;
38663 - lp->a.write_csr(ioaddr, CSR3, val);
38664 + lp->a->write_csr(ioaddr, CSR3, val);
38665 napi_enable(&lp->napi);
38666 }
38667
38668 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38669 r = mii_link_ok(&lp->mii_if);
38670 } else if (lp->chip_version >= PCNET32_79C970A) {
38671 ulong ioaddr = dev->base_addr; /* card base I/O address */
38672 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38673 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38674 } else { /* can not detect link on really old chips */
38675 r = 1;
38676 }
38677 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38678 pcnet32_netif_stop(dev);
38679
38680 spin_lock_irqsave(&lp->lock, flags);
38681 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38682 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38683
38684 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38685
38686 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38687 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38688 {
38689 struct pcnet32_private *lp = netdev_priv(dev);
38690 - struct pcnet32_access *a = &lp->a; /* access to registers */
38691 + struct pcnet32_access *a = lp->a; /* access to registers */
38692 ulong ioaddr = dev->base_addr; /* card base I/O address */
38693 struct sk_buff *skb; /* sk buff */
38694 int x, i; /* counters */
38695 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38696 pcnet32_netif_stop(dev);
38697
38698 spin_lock_irqsave(&lp->lock, flags);
38699 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38700 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38701
38702 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38703
38704 /* Reset the PCNET32 */
38705 - lp->a.reset(ioaddr);
38706 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38707 + lp->a->reset(ioaddr);
38708 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38709
38710 /* switch pcnet32 to 32bit mode */
38711 - lp->a.write_bcr(ioaddr, 20, 2);
38712 + lp->a->write_bcr(ioaddr, 20, 2);
38713
38714 /* purge & init rings but don't actually restart */
38715 pcnet32_restart(dev, 0x0000);
38716
38717 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38718 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38719
38720 /* Initialize Transmit buffers. */
38721 size = data_len + 15;
38722 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38723
38724 /* set int loopback in CSR15 */
38725 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38726 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38727 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38728
38729 teststatus = cpu_to_le16(0x8000);
38730 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38731 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38732
38733 /* Check status of descriptors */
38734 for (x = 0; x < numbuffs; x++) {
38735 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38736 }
38737 }
38738
38739 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38740 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38741 wmb();
38742 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38743 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38744 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38745 pcnet32_restart(dev, CSR0_NORMAL);
38746 } else {
38747 pcnet32_purge_rx_ring(dev);
38748 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38749 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38750 }
38751 spin_unlock_irqrestore(&lp->lock, flags);
38752
38753 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38754 static void pcnet32_led_blink_callback(struct net_device *dev)
38755 {
38756 struct pcnet32_private *lp = netdev_priv(dev);
38757 - struct pcnet32_access *a = &lp->a;
38758 + struct pcnet32_access *a = lp->a;
38759 ulong ioaddr = dev->base_addr;
38760 unsigned long flags;
38761 int i;
38762 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38763 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38764 {
38765 struct pcnet32_private *lp = netdev_priv(dev);
38766 - struct pcnet32_access *a = &lp->a;
38767 + struct pcnet32_access *a = lp->a;
38768 ulong ioaddr = dev->base_addr;
38769 unsigned long flags;
38770 int i, regs[4];
38771 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38772 {
38773 int csr5;
38774 struct pcnet32_private *lp = netdev_priv(dev);
38775 - struct pcnet32_access *a = &lp->a;
38776 + struct pcnet32_access *a = lp->a;
38777 ulong ioaddr = dev->base_addr;
38778 int ticks;
38779
38780 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38781 spin_lock_irqsave(&lp->lock, flags);
38782 if (pcnet32_tx(dev)) {
38783 /* reset the chip to clear the error condition, then restart */
38784 - lp->a.reset(ioaddr);
38785 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38786 + lp->a->reset(ioaddr);
38787 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38788 pcnet32_restart(dev, CSR0_START);
38789 netif_wake_queue(dev);
38790 }
38791 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38792 __napi_complete(napi);
38793
38794 /* clear interrupt masks */
38795 - val = lp->a.read_csr(ioaddr, CSR3);
38796 + val = lp->a->read_csr(ioaddr, CSR3);
38797 val &= 0x00ff;
38798 - lp->a.write_csr(ioaddr, CSR3, val);
38799 + lp->a->write_csr(ioaddr, CSR3, val);
38800
38801 /* Set interrupt enable. */
38802 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38803 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38804
38805 spin_unlock_irqrestore(&lp->lock, flags);
38806 }
38807 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38808 int i, csr0;
38809 u16 *buff = ptr;
38810 struct pcnet32_private *lp = netdev_priv(dev);
38811 - struct pcnet32_access *a = &lp->a;
38812 + struct pcnet32_access *a = lp->a;
38813 ulong ioaddr = dev->base_addr;
38814 unsigned long flags;
38815
38816 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38817 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38818 if (lp->phymask & (1 << j)) {
38819 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38820 - lp->a.write_bcr(ioaddr, 33,
38821 + lp->a->write_bcr(ioaddr, 33,
38822 (j << 5) | i);
38823 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38824 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38825 }
38826 }
38827 }
38828 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38829 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38830 lp->options |= PCNET32_PORT_FD;
38831
38832 - lp->a = *a;
38833 + lp->a = a;
38834
38835 /* prior to register_netdev, dev->name is not yet correct */
38836 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38837 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38838 if (lp->mii) {
38839 /* lp->phycount and lp->phymask are set to 0 by memset above */
38840
38841 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38842 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38843 /* scan for PHYs */
38844 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38845 unsigned short id1, id2;
38846 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38847 "Found PHY %04x:%04x at address %d.\n",
38848 id1, id2, i);
38849 }
38850 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38851 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38852 if (lp->phycount > 1) {
38853 lp->options |= PCNET32_PORT_MII;
38854 }
38855 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38856 }
38857
38858 /* Reset the PCNET32 */
38859 - lp->a.reset(ioaddr);
38860 + lp->a->reset(ioaddr);
38861
38862 /* switch pcnet32 to 32bit mode */
38863 - lp->a.write_bcr(ioaddr, 20, 2);
38864 + lp->a->write_bcr(ioaddr, 20, 2);
38865
38866 if (netif_msg_ifup(lp))
38867 printk(KERN_DEBUG
38868 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38869 (u32) (lp->init_dma_addr));
38870
38871 /* set/reset autoselect bit */
38872 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38873 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38874 if (lp->options & PCNET32_PORT_ASEL)
38875 val |= 2;
38876 - lp->a.write_bcr(ioaddr, 2, val);
38877 + lp->a->write_bcr(ioaddr, 2, val);
38878
38879 /* handle full duplex setting */
38880 if (lp->mii_if.full_duplex) {
38881 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38882 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38883 if (lp->options & PCNET32_PORT_FD) {
38884 val |= 1;
38885 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38886 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38887 if (lp->chip_version == 0x2627)
38888 val |= 3;
38889 }
38890 - lp->a.write_bcr(ioaddr, 9, val);
38891 + lp->a->write_bcr(ioaddr, 9, val);
38892 }
38893
38894 /* set/reset GPSI bit in test register */
38895 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38896 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38897 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38898 val |= 0x10;
38899 - lp->a.write_csr(ioaddr, 124, val);
38900 + lp->a->write_csr(ioaddr, 124, val);
38901
38902 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38903 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38904 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38905 * duplex, and/or enable auto negotiation, and clear DANAS
38906 */
38907 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38908 - lp->a.write_bcr(ioaddr, 32,
38909 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38910 + lp->a->write_bcr(ioaddr, 32,
38911 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38912 /* disable Auto Negotiation, set 10Mpbs, HD */
38913 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38914 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38915 if (lp->options & PCNET32_PORT_FD)
38916 val |= 0x10;
38917 if (lp->options & PCNET32_PORT_100)
38918 val |= 0x08;
38919 - lp->a.write_bcr(ioaddr, 32, val);
38920 + lp->a->write_bcr(ioaddr, 32, val);
38921 } else {
38922 if (lp->options & PCNET32_PORT_ASEL) {
38923 - lp->a.write_bcr(ioaddr, 32,
38924 - lp->a.read_bcr(ioaddr,
38925 + lp->a->write_bcr(ioaddr, 32,
38926 + lp->a->read_bcr(ioaddr,
38927 32) | 0x0080);
38928 /* enable auto negotiate, setup, disable fd */
38929 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38930 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38931 val |= 0x20;
38932 - lp->a.write_bcr(ioaddr, 32, val);
38933 + lp->a->write_bcr(ioaddr, 32, val);
38934 }
38935 }
38936 } else {
38937 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38938 * There is really no good other way to handle multiple PHYs
38939 * other than turning off all automatics
38940 */
38941 - val = lp->a.read_bcr(ioaddr, 2);
38942 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38943 - val = lp->a.read_bcr(ioaddr, 32);
38944 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38945 + val = lp->a->read_bcr(ioaddr, 2);
38946 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38947 + val = lp->a->read_bcr(ioaddr, 32);
38948 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38949
38950 if (!(lp->options & PCNET32_PORT_ASEL)) {
38951 /* setup ecmd */
38952 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38953 ecmd.speed =
38954 lp->
38955 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38956 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38957 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38958
38959 if (lp->options & PCNET32_PORT_FD) {
38960 ecmd.duplex = DUPLEX_FULL;
38961 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38962 ecmd.duplex = DUPLEX_HALF;
38963 bcr9 |= ~(1 << 0);
38964 }
38965 - lp->a.write_bcr(ioaddr, 9, bcr9);
38966 + lp->a->write_bcr(ioaddr, 9, bcr9);
38967 }
38968
38969 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38970 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38971
38972 #ifdef DO_DXSUFLO
38973 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38974 - val = lp->a.read_csr(ioaddr, CSR3);
38975 + val = lp->a->read_csr(ioaddr, CSR3);
38976 val |= 0x40;
38977 - lp->a.write_csr(ioaddr, CSR3, val);
38978 + lp->a->write_csr(ioaddr, CSR3, val);
38979 }
38980 #endif
38981
38982 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38983 napi_enable(&lp->napi);
38984
38985 /* Re-initialize the PCNET32, and start it when done. */
38986 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38987 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38988 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38989 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38990
38991 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38992 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38993 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38994 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38995
38996 netif_start_queue(dev);
38997
38998 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38999
39000 i = 0;
39001 while (i++ < 100)
39002 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39003 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39004 break;
39005 /*
39006 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
39007 * reports that doing so triggers a bug in the '974.
39008 */
39009 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
39010 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
39011
39012 if (netif_msg_ifup(lp))
39013 printk(KERN_DEBUG
39014 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
39015 dev->name, i,
39016 (u32) (lp->init_dma_addr),
39017 - lp->a.read_csr(ioaddr, CSR0));
39018 + lp->a->read_csr(ioaddr, CSR0));
39019
39020 spin_unlock_irqrestore(&lp->lock, flags);
39021
39022 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
39023 * Switch back to 16bit mode to avoid problems with dumb
39024 * DOS packet driver after a warm reboot
39025 */
39026 - lp->a.write_bcr(ioaddr, 20, 4);
39027 + lp->a->write_bcr(ioaddr, 20, 4);
39028
39029 err_free_irq:
39030 spin_unlock_irqrestore(&lp->lock, flags);
39031 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39032
39033 /* wait for stop */
39034 for (i = 0; i < 100; i++)
39035 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39036 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39037 break;
39038
39039 if (i >= 100 && netif_msg_drv(lp))
39040 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39041 return;
39042
39043 /* ReInit Ring */
39044 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39045 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39046 i = 0;
39047 while (i++ < 1000)
39048 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39049 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39050 break;
39051
39052 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39053 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39054 }
39055
39056 static void pcnet32_tx_timeout(struct net_device *dev)
39057 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39058 if (pcnet32_debug & NETIF_MSG_DRV)
39059 printk(KERN_ERR
39060 "%s: transmit timed out, status %4.4x, resetting.\n",
39061 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39062 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39063 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39064 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39065 dev->stats.tx_errors++;
39066 if (netif_msg_tx_err(lp)) {
39067 int i;
39068 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39069 if (netif_msg_tx_queued(lp)) {
39070 printk(KERN_DEBUG
39071 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39072 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39073 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39074 }
39075
39076 /* Default status -- will not enable Successful-TxDone
39077 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39078 dev->stats.tx_bytes += skb->len;
39079
39080 /* Trigger an immediate send poll. */
39081 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39082 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39083
39084 dev->trans_start = jiffies;
39085
39086 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39087
39088 spin_lock(&lp->lock);
39089
39090 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39091 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39092 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39093 if (csr0 == 0xffff) {
39094 break; /* PCMCIA remove happened */
39095 }
39096 /* Acknowledge all of the current interrupt sources ASAP. */
39097 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39098 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39099
39100 if (netif_msg_intr(lp))
39101 printk(KERN_DEBUG
39102 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39103 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39104 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39105
39106 /* Log misc errors. */
39107 if (csr0 & 0x4000)
39108 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39109 if (napi_schedule_prep(&lp->napi)) {
39110 u16 val;
39111 /* set interrupt masks */
39112 - val = lp->a.read_csr(ioaddr, CSR3);
39113 + val = lp->a->read_csr(ioaddr, CSR3);
39114 val |= 0x5f00;
39115 - lp->a.write_csr(ioaddr, CSR3, val);
39116 + lp->a->write_csr(ioaddr, CSR3, val);
39117
39118 __napi_schedule(&lp->napi);
39119 break;
39120 }
39121 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39122 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39123 }
39124
39125 if (netif_msg_intr(lp))
39126 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39127 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39128 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39129
39130 spin_unlock(&lp->lock);
39131
39132 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39133
39134 spin_lock_irqsave(&lp->lock, flags);
39135
39136 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39137 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39138
39139 if (netif_msg_ifdown(lp))
39140 printk(KERN_DEBUG
39141 "%s: Shutting down ethercard, status was %2.2x.\n",
39142 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39143 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39144
39145 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39146 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39147 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39148
39149 /*
39150 * Switch back to 16bit mode to avoid problems with dumb
39151 * DOS packet driver after a warm reboot
39152 */
39153 - lp->a.write_bcr(ioaddr, 20, 4);
39154 + lp->a->write_bcr(ioaddr, 20, 4);
39155
39156 spin_unlock_irqrestore(&lp->lock, flags);
39157
39158 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39159 unsigned long flags;
39160
39161 spin_lock_irqsave(&lp->lock, flags);
39162 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39163 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39164 spin_unlock_irqrestore(&lp->lock, flags);
39165
39166 return &dev->stats;
39167 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39168 if (dev->flags & IFF_ALLMULTI) {
39169 ib->filter[0] = cpu_to_le32(~0U);
39170 ib->filter[1] = cpu_to_le32(~0U);
39171 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39172 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39173 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39174 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39175 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39176 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39177 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39178 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39179 return;
39180 }
39181 /* clear the multicast filter */
39182 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39183 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39184 }
39185 for (i = 0; i < 4; i++)
39186 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39187 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39188 le16_to_cpu(mcast_table[i]));
39189 return;
39190 }
39191 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39192
39193 spin_lock_irqsave(&lp->lock, flags);
39194 suspended = pcnet32_suspend(dev, &flags, 0);
39195 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39196 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39197 if (dev->flags & IFF_PROMISC) {
39198 /* Log any net taps. */
39199 if (netif_msg_hw(lp))
39200 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39201 lp->init_block->mode =
39202 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39203 7);
39204 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39205 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39206 } else {
39207 lp->init_block->mode =
39208 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39209 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39210 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39211 pcnet32_load_multicast(dev);
39212 }
39213
39214 if (suspended) {
39215 int csr5;
39216 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39217 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39218 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39219 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39220 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39221 } else {
39222 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39223 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39224 pcnet32_restart(dev, CSR0_NORMAL);
39225 netif_wake_queue(dev);
39226 }
39227 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39228 if (!lp->mii)
39229 return 0;
39230
39231 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39232 - val_out = lp->a.read_bcr(ioaddr, 34);
39233 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39234 + val_out = lp->a->read_bcr(ioaddr, 34);
39235
39236 return val_out;
39237 }
39238 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39239 if (!lp->mii)
39240 return;
39241
39242 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39243 - lp->a.write_bcr(ioaddr, 34, val);
39244 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39245 + lp->a->write_bcr(ioaddr, 34, val);
39246 }
39247
39248 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39249 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39250 curr_link = mii_link_ok(&lp->mii_if);
39251 } else {
39252 ulong ioaddr = dev->base_addr; /* card base I/O address */
39253 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39254 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39255 }
39256 if (!curr_link) {
39257 if (prev_link || verbose) {
39258 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39259 (ecmd.duplex ==
39260 DUPLEX_FULL) ? "full" : "half");
39261 }
39262 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39263 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39264 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39265 if (lp->mii_if.full_duplex)
39266 bcr9 |= (1 << 0);
39267 else
39268 bcr9 &= ~(1 << 0);
39269 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39270 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39271 }
39272 } else {
39273 if (netif_msg_link(lp))
39274 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39275 index 7cc9898..6eb50d3 100644
39276 --- a/drivers/net/sis190.c
39277 +++ b/drivers/net/sis190.c
39278 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39279 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39280 struct net_device *dev)
39281 {
39282 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39283 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39284 struct sis190_private *tp = netdev_priv(dev);
39285 struct pci_dev *isa_bridge;
39286 u8 reg, tmp8;
39287 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39288 index e13685a..60c948c 100644
39289 --- a/drivers/net/sundance.c
39290 +++ b/drivers/net/sundance.c
39291 @@ -225,7 +225,7 @@ enum {
39292 struct pci_id_info {
39293 const char *name;
39294 };
39295 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39296 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39299 {"D-Link DFE-580TX 4 port Server Adapter"},
39300 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39301 index 529f55a..cccaa18 100644
39302 --- a/drivers/net/tg3.h
39303 +++ b/drivers/net/tg3.h
39304 @@ -95,6 +95,7 @@
39305 #define CHIPREV_ID_5750_A0 0x4000
39306 #define CHIPREV_ID_5750_A1 0x4001
39307 #define CHIPREV_ID_5750_A3 0x4003
39308 +#define CHIPREV_ID_5750_C1 0x4201
39309 #define CHIPREV_ID_5750_C2 0x4202
39310 #define CHIPREV_ID_5752_A0_HW 0x5000
39311 #define CHIPREV_ID_5752_A0 0x6000
39312 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39313 index b9db1b5..720f9ce 100644
39314 --- a/drivers/net/tokenring/abyss.c
39315 +++ b/drivers/net/tokenring/abyss.c
39316 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39317
39318 static int __init abyss_init (void)
39319 {
39320 - abyss_netdev_ops = tms380tr_netdev_ops;
39321 + pax_open_kernel();
39322 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39323
39324 - abyss_netdev_ops.ndo_open = abyss_open;
39325 - abyss_netdev_ops.ndo_stop = abyss_close;
39326 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39327 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39328 + pax_close_kernel();
39329
39330 return pci_register_driver(&abyss_driver);
39331 }
39332 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39333 index 456f8bf..373e56d 100644
39334 --- a/drivers/net/tokenring/madgemc.c
39335 +++ b/drivers/net/tokenring/madgemc.c
39336 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39337
39338 static int __init madgemc_init (void)
39339 {
39340 - madgemc_netdev_ops = tms380tr_netdev_ops;
39341 - madgemc_netdev_ops.ndo_open = madgemc_open;
39342 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39343 + pax_open_kernel();
39344 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39345 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39346 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39347 + pax_close_kernel();
39348
39349 return mca_register_driver (&madgemc_driver);
39350 }
39351 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39352 index 16e8783..925bd49 100644
39353 --- a/drivers/net/tokenring/proteon.c
39354 +++ b/drivers/net/tokenring/proteon.c
39355 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39356 struct platform_device *pdev;
39357 int i, num = 0, err = 0;
39358
39359 - proteon_netdev_ops = tms380tr_netdev_ops;
39360 - proteon_netdev_ops.ndo_open = proteon_open;
39361 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39362 + pax_open_kernel();
39363 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39364 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39365 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39366 + pax_close_kernel();
39367
39368 err = platform_driver_register(&proteon_driver);
39369 if (err)
39370 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39371 index 46db5c5..37c1536 100644
39372 --- a/drivers/net/tokenring/skisa.c
39373 +++ b/drivers/net/tokenring/skisa.c
39374 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39375 struct platform_device *pdev;
39376 int i, num = 0, err = 0;
39377
39378 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39379 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39380 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39381 + pax_open_kernel();
39382 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39383 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39384 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39385 + pax_close_kernel();
39386
39387 err = platform_driver_register(&sk_isa_driver);
39388 if (err)
39389 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39390 index 74e5ba4..5cf6bc9 100644
39391 --- a/drivers/net/tulip/de2104x.c
39392 +++ b/drivers/net/tulip/de2104x.c
39393 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39394 struct de_srom_info_leaf *il;
39395 void *bufp;
39396
39397 + pax_track_stack();
39398 +
39399 /* download entire eeprom */
39400 for (i = 0; i < DE_EEPROM_WORDS; i++)
39401 ((__le16 *)ee_data)[i] =
39402 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39403 index a8349b7..90f9dfe 100644
39404 --- a/drivers/net/tulip/de4x5.c
39405 +++ b/drivers/net/tulip/de4x5.c
39406 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39407 for (i=0; i<ETH_ALEN; i++) {
39408 tmp.addr[i] = dev->dev_addr[i];
39409 }
39410 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39411 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39412 break;
39413
39414 case DE4X5_SET_HWADDR: /* Set the hardware address */
39415 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39416 spin_lock_irqsave(&lp->lock, flags);
39417 memcpy(&statbuf, &lp->pktStats, ioc->len);
39418 spin_unlock_irqrestore(&lp->lock, flags);
39419 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39420 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39421 return -EFAULT;
39422 break;
39423 }
39424 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39425 index 391acd3..56d11cd 100644
39426 --- a/drivers/net/tulip/eeprom.c
39427 +++ b/drivers/net/tulip/eeprom.c
39428 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39429 {NULL}};
39430
39431
39432 -static const char *block_name[] __devinitdata = {
39433 +static const char *block_name[] __devinitconst = {
39434 "21140 non-MII",
39435 "21140 MII PHY",
39436 "21142 Serial PHY",
39437 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39438 index b38d3b7..b1cff23 100644
39439 --- a/drivers/net/tulip/winbond-840.c
39440 +++ b/drivers/net/tulip/winbond-840.c
39441 @@ -235,7 +235,7 @@ struct pci_id_info {
39442 int drv_flags; /* Driver use, intended as capability flags. */
39443 };
39444
39445 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39446 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39447 { /* Sometime a Level-One switch card. */
39448 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39449 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39450 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39451 index f450bc9..2b747c8 100644
39452 --- a/drivers/net/usb/hso.c
39453 +++ b/drivers/net/usb/hso.c
39454 @@ -71,7 +71,7 @@
39455 #include <asm/byteorder.h>
39456 #include <linux/serial_core.h>
39457 #include <linux/serial.h>
39458 -
39459 +#include <asm/local.h>
39460
39461 #define DRIVER_VERSION "1.2"
39462 #define MOD_AUTHOR "Option Wireless"
39463 @@ -258,7 +258,7 @@ struct hso_serial {
39464
39465 /* from usb_serial_port */
39466 struct tty_struct *tty;
39467 - int open_count;
39468 + local_t open_count;
39469 spinlock_t serial_lock;
39470
39471 int (*write_data) (struct hso_serial *serial);
39472 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39473 struct urb *urb;
39474
39475 urb = serial->rx_urb[0];
39476 - if (serial->open_count > 0) {
39477 + if (local_read(&serial->open_count) > 0) {
39478 count = put_rxbuf_data(urb, serial);
39479 if (count == -1)
39480 return;
39481 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39482 DUMP1(urb->transfer_buffer, urb->actual_length);
39483
39484 /* Anyone listening? */
39485 - if (serial->open_count == 0)
39486 + if (local_read(&serial->open_count) == 0)
39487 return;
39488
39489 if (status == 0) {
39490 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39491 spin_unlock_irq(&serial->serial_lock);
39492
39493 /* check for port already opened, if not set the termios */
39494 - serial->open_count++;
39495 - if (serial->open_count == 1) {
39496 + if (local_inc_return(&serial->open_count) == 1) {
39497 tty->low_latency = 1;
39498 serial->rx_state = RX_IDLE;
39499 /* Force default termio settings */
39500 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39501 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39502 if (result) {
39503 hso_stop_serial_device(serial->parent);
39504 - serial->open_count--;
39505 + local_dec(&serial->open_count);
39506 kref_put(&serial->parent->ref, hso_serial_ref_free);
39507 }
39508 } else {
39509 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39510
39511 /* reset the rts and dtr */
39512 /* do the actual close */
39513 - serial->open_count--;
39514 + local_dec(&serial->open_count);
39515
39516 - if (serial->open_count <= 0) {
39517 - serial->open_count = 0;
39518 + if (local_read(&serial->open_count) <= 0) {
39519 + local_set(&serial->open_count, 0);
39520 spin_lock_irq(&serial->serial_lock);
39521 if (serial->tty == tty) {
39522 serial->tty->driver_data = NULL;
39523 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39524
39525 /* the actual setup */
39526 spin_lock_irqsave(&serial->serial_lock, flags);
39527 - if (serial->open_count)
39528 + if (local_read(&serial->open_count))
39529 _hso_serial_set_termios(tty, old);
39530 else
39531 tty->termios = old;
39532 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39533 /* Start all serial ports */
39534 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39535 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39536 - if (dev2ser(serial_table[i])->open_count) {
39537 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39538 result =
39539 hso_start_serial_device(serial_table[i], GFP_NOIO);
39540 hso_kick_transmit(dev2ser(serial_table[i]));
39541 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39542 index 3e94f0c..ffdd926 100644
39543 --- a/drivers/net/vxge/vxge-config.h
39544 +++ b/drivers/net/vxge/vxge-config.h
39545 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39546 void (*link_down)(struct __vxge_hw_device *devh);
39547 void (*crit_err)(struct __vxge_hw_device *devh,
39548 enum vxge_hw_event type, u64 ext_data);
39549 -};
39550 +} __no_const;
39551
39552 /*
39553 * struct __vxge_hw_blockpool_entry - Block private data structure
39554 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39555 index 068d7a9..35293de 100644
39556 --- a/drivers/net/vxge/vxge-main.c
39557 +++ b/drivers/net/vxge/vxge-main.c
39558 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39559 struct sk_buff *completed[NR_SKB_COMPLETED];
39560 int more;
39561
39562 + pax_track_stack();
39563 +
39564 do {
39565 more = 0;
39566 skb_ptr = completed;
39567 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39568 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39569 int index;
39570
39571 + pax_track_stack();
39572 +
39573 /*
39574 * Filling
39575 * - itable with bucket numbers
39576 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39577 index 461742b..81be42e 100644
39578 --- a/drivers/net/vxge/vxge-traffic.h
39579 +++ b/drivers/net/vxge/vxge-traffic.h
39580 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39581 struct vxge_hw_mempool_dma *dma_object,
39582 u32 index,
39583 u32 is_last);
39584 -};
39585 +} __no_const;
39586
39587 void
39588 __vxge_hw_mempool_destroy(
39589 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39590 index cd8cb95..4153b79 100644
39591 --- a/drivers/net/wan/cycx_x25.c
39592 +++ b/drivers/net/wan/cycx_x25.c
39593 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39594 unsigned char hex[1024],
39595 * phex = hex;
39596
39597 + pax_track_stack();
39598 +
39599 if (len >= (sizeof(hex) / 2))
39600 len = (sizeof(hex) / 2) - 1;
39601
39602 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39603 index aa9248f..a4e3c3b 100644
39604 --- a/drivers/net/wan/hdlc_x25.c
39605 +++ b/drivers/net/wan/hdlc_x25.c
39606 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39607
39608 static int x25_open(struct net_device *dev)
39609 {
39610 - struct lapb_register_struct cb;
39611 + static struct lapb_register_struct cb = {
39612 + .connect_confirmation = x25_connected,
39613 + .connect_indication = x25_connected,
39614 + .disconnect_confirmation = x25_disconnected,
39615 + .disconnect_indication = x25_disconnected,
39616 + .data_indication = x25_data_indication,
39617 + .data_transmit = x25_data_transmit
39618 + };
39619 int result;
39620
39621 - cb.connect_confirmation = x25_connected;
39622 - cb.connect_indication = x25_connected;
39623 - cb.disconnect_confirmation = x25_disconnected;
39624 - cb.disconnect_indication = x25_disconnected;
39625 - cb.data_indication = x25_data_indication;
39626 - cb.data_transmit = x25_data_transmit;
39627 -
39628 result = lapb_register(dev, &cb);
39629 if (result != LAPB_OK)
39630 return result;
39631 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39632 index 5ad287c..783b020 100644
39633 --- a/drivers/net/wimax/i2400m/usb-fw.c
39634 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39635 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39636 int do_autopm = 1;
39637 DECLARE_COMPLETION_ONSTACK(notif_completion);
39638
39639 + pax_track_stack();
39640 +
39641 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39642 i2400m, ack, ack_size);
39643 BUG_ON(_ack == i2400m->bm_ack_buf);
39644 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39645 index 6c26840..62c97c3 100644
39646 --- a/drivers/net/wireless/airo.c
39647 +++ b/drivers/net/wireless/airo.c
39648 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39649 BSSListElement * loop_net;
39650 BSSListElement * tmp_net;
39651
39652 + pax_track_stack();
39653 +
39654 /* Blow away current list of scan results */
39655 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39656 list_move_tail (&loop_net->list, &ai->network_free_list);
39657 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39658 WepKeyRid wkr;
39659 int rc;
39660
39661 + pax_track_stack();
39662 +
39663 memset( &mySsid, 0, sizeof( mySsid ) );
39664 kfree (ai->flash);
39665 ai->flash = NULL;
39666 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39667 __le32 *vals = stats.vals;
39668 int len;
39669
39670 + pax_track_stack();
39671 +
39672 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39673 return -ENOMEM;
39674 data = (struct proc_data *)file->private_data;
39675 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39676 /* If doLoseSync is not 1, we won't do a Lose Sync */
39677 int doLoseSync = -1;
39678
39679 + pax_track_stack();
39680 +
39681 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39682 return -ENOMEM;
39683 data = (struct proc_data *)file->private_data;
39684 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39685 int i;
39686 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39687
39688 + pax_track_stack();
39689 +
39690 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39691 if (!qual)
39692 return -ENOMEM;
39693 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39694 CapabilityRid cap_rid;
39695 __le32 *vals = stats_rid.vals;
39696
39697 + pax_track_stack();
39698 +
39699 /* Get stats out of the card */
39700 clear_bit(JOB_WSTATS, &local->jobs);
39701 if (local->power.event) {
39702 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39703 index 747508c..82e965d 100644
39704 --- a/drivers/net/wireless/ath/ath5k/debug.c
39705 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39706 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39707 unsigned int v;
39708 u64 tsf;
39709
39710 + pax_track_stack();
39711 +
39712 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39713 len += snprintf(buf+len, sizeof(buf)-len,
39714 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39715 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39716 unsigned int len = 0;
39717 unsigned int i;
39718
39719 + pax_track_stack();
39720 +
39721 len += snprintf(buf+len, sizeof(buf)-len,
39722 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39723
39724 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39725 index 2be4c22..593b1eb 100644
39726 --- a/drivers/net/wireless/ath/ath9k/debug.c
39727 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39728 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39729 char buf[512];
39730 unsigned int len = 0;
39731
39732 + pax_track_stack();
39733 +
39734 len += snprintf(buf + len, sizeof(buf) - len,
39735 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39736 len += snprintf(buf + len, sizeof(buf) - len,
39737 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39738 int i;
39739 u8 addr[ETH_ALEN];
39740
39741 + pax_track_stack();
39742 +
39743 len += snprintf(buf + len, sizeof(buf) - len,
39744 "primary: %s (%s chan=%d ht=%d)\n",
39745 wiphy_name(sc->pri_wiphy->hw->wiphy),
39746 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39747 index 80b19a4..dab3a45 100644
39748 --- a/drivers/net/wireless/b43/debugfs.c
39749 +++ b/drivers/net/wireless/b43/debugfs.c
39750 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39751 struct b43_debugfs_fops {
39752 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39753 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39754 - struct file_operations fops;
39755 + const struct file_operations fops;
39756 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39757 size_t file_struct_offset;
39758 };
39759 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39760 index 1f85ac5..c99b4b4 100644
39761 --- a/drivers/net/wireless/b43legacy/debugfs.c
39762 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39763 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39764 struct b43legacy_debugfs_fops {
39765 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39766 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39767 - struct file_operations fops;
39768 + const struct file_operations fops;
39769 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39770 size_t file_struct_offset;
39771 /* Take wl->irq_lock before calling read/write? */
39772 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39773 index 43102bf..3b569c3 100644
39774 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39775 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39776 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39777 int err;
39778 DECLARE_SSID_BUF(ssid);
39779
39780 + pax_track_stack();
39781 +
39782 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39783
39784 if (ssid_len)
39785 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39786 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39787 int err;
39788
39789 + pax_track_stack();
39790 +
39791 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39792 idx, keylen, len);
39793
39794 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39795 index 282b1f7..169f0cf 100644
39796 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39797 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39798 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39799 unsigned long flags;
39800 DECLARE_SSID_BUF(ssid);
39801
39802 + pax_track_stack();
39803 +
39804 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39805 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39806 print_ssid(ssid, info_element->data, info_element->len),
39807 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39808 index 950267a..80d5fd2 100644
39809 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39810 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39811 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39812 },
39813 };
39814
39815 -static struct iwl_ops iwl1000_ops = {
39816 +static const struct iwl_ops iwl1000_ops = {
39817 .ucode = &iwl5000_ucode,
39818 .lib = &iwl1000_lib,
39819 .hcmd = &iwl5000_hcmd,
39820 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39821 index 56bfcc3..b348020 100644
39822 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39823 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39824 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39825 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39826 };
39827
39828 -static struct iwl_ops iwl3945_ops = {
39829 +static const struct iwl_ops iwl3945_ops = {
39830 .ucode = &iwl3945_ucode,
39831 .lib = &iwl3945_lib,
39832 .hcmd = &iwl3945_hcmd,
39833 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39834 index 585b8d4..e142963 100644
39835 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39836 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39837 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39838 },
39839 };
39840
39841 -static struct iwl_ops iwl4965_ops = {
39842 +static const struct iwl_ops iwl4965_ops = {
39843 .ucode = &iwl4965_ucode,
39844 .lib = &iwl4965_lib,
39845 .hcmd = &iwl4965_hcmd,
39846 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39847 index 1f423f2..e37c192 100644
39848 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39849 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39850 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39851 },
39852 };
39853
39854 -struct iwl_ops iwl5000_ops = {
39855 +const struct iwl_ops iwl5000_ops = {
39856 .ucode = &iwl5000_ucode,
39857 .lib = &iwl5000_lib,
39858 .hcmd = &iwl5000_hcmd,
39859 .utils = &iwl5000_hcmd_utils,
39860 };
39861
39862 -static struct iwl_ops iwl5150_ops = {
39863 +static const struct iwl_ops iwl5150_ops = {
39864 .ucode = &iwl5000_ucode,
39865 .lib = &iwl5150_lib,
39866 .hcmd = &iwl5000_hcmd,
39867 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39868 index 1473452..f07d5e1 100644
39869 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39870 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39871 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39872 .calc_rssi = iwl5000_calc_rssi,
39873 };
39874
39875 -static struct iwl_ops iwl6000_ops = {
39876 +static const struct iwl_ops iwl6000_ops = {
39877 .ucode = &iwl5000_ucode,
39878 .lib = &iwl6000_lib,
39879 .hcmd = &iwl5000_hcmd,
39880 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39881 index 1a3dfa2..b3e0a61 100644
39882 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39883 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39884 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39885 u8 active_index = 0;
39886 s32 tpt = 0;
39887
39888 + pax_track_stack();
39889 +
39890 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39891
39892 if (!ieee80211_is_data(hdr->frame_control) ||
39893 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39894 u8 valid_tx_ant = 0;
39895 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39896
39897 + pax_track_stack();
39898 +
39899 /* Override starting rate (index 0) if needed for debug purposes */
39900 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39901
39902 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39903 index 0e56d78..6a3c107 100644
39904 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39905 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39906 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39907 if (iwl_debug_level & IWL_DL_INFO)
39908 dev_printk(KERN_DEBUG, &(pdev->dev),
39909 "Disabling hw_scan\n");
39910 - iwl_hw_ops.hw_scan = NULL;
39911 + pax_open_kernel();
39912 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39913 + pax_close_kernel();
39914 }
39915
39916 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39917 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39918 index cbc6290..eb323d7 100644
39919 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39920 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39921 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39922 #endif
39923
39924 #else
39925 -#define IWL_DEBUG(__priv, level, fmt, args...)
39926 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39927 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39928 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39929 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39930 void *p, u32 len)
39931 {}
39932 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39933 index a198bcf..8e68233 100644
39934 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39935 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39936 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39937 int pos = 0;
39938 const size_t bufsz = sizeof(buf);
39939
39940 + pax_track_stack();
39941 +
39942 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39943 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39944 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39945 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39946 const size_t bufsz = sizeof(buf);
39947 ssize_t ret;
39948
39949 + pax_track_stack();
39950 +
39951 for (i = 0; i < AC_NUM; i++) {
39952 pos += scnprintf(buf + pos, bufsz - pos,
39953 "\tcw_min\tcw_max\taifsn\ttxop\n");
39954 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39955 index 3539ea4..b174bfa 100644
39956 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39957 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39958 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39959
39960 /* shared structures from iwl-5000.c */
39961 extern struct iwl_mod_params iwl50_mod_params;
39962 -extern struct iwl_ops iwl5000_ops;
39963 +extern const struct iwl_ops iwl5000_ops;
39964 extern struct iwl_ucode_ops iwl5000_ucode;
39965 extern struct iwl_lib_ops iwl5000_lib;
39966 extern struct iwl_hcmd_ops iwl5000_hcmd;
39967 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39968 index 619590d..69235ee 100644
39969 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39970 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39971 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39972 */
39973 if (iwl3945_mod_params.disable_hw_scan) {
39974 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39975 - iwl3945_hw_ops.hw_scan = NULL;
39976 + pax_open_kernel();
39977 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39978 + pax_close_kernel();
39979 }
39980
39981
39982 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39983 index 1465379..fe4d78b 100644
39984 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39985 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39986 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39987 int buf_len = 512;
39988 size_t len = 0;
39989
39990 + pax_track_stack();
39991 +
39992 if (*ppos != 0)
39993 return 0;
39994 if (count < sizeof(buf))
39995 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39996 index 893a55c..7f66a50 100644
39997 --- a/drivers/net/wireless/libertas/debugfs.c
39998 +++ b/drivers/net/wireless/libertas/debugfs.c
39999 @@ -708,7 +708,7 @@ out_unlock:
40000 struct lbs_debugfs_files {
40001 const char *name;
40002 int perm;
40003 - struct file_operations fops;
40004 + const struct file_operations fops;
40005 };
40006
40007 static const struct lbs_debugfs_files debugfs_files[] = {
40008 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40009 index 2ecbedb..42704f0 100644
40010 --- a/drivers/net/wireless/rndis_wlan.c
40011 +++ b/drivers/net/wireless/rndis_wlan.c
40012 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40013
40014 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
40015
40016 - if (rts_threshold < 0 || rts_threshold > 2347)
40017 + if (rts_threshold > 2347)
40018 rts_threshold = 2347;
40019
40020 tmp = cpu_to_le32(rts_threshold);
40021 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40022 index 334ccd6..47f8944 100644
40023 --- a/drivers/oprofile/buffer_sync.c
40024 +++ b/drivers/oprofile/buffer_sync.c
40025 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40026 if (cookie == NO_COOKIE)
40027 offset = pc;
40028 if (cookie == INVALID_COOKIE) {
40029 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40030 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40031 offset = pc;
40032 }
40033 if (cookie != last_cookie) {
40034 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40035 /* add userspace sample */
40036
40037 if (!mm) {
40038 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40039 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40040 return 0;
40041 }
40042
40043 cookie = lookup_dcookie(mm, s->eip, &offset);
40044
40045 if (cookie == INVALID_COOKIE) {
40046 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40047 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40048 return 0;
40049 }
40050
40051 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40052 /* ignore backtraces if failed to add a sample */
40053 if (state == sb_bt_start) {
40054 state = sb_bt_ignore;
40055 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40056 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40057 }
40058 }
40059 release_mm(mm);
40060 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40061 index 5df60a6..72f5c1c 100644
40062 --- a/drivers/oprofile/event_buffer.c
40063 +++ b/drivers/oprofile/event_buffer.c
40064 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40065 }
40066
40067 if (buffer_pos == buffer_size) {
40068 - atomic_inc(&oprofile_stats.event_lost_overflow);
40069 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40070 return;
40071 }
40072
40073 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40074 index dc8a042..fe5f315 100644
40075 --- a/drivers/oprofile/oprof.c
40076 +++ b/drivers/oprofile/oprof.c
40077 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40078 if (oprofile_ops.switch_events())
40079 return;
40080
40081 - atomic_inc(&oprofile_stats.multiplex_counter);
40082 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40083 start_switch_worker();
40084 }
40085
40086 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40087 index 61689e8..387f7f8 100644
40088 --- a/drivers/oprofile/oprofile_stats.c
40089 +++ b/drivers/oprofile/oprofile_stats.c
40090 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40091 cpu_buf->sample_invalid_eip = 0;
40092 }
40093
40094 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40095 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40096 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40097 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40098 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40099 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40100 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40101 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40102 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40103 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40104 }
40105
40106
40107 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40108 index 0b54e46..a37c527 100644
40109 --- a/drivers/oprofile/oprofile_stats.h
40110 +++ b/drivers/oprofile/oprofile_stats.h
40111 @@ -13,11 +13,11 @@
40112 #include <asm/atomic.h>
40113
40114 struct oprofile_stat_struct {
40115 - atomic_t sample_lost_no_mm;
40116 - atomic_t sample_lost_no_mapping;
40117 - atomic_t bt_lost_no_mapping;
40118 - atomic_t event_lost_overflow;
40119 - atomic_t multiplex_counter;
40120 + atomic_unchecked_t sample_lost_no_mm;
40121 + atomic_unchecked_t sample_lost_no_mapping;
40122 + atomic_unchecked_t bt_lost_no_mapping;
40123 + atomic_unchecked_t event_lost_overflow;
40124 + atomic_unchecked_t multiplex_counter;
40125 };
40126
40127 extern struct oprofile_stat_struct oprofile_stats;
40128 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40129 index 2766a6d..80c77e2 100644
40130 --- a/drivers/oprofile/oprofilefs.c
40131 +++ b/drivers/oprofile/oprofilefs.c
40132 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40133
40134
40135 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40136 - char const *name, atomic_t *val)
40137 + char const *name, atomic_unchecked_t *val)
40138 {
40139 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40140 &atomic_ro_fops, 0444);
40141 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40142 index 13a64bc..ad62835 100644
40143 --- a/drivers/parisc/pdc_stable.c
40144 +++ b/drivers/parisc/pdc_stable.c
40145 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40146 return ret;
40147 }
40148
40149 -static struct sysfs_ops pdcspath_attr_ops = {
40150 +static const struct sysfs_ops pdcspath_attr_ops = {
40151 .show = pdcspath_attr_show,
40152 .store = pdcspath_attr_store,
40153 };
40154 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40155 index 8eefe56..40751a7 100644
40156 --- a/drivers/parport/procfs.c
40157 +++ b/drivers/parport/procfs.c
40158 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40159
40160 *ppos += len;
40161
40162 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40163 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40164 }
40165
40166 #ifdef CONFIG_PARPORT_1284
40167 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40168
40169 *ppos += len;
40170
40171 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40172 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40173 }
40174 #endif /* IEEE1284.3 support. */
40175
40176 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40177 index 73e7d8e..c80f3d2 100644
40178 --- a/drivers/pci/hotplug/acpiphp_glue.c
40179 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40180 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40181 }
40182
40183
40184 -static struct acpi_dock_ops acpiphp_dock_ops = {
40185 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40186 .handler = handle_hotplug_event_func,
40187 };
40188
40189 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40190 index 9fff878..ad0ad53 100644
40191 --- a/drivers/pci/hotplug/cpci_hotplug.h
40192 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40193 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40194 int (*hardware_test) (struct slot* slot, u32 value);
40195 u8 (*get_power) (struct slot* slot);
40196 int (*set_power) (struct slot* slot, int value);
40197 -};
40198 +} __no_const;
40199
40200 struct cpci_hp_controller {
40201 unsigned int irq;
40202 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40203 index 76ba8a1..20ca857 100644
40204 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40205 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40206 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40207
40208 void compaq_nvram_init (void __iomem *rom_start)
40209 {
40210 +
40211 +#ifndef CONFIG_PAX_KERNEXEC
40212 if (rom_start) {
40213 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40214 }
40215 +#endif
40216 +
40217 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40218
40219 /* initialize our int15 lock */
40220 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40221 index 6151389..0a894ef 100644
40222 --- a/drivers/pci/hotplug/fakephp.c
40223 +++ b/drivers/pci/hotplug/fakephp.c
40224 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40225 }
40226
40227 static struct kobj_type legacy_ktype = {
40228 - .sysfs_ops = &(struct sysfs_ops){
40229 + .sysfs_ops = &(const struct sysfs_ops){
40230 .store = legacy_store, .show = legacy_show
40231 },
40232 .release = &legacy_release,
40233 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40234 index 5b680df..fe05b7e 100644
40235 --- a/drivers/pci/intel-iommu.c
40236 +++ b/drivers/pci/intel-iommu.c
40237 @@ -2643,7 +2643,7 @@ error:
40238 return 0;
40239 }
40240
40241 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40242 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40243 unsigned long offset, size_t size,
40244 enum dma_data_direction dir,
40245 struct dma_attrs *attrs)
40246 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40247 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40248 }
40249
40250 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40251 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40252 size_t size, enum dma_data_direction dir,
40253 struct dma_attrs *attrs)
40254 {
40255 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40256 }
40257 }
40258
40259 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40260 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40261 dma_addr_t *dma_handle, gfp_t flags)
40262 {
40263 void *vaddr;
40264 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40265 return NULL;
40266 }
40267
40268 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40269 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40270 dma_addr_t dma_handle)
40271 {
40272 int order;
40273 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40274 free_pages((unsigned long)vaddr, order);
40275 }
40276
40277 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40278 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40279 int nelems, enum dma_data_direction dir,
40280 struct dma_attrs *attrs)
40281 {
40282 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40283 return nelems;
40284 }
40285
40286 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40287 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40288 enum dma_data_direction dir, struct dma_attrs *attrs)
40289 {
40290 int i;
40291 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40292 return nelems;
40293 }
40294
40295 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40296 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40297 {
40298 return !dma_addr;
40299 }
40300
40301 -struct dma_map_ops intel_dma_ops = {
40302 +const struct dma_map_ops intel_dma_ops = {
40303 .alloc_coherent = intel_alloc_coherent,
40304 .free_coherent = intel_free_coherent,
40305 .map_sg = intel_map_sg,
40306 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40307 index 5b7056c..607bc94 100644
40308 --- a/drivers/pci/pcie/aspm.c
40309 +++ b/drivers/pci/pcie/aspm.c
40310 @@ -27,9 +27,9 @@
40311 #define MODULE_PARAM_PREFIX "pcie_aspm."
40312
40313 /* Note: those are not register definitions */
40314 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40315 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40316 -#define ASPM_STATE_L1 (4) /* L1 state */
40317 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40318 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40319 +#define ASPM_STATE_L1 (4U) /* L1 state */
40320 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40321 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40322
40323 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40324 index 8105e32..ca10419 100644
40325 --- a/drivers/pci/probe.c
40326 +++ b/drivers/pci/probe.c
40327 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40328 return ret;
40329 }
40330
40331 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40332 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40333 struct device_attribute *attr,
40334 char *buf)
40335 {
40336 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40337 }
40338
40339 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40340 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40341 struct device_attribute *attr,
40342 char *buf)
40343 {
40344 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40345 index a03ad8c..024b0da 100644
40346 --- a/drivers/pci/proc.c
40347 +++ b/drivers/pci/proc.c
40348 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40349 static int __init pci_proc_init(void)
40350 {
40351 struct pci_dev *dev = NULL;
40352 +
40353 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40355 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40356 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40357 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40358 +#endif
40359 +#else
40360 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40361 +#endif
40362 proc_create("devices", 0, proc_bus_pci_dir,
40363 &proc_bus_pci_dev_operations);
40364 proc_initialized = 1;
40365 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40366 index 8c02b6c..5584d8e 100644
40367 --- a/drivers/pci/slot.c
40368 +++ b/drivers/pci/slot.c
40369 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40370 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40371 }
40372
40373 -static struct sysfs_ops pci_slot_sysfs_ops = {
40374 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40375 .show = pci_slot_attr_show,
40376 .store = pci_slot_attr_store,
40377 };
40378 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40379 index 30cf71d2..50938f1 100644
40380 --- a/drivers/pcmcia/pcmcia_ioctl.c
40381 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40382 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40383 return -EFAULT;
40384 }
40385 }
40386 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40387 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40388 if (!buf)
40389 return -ENOMEM;
40390
40391 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40392 index 52183c4..b224c69 100644
40393 --- a/drivers/platform/x86/acer-wmi.c
40394 +++ b/drivers/platform/x86/acer-wmi.c
40395 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40396 return 0;
40397 }
40398
40399 -static struct backlight_ops acer_bl_ops = {
40400 +static const struct backlight_ops acer_bl_ops = {
40401 .get_brightness = read_brightness,
40402 .update_status = update_bl_status,
40403 };
40404 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40405 index 767cb61..a87380b 100644
40406 --- a/drivers/platform/x86/asus-laptop.c
40407 +++ b/drivers/platform/x86/asus-laptop.c
40408 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40409 */
40410 static int read_brightness(struct backlight_device *bd);
40411 static int update_bl_status(struct backlight_device *bd);
40412 -static struct backlight_ops asusbl_ops = {
40413 +static const struct backlight_ops asusbl_ops = {
40414 .get_brightness = read_brightness,
40415 .update_status = update_bl_status,
40416 };
40417 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40418 index d66c07a..a4abaac 100644
40419 --- a/drivers/platform/x86/asus_acpi.c
40420 +++ b/drivers/platform/x86/asus_acpi.c
40421 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40422 return 0;
40423 }
40424
40425 -static struct backlight_ops asus_backlight_data = {
40426 +static const struct backlight_ops asus_backlight_data = {
40427 .get_brightness = read_brightness,
40428 .update_status = set_brightness_status,
40429 };
40430 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40431 index 11003bb..550ff1b 100644
40432 --- a/drivers/platform/x86/compal-laptop.c
40433 +++ b/drivers/platform/x86/compal-laptop.c
40434 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40435 return set_lcd_level(b->props.brightness);
40436 }
40437
40438 -static struct backlight_ops compalbl_ops = {
40439 +static const struct backlight_ops compalbl_ops = {
40440 .get_brightness = bl_get_brightness,
40441 .update_status = bl_update_status,
40442 };
40443 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40444 index 07a74da..9dc99fa 100644
40445 --- a/drivers/platform/x86/dell-laptop.c
40446 +++ b/drivers/platform/x86/dell-laptop.c
40447 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40448 return buffer.output[1];
40449 }
40450
40451 -static struct backlight_ops dell_ops = {
40452 +static const struct backlight_ops dell_ops = {
40453 .get_brightness = dell_get_intensity,
40454 .update_status = dell_send_intensity,
40455 };
40456 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40457 index c533b1c..5c81f22 100644
40458 --- a/drivers/platform/x86/eeepc-laptop.c
40459 +++ b/drivers/platform/x86/eeepc-laptop.c
40460 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40461 */
40462 static int read_brightness(struct backlight_device *bd);
40463 static int update_bl_status(struct backlight_device *bd);
40464 -static struct backlight_ops eeepcbl_ops = {
40465 +static const struct backlight_ops eeepcbl_ops = {
40466 .get_brightness = read_brightness,
40467 .update_status = update_bl_status,
40468 };
40469 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40470 index bcd4ba8..a249b35 100644
40471 --- a/drivers/platform/x86/fujitsu-laptop.c
40472 +++ b/drivers/platform/x86/fujitsu-laptop.c
40473 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40474 return ret;
40475 }
40476
40477 -static struct backlight_ops fujitsubl_ops = {
40478 +static const struct backlight_ops fujitsubl_ops = {
40479 .get_brightness = bl_get_brightness,
40480 .update_status = bl_update_status,
40481 };
40482 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40483 index 759763d..1093ba2 100644
40484 --- a/drivers/platform/x86/msi-laptop.c
40485 +++ b/drivers/platform/x86/msi-laptop.c
40486 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40487 return set_lcd_level(b->props.brightness);
40488 }
40489
40490 -static struct backlight_ops msibl_ops = {
40491 +static const struct backlight_ops msibl_ops = {
40492 .get_brightness = bl_get_brightness,
40493 .update_status = bl_update_status,
40494 };
40495 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40496 index fe7cf01..9012d8d 100644
40497 --- a/drivers/platform/x86/panasonic-laptop.c
40498 +++ b/drivers/platform/x86/panasonic-laptop.c
40499 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40500 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40501 }
40502
40503 -static struct backlight_ops pcc_backlight_ops = {
40504 +static const struct backlight_ops pcc_backlight_ops = {
40505 .get_brightness = bl_get,
40506 .update_status = bl_set_status,
40507 };
40508 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40509 index a2a742c..b37e25e 100644
40510 --- a/drivers/platform/x86/sony-laptop.c
40511 +++ b/drivers/platform/x86/sony-laptop.c
40512 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40513 }
40514
40515 static struct backlight_device *sony_backlight_device;
40516 -static struct backlight_ops sony_backlight_ops = {
40517 +static const struct backlight_ops sony_backlight_ops = {
40518 .update_status = sony_backlight_update_status,
40519 .get_brightness = sony_backlight_get_brightness,
40520 };
40521 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40522 index 68271ae..5e8fb10 100644
40523 --- a/drivers/platform/x86/thinkpad_acpi.c
40524 +++ b/drivers/platform/x86/thinkpad_acpi.c
40525 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40526 return 0;
40527 }
40528
40529 -void static hotkey_mask_warn_incomplete_mask(void)
40530 +static void hotkey_mask_warn_incomplete_mask(void)
40531 {
40532 /* log only what the user can fix... */
40533 const u32 wantedmask = hotkey_driver_mask &
40534 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40535 BACKLIGHT_UPDATE_HOTKEY);
40536 }
40537
40538 -static struct backlight_ops ibm_backlight_data = {
40539 +static const struct backlight_ops ibm_backlight_data = {
40540 .get_brightness = brightness_get,
40541 .update_status = brightness_update_status,
40542 };
40543 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40544 index 51c0a8b..0786629 100644
40545 --- a/drivers/platform/x86/toshiba_acpi.c
40546 +++ b/drivers/platform/x86/toshiba_acpi.c
40547 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40548 return AE_OK;
40549 }
40550
40551 -static struct backlight_ops toshiba_backlight_data = {
40552 +static const struct backlight_ops toshiba_backlight_data = {
40553 .get_brightness = get_lcd,
40554 .update_status = set_lcd_status,
40555 };
40556 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40557 index fc83783c..cf370d7 100644
40558 --- a/drivers/pnp/pnpbios/bioscalls.c
40559 +++ b/drivers/pnp/pnpbios/bioscalls.c
40560 @@ -60,7 +60,7 @@ do { \
40561 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40562 } while(0)
40563
40564 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40565 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40566 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40567
40568 /*
40569 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40570
40571 cpu = get_cpu();
40572 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40573 +
40574 + pax_open_kernel();
40575 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40576 + pax_close_kernel();
40577
40578 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40579 spin_lock_irqsave(&pnp_bios_lock, flags);
40580 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40581 :"memory");
40582 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40583
40584 + pax_open_kernel();
40585 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40586 + pax_close_kernel();
40587 +
40588 put_cpu();
40589
40590 /* If we get here and this is set then the PnP BIOS faulted on us. */
40591 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40592 return status;
40593 }
40594
40595 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40596 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40597 {
40598 int i;
40599
40600 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40601 pnp_bios_callpoint.offset = header->fields.pm16offset;
40602 pnp_bios_callpoint.segment = PNP_CS16;
40603
40604 + pax_open_kernel();
40605 +
40606 for_each_possible_cpu(i) {
40607 struct desc_struct *gdt = get_cpu_gdt_table(i);
40608 if (!gdt)
40609 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40610 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40611 (unsigned long)__va(header->fields.pm16dseg));
40612 }
40613 +
40614 + pax_close_kernel();
40615 }
40616 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40617 index ba97654..66b99d4 100644
40618 --- a/drivers/pnp/resource.c
40619 +++ b/drivers/pnp/resource.c
40620 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40621 return 1;
40622
40623 /* check if the resource is valid */
40624 - if (*irq < 0 || *irq > 15)
40625 + if (*irq > 15)
40626 return 0;
40627
40628 /* check if the resource is reserved */
40629 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40630 return 1;
40631
40632 /* check if the resource is valid */
40633 - if (*dma < 0 || *dma == 4 || *dma > 7)
40634 + if (*dma == 4 || *dma > 7)
40635 return 0;
40636
40637 /* check if the resource is reserved */
40638 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40639 index 62bb981..24a2dc9 100644
40640 --- a/drivers/power/bq27x00_battery.c
40641 +++ b/drivers/power/bq27x00_battery.c
40642 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40643 struct bq27x00_access_methods {
40644 int (*read)(u8 reg, int *rt_value, int b_single,
40645 struct bq27x00_device_info *di);
40646 -};
40647 +} __no_const;
40648
40649 struct bq27x00_device_info {
40650 struct device *dev;
40651 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40652 index 62227cd..b5b538b 100644
40653 --- a/drivers/rtc/rtc-dev.c
40654 +++ b/drivers/rtc/rtc-dev.c
40655 @@ -14,6 +14,7 @@
40656 #include <linux/module.h>
40657 #include <linux/rtc.h>
40658 #include <linux/sched.h>
40659 +#include <linux/grsecurity.h>
40660 #include "rtc-core.h"
40661
40662 static dev_t rtc_devt;
40663 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40664 if (copy_from_user(&tm, uarg, sizeof(tm)))
40665 return -EFAULT;
40666
40667 + gr_log_timechange();
40668 +
40669 return rtc_set_time(rtc, &tm);
40670
40671 case RTC_PIE_ON:
40672 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40673 index 968e3c7..fbc637a 100644
40674 --- a/drivers/s390/cio/qdio_perf.c
40675 +++ b/drivers/s390/cio/qdio_perf.c
40676 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40677 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40678 {
40679 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40680 - (long)atomic_long_read(&perf_stats.qdio_int));
40681 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40682 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40683 - (long)atomic_long_read(&perf_stats.pci_int));
40684 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40685 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40686 - (long)atomic_long_read(&perf_stats.thin_int));
40687 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40688 seq_printf(m, "\n");
40689 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40690 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40691 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40692 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40693 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40694 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40695 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40696 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40697 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40698 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40699 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40700 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40701 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40702 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40703 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40704 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40705 seq_printf(m, "\n");
40706 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40707 - (long)atomic_long_read(&perf_stats.siga_in));
40708 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40709 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40710 - (long)atomic_long_read(&perf_stats.siga_out));
40711 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40712 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40713 - (long)atomic_long_read(&perf_stats.siga_sync));
40714 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40715 seq_printf(m, "\n");
40716 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40717 - (long)atomic_long_read(&perf_stats.inbound_handler));
40718 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40719 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40720 - (long)atomic_long_read(&perf_stats.outbound_handler));
40721 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40722 seq_printf(m, "\n");
40723 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40724 - (long)atomic_long_read(&perf_stats.fast_requeue));
40725 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40726 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40727 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40728 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40729 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40730 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40731 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40732 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40733 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40734 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40735 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40736 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40737 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40738 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40739 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40740 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40741 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40742 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40743 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40744 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40745 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40746 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40747 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40748 seq_printf(m, "\n");
40749 return 0;
40750 }
40751 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40752 index ff4504c..b3604c3 100644
40753 --- a/drivers/s390/cio/qdio_perf.h
40754 +++ b/drivers/s390/cio/qdio_perf.h
40755 @@ -13,46 +13,46 @@
40756
40757 struct qdio_perf_stats {
40758 /* interrupt handler calls */
40759 - atomic_long_t qdio_int;
40760 - atomic_long_t pci_int;
40761 - atomic_long_t thin_int;
40762 + atomic_long_unchecked_t qdio_int;
40763 + atomic_long_unchecked_t pci_int;
40764 + atomic_long_unchecked_t thin_int;
40765
40766 /* tasklet runs */
40767 - atomic_long_t tasklet_inbound;
40768 - atomic_long_t tasklet_outbound;
40769 - atomic_long_t tasklet_thinint;
40770 - atomic_long_t tasklet_thinint_loop;
40771 - atomic_long_t thinint_inbound;
40772 - atomic_long_t thinint_inbound_loop;
40773 - atomic_long_t thinint_inbound_loop2;
40774 + atomic_long_unchecked_t tasklet_inbound;
40775 + atomic_long_unchecked_t tasklet_outbound;
40776 + atomic_long_unchecked_t tasklet_thinint;
40777 + atomic_long_unchecked_t tasklet_thinint_loop;
40778 + atomic_long_unchecked_t thinint_inbound;
40779 + atomic_long_unchecked_t thinint_inbound_loop;
40780 + atomic_long_unchecked_t thinint_inbound_loop2;
40781
40782 /* signal adapter calls */
40783 - atomic_long_t siga_out;
40784 - atomic_long_t siga_in;
40785 - atomic_long_t siga_sync;
40786 + atomic_long_unchecked_t siga_out;
40787 + atomic_long_unchecked_t siga_in;
40788 + atomic_long_unchecked_t siga_sync;
40789
40790 /* misc */
40791 - atomic_long_t inbound_handler;
40792 - atomic_long_t outbound_handler;
40793 - atomic_long_t fast_requeue;
40794 - atomic_long_t outbound_target_full;
40795 + atomic_long_unchecked_t inbound_handler;
40796 + atomic_long_unchecked_t outbound_handler;
40797 + atomic_long_unchecked_t fast_requeue;
40798 + atomic_long_unchecked_t outbound_target_full;
40799
40800 /* for debugging */
40801 - atomic_long_t debug_tl_out_timer;
40802 - atomic_long_t debug_stop_polling;
40803 - atomic_long_t debug_eqbs_all;
40804 - atomic_long_t debug_eqbs_incomplete;
40805 - atomic_long_t debug_sqbs_all;
40806 - atomic_long_t debug_sqbs_incomplete;
40807 + atomic_long_unchecked_t debug_tl_out_timer;
40808 + atomic_long_unchecked_t debug_stop_polling;
40809 + atomic_long_unchecked_t debug_eqbs_all;
40810 + atomic_long_unchecked_t debug_eqbs_incomplete;
40811 + atomic_long_unchecked_t debug_sqbs_all;
40812 + atomic_long_unchecked_t debug_sqbs_incomplete;
40813 };
40814
40815 extern struct qdio_perf_stats perf_stats;
40816 extern int qdio_performance_stats;
40817
40818 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40819 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40820 {
40821 if (qdio_performance_stats)
40822 - atomic_long_inc(count);
40823 + atomic_long_inc_unchecked(count);
40824 }
40825
40826 int qdio_setup_perf_stats(void);
40827 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40828 index 1ddcf40..a85f062 100644
40829 --- a/drivers/scsi/BusLogic.c
40830 +++ b/drivers/scsi/BusLogic.c
40831 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40832 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40833 *PrototypeHostAdapter)
40834 {
40835 + pax_track_stack();
40836 +
40837 /*
40838 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40839 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40840 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40841 index cdbdec9..b7d560b 100644
40842 --- a/drivers/scsi/aacraid/aacraid.h
40843 +++ b/drivers/scsi/aacraid/aacraid.h
40844 @@ -471,7 +471,7 @@ struct adapter_ops
40845 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40846 /* Administrative operations */
40847 int (*adapter_comm)(struct aac_dev * dev, int comm);
40848 -};
40849 +} __no_const;
40850
40851 /*
40852 * Define which interrupt handler needs to be installed
40853 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40854 index a5b8e7b..a6a0e43 100644
40855 --- a/drivers/scsi/aacraid/commctrl.c
40856 +++ b/drivers/scsi/aacraid/commctrl.c
40857 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40858 u32 actual_fibsize64, actual_fibsize = 0;
40859 int i;
40860
40861 + pax_track_stack();
40862
40863 if (dev->in_reset) {
40864 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40865 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40866 index 9b97c3e..f099725 100644
40867 --- a/drivers/scsi/aacraid/linit.c
40868 +++ b/drivers/scsi/aacraid/linit.c
40869 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40870 #elif defined(__devinitconst)
40871 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40872 #else
40873 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40874 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40875 #endif
40876 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40877 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40878 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40879 index 996f722..9127845 100644
40880 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40881 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40882 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40883 flash_error_table[i].reason);
40884 }
40885
40886 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40887 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40888 asd_show_update_bios, asd_store_update_bios);
40889
40890 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40891 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40892 .lldd_control_phy = asd_control_phy,
40893 };
40894
40895 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40896 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40897 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40898 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40899 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40900 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40901 index 58efd4b..cb48dc7 100644
40902 --- a/drivers/scsi/bfa/bfa_ioc.h
40903 +++ b/drivers/scsi/bfa/bfa_ioc.h
40904 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40905 bfa_ioc_disable_cbfn_t disable_cbfn;
40906 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40907 bfa_ioc_reset_cbfn_t reset_cbfn;
40908 -};
40909 +} __no_const;
40910
40911 /**
40912 * Heartbeat failure notification queue element.
40913 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40914 index 7ad177e..5503586 100644
40915 --- a/drivers/scsi/bfa/bfa_iocfc.h
40916 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40917 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40918 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40919 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40920 u32 *nvecs, u32 *maxvec);
40921 -};
40922 +} __no_const;
40923 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40924
40925 struct bfa_iocfc_s {
40926 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40927 index 4967643..cbec06b 100644
40928 --- a/drivers/scsi/dpt_i2o.c
40929 +++ b/drivers/scsi/dpt_i2o.c
40930 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40931 dma_addr_t addr;
40932 ulong flags = 0;
40933
40934 + pax_track_stack();
40935 +
40936 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40937 // get user msg size in u32s
40938 if(get_user(size, &user_msg[0])){
40939 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40940 s32 rcode;
40941 dma_addr_t addr;
40942
40943 + pax_track_stack();
40944 +
40945 memset(msg, 0 , sizeof(msg));
40946 len = scsi_bufflen(cmd);
40947 direction = 0x00000000;
40948 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40949 index c7076ce..e20c67c 100644
40950 --- a/drivers/scsi/eata.c
40951 +++ b/drivers/scsi/eata.c
40952 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40953 struct hostdata *ha;
40954 char name[16];
40955
40956 + pax_track_stack();
40957 +
40958 sprintf(name, "%s%d", driver_name, j);
40959
40960 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40961 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40962 index 11ae5c9..891daec 100644
40963 --- a/drivers/scsi/fcoe/libfcoe.c
40964 +++ b/drivers/scsi/fcoe/libfcoe.c
40965 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40966 size_t rlen;
40967 size_t dlen;
40968
40969 + pax_track_stack();
40970 +
40971 fiph = (struct fip_header *)skb->data;
40972 sub = fiph->fip_subcode;
40973 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40974 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40975 index 71c7bbe..e93088a 100644
40976 --- a/drivers/scsi/fnic/fnic_main.c
40977 +++ b/drivers/scsi/fnic/fnic_main.c
40978 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40979 /* Start local port initiatialization */
40980
40981 lp->link_up = 0;
40982 - lp->tt = fnic_transport_template;
40983 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40984
40985 lp->max_retry_count = fnic->config.flogi_retries;
40986 lp->max_rport_retry_count = fnic->config.plogi_retries;
40987 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40988 index bb96d74..9ec3ce4 100644
40989 --- a/drivers/scsi/gdth.c
40990 +++ b/drivers/scsi/gdth.c
40991 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40992 ulong flags;
40993 gdth_ha_str *ha;
40994
40995 + pax_track_stack();
40996 +
40997 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40998 return -EFAULT;
40999 ha = gdth_find_ha(ldrv.ionode);
41000 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
41001 gdth_ha_str *ha;
41002 int rval;
41003
41004 + pax_track_stack();
41005 +
41006 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
41007 res.number >= MAX_HDRIVES)
41008 return -EFAULT;
41009 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
41010 gdth_ha_str *ha;
41011 int rval;
41012
41013 + pax_track_stack();
41014 +
41015 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
41016 return -EFAULT;
41017 ha = gdth_find_ha(gen.ionode);
41018 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
41019 int i;
41020 gdth_cmd_str gdtcmd;
41021 char cmnd[MAX_COMMAND_SIZE];
41022 +
41023 + pax_track_stack();
41024 +
41025 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
41026
41027 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
41028 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
41029 index 1258da3..20d8ae6 100644
41030 --- a/drivers/scsi/gdth_proc.c
41031 +++ b/drivers/scsi/gdth_proc.c
41032 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41033 ulong64 paddr;
41034
41035 char cmnd[MAX_COMMAND_SIZE];
41036 +
41037 + pax_track_stack();
41038 +
41039 memset(cmnd, 0xff, 12);
41040 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41041
41042 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41043 gdth_hget_str *phg;
41044 char cmnd[MAX_COMMAND_SIZE];
41045
41046 + pax_track_stack();
41047 +
41048 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41049 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41050 if (!gdtcmd || !estr)
41051 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41052 index d03a926..f324286 100644
41053 --- a/drivers/scsi/hosts.c
41054 +++ b/drivers/scsi/hosts.c
41055 @@ -40,7 +40,7 @@
41056 #include "scsi_logging.h"
41057
41058
41059 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
41060 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41061
41062
41063 static void scsi_host_cls_release(struct device *dev)
41064 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41065 * subtract one because we increment first then return, but we need to
41066 * know what the next host number was before increment
41067 */
41068 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41069 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41070 shost->dma_channel = 0xff;
41071
41072 /* These three are default values which can be overridden */
41073 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41074 index a601159..55e19d2 100644
41075 --- a/drivers/scsi/ipr.c
41076 +++ b/drivers/scsi/ipr.c
41077 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41078 return true;
41079 }
41080
41081 -static struct ata_port_operations ipr_sata_ops = {
41082 +static const struct ata_port_operations ipr_sata_ops = {
41083 .phy_reset = ipr_ata_phy_reset,
41084 .hardreset = ipr_sata_reset,
41085 .post_internal_cmd = ipr_ata_post_internal,
41086 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41087 index 4e49fbc..97907ff 100644
41088 --- a/drivers/scsi/ips.h
41089 +++ b/drivers/scsi/ips.h
41090 @@ -1027,7 +1027,7 @@ typedef struct {
41091 int (*intr)(struct ips_ha *);
41092 void (*enableint)(struct ips_ha *);
41093 uint32_t (*statupd)(struct ips_ha *);
41094 -} ips_hw_func_t;
41095 +} __no_const ips_hw_func_t;
41096
41097 typedef struct ips_ha {
41098 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41099 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41100 index c1c1574..a9c9348 100644
41101 --- a/drivers/scsi/libfc/fc_exch.c
41102 +++ b/drivers/scsi/libfc/fc_exch.c
41103 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
41104 * all together if not used XXX
41105 */
41106 struct {
41107 - atomic_t no_free_exch;
41108 - atomic_t no_free_exch_xid;
41109 - atomic_t xid_not_found;
41110 - atomic_t xid_busy;
41111 - atomic_t seq_not_found;
41112 - atomic_t non_bls_resp;
41113 + atomic_unchecked_t no_free_exch;
41114 + atomic_unchecked_t no_free_exch_xid;
41115 + atomic_unchecked_t xid_not_found;
41116 + atomic_unchecked_t xid_busy;
41117 + atomic_unchecked_t seq_not_found;
41118 + atomic_unchecked_t non_bls_resp;
41119 } stats;
41120 };
41121 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41122 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41123 /* allocate memory for exchange */
41124 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41125 if (!ep) {
41126 - atomic_inc(&mp->stats.no_free_exch);
41127 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41128 goto out;
41129 }
41130 memset(ep, 0, sizeof(*ep));
41131 @@ -557,7 +557,7 @@ out:
41132 return ep;
41133 err:
41134 spin_unlock_bh(&pool->lock);
41135 - atomic_inc(&mp->stats.no_free_exch_xid);
41136 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41137 mempool_free(ep, mp->ep_pool);
41138 return NULL;
41139 }
41140 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41141 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41142 ep = fc_exch_find(mp, xid);
41143 if (!ep) {
41144 - atomic_inc(&mp->stats.xid_not_found);
41145 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41146 reject = FC_RJT_OX_ID;
41147 goto out;
41148 }
41149 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41150 ep = fc_exch_find(mp, xid);
41151 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41152 if (ep) {
41153 - atomic_inc(&mp->stats.xid_busy);
41154 + atomic_inc_unchecked(&mp->stats.xid_busy);
41155 reject = FC_RJT_RX_ID;
41156 goto rel;
41157 }
41158 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41159 }
41160 xid = ep->xid; /* get our XID */
41161 } else if (!ep) {
41162 - atomic_inc(&mp->stats.xid_not_found);
41163 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41164 reject = FC_RJT_RX_ID; /* XID not found */
41165 goto out;
41166 }
41167 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41168 } else {
41169 sp = &ep->seq;
41170 if (sp->id != fh->fh_seq_id) {
41171 - atomic_inc(&mp->stats.seq_not_found);
41172 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41173 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41174 goto rel;
41175 }
41176 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41177
41178 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41179 if (!ep) {
41180 - atomic_inc(&mp->stats.xid_not_found);
41181 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41182 goto out;
41183 }
41184 if (ep->esb_stat & ESB_ST_COMPLETE) {
41185 - atomic_inc(&mp->stats.xid_not_found);
41186 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41187 goto out;
41188 }
41189 if (ep->rxid == FC_XID_UNKNOWN)
41190 ep->rxid = ntohs(fh->fh_rx_id);
41191 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41192 - atomic_inc(&mp->stats.xid_not_found);
41193 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41194 goto rel;
41195 }
41196 if (ep->did != ntoh24(fh->fh_s_id) &&
41197 ep->did != FC_FID_FLOGI) {
41198 - atomic_inc(&mp->stats.xid_not_found);
41199 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41200 goto rel;
41201 }
41202 sof = fr_sof(fp);
41203 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41204 } else {
41205 sp = &ep->seq;
41206 if (sp->id != fh->fh_seq_id) {
41207 - atomic_inc(&mp->stats.seq_not_found);
41208 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41209 goto rel;
41210 }
41211 }
41212 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41213 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41214
41215 if (!sp)
41216 - atomic_inc(&mp->stats.xid_not_found);
41217 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41218 else
41219 - atomic_inc(&mp->stats.non_bls_resp);
41220 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41221
41222 fc_frame_free(fp);
41223 }
41224 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41225 index 0ee989f..a582241 100644
41226 --- a/drivers/scsi/libsas/sas_ata.c
41227 +++ b/drivers/scsi/libsas/sas_ata.c
41228 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41229 }
41230 }
41231
41232 -static struct ata_port_operations sas_sata_ops = {
41233 +static const struct ata_port_operations sas_sata_ops = {
41234 .phy_reset = sas_ata_phy_reset,
41235 .post_internal_cmd = sas_ata_post_internal,
41236 .qc_defer = ata_std_qc_defer,
41237 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41238 index aa10f79..5cc79e4 100644
41239 --- a/drivers/scsi/lpfc/lpfc.h
41240 +++ b/drivers/scsi/lpfc/lpfc.h
41241 @@ -400,7 +400,7 @@ struct lpfc_vport {
41242 struct dentry *debug_nodelist;
41243 struct dentry *vport_debugfs_root;
41244 struct lpfc_debugfs_trc *disc_trc;
41245 - atomic_t disc_trc_cnt;
41246 + atomic_unchecked_t disc_trc_cnt;
41247 #endif
41248 uint8_t stat_data_enabled;
41249 uint8_t stat_data_blocked;
41250 @@ -725,8 +725,8 @@ struct lpfc_hba {
41251 struct timer_list fabric_block_timer;
41252 unsigned long bit_flags;
41253 #define FABRIC_COMANDS_BLOCKED 0
41254 - atomic_t num_rsrc_err;
41255 - atomic_t num_cmd_success;
41256 + atomic_unchecked_t num_rsrc_err;
41257 + atomic_unchecked_t num_cmd_success;
41258 unsigned long last_rsrc_error_time;
41259 unsigned long last_ramp_down_time;
41260 unsigned long last_ramp_up_time;
41261 @@ -740,7 +740,7 @@ struct lpfc_hba {
41262 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41263 struct dentry *debug_slow_ring_trc;
41264 struct lpfc_debugfs_trc *slow_ring_trc;
41265 - atomic_t slow_ring_trc_cnt;
41266 + atomic_unchecked_t slow_ring_trc_cnt;
41267 #endif
41268
41269 /* Used for deferred freeing of ELS data buffers */
41270 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41271 index 8d0f0de..7c77a62 100644
41272 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41273 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41274 @@ -124,7 +124,7 @@ struct lpfc_debug {
41275 int len;
41276 };
41277
41278 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41279 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41280 static unsigned long lpfc_debugfs_start_time = 0L;
41281
41282 /**
41283 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41284 lpfc_debugfs_enable = 0;
41285
41286 len = 0;
41287 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41288 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41289 (lpfc_debugfs_max_disc_trc - 1);
41290 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41291 dtp = vport->disc_trc + i;
41292 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41293 lpfc_debugfs_enable = 0;
41294
41295 len = 0;
41296 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41297 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41298 (lpfc_debugfs_max_slow_ring_trc - 1);
41299 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41300 dtp = phba->slow_ring_trc + i;
41301 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41302 uint32_t *ptr;
41303 char buffer[1024];
41304
41305 + pax_track_stack();
41306 +
41307 off = 0;
41308 spin_lock_irq(&phba->hbalock);
41309
41310 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41311 !vport || !vport->disc_trc)
41312 return;
41313
41314 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41315 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41316 (lpfc_debugfs_max_disc_trc - 1);
41317 dtp = vport->disc_trc + index;
41318 dtp->fmt = fmt;
41319 dtp->data1 = data1;
41320 dtp->data2 = data2;
41321 dtp->data3 = data3;
41322 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41323 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41324 dtp->jif = jiffies;
41325 #endif
41326 return;
41327 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41328 !phba || !phba->slow_ring_trc)
41329 return;
41330
41331 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41332 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41333 (lpfc_debugfs_max_slow_ring_trc - 1);
41334 dtp = phba->slow_ring_trc + index;
41335 dtp->fmt = fmt;
41336 dtp->data1 = data1;
41337 dtp->data2 = data2;
41338 dtp->data3 = data3;
41339 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41340 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41341 dtp->jif = jiffies;
41342 #endif
41343 return;
41344 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41345 "slow_ring buffer\n");
41346 goto debug_failed;
41347 }
41348 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41349 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41350 memset(phba->slow_ring_trc, 0,
41351 (sizeof(struct lpfc_debugfs_trc) *
41352 lpfc_debugfs_max_slow_ring_trc));
41353 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41354 "buffer\n");
41355 goto debug_failed;
41356 }
41357 - atomic_set(&vport->disc_trc_cnt, 0);
41358 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41359
41360 snprintf(name, sizeof(name), "discovery_trace");
41361 vport->debug_disc_trc =
41362 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41363 index 549bc7d..8189dbb 100644
41364 --- a/drivers/scsi/lpfc/lpfc_init.c
41365 +++ b/drivers/scsi/lpfc/lpfc_init.c
41366 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41367 printk(LPFC_COPYRIGHT "\n");
41368
41369 if (lpfc_enable_npiv) {
41370 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41371 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41372 + pax_open_kernel();
41373 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41374 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41375 + pax_close_kernel();
41376 }
41377 lpfc_transport_template =
41378 fc_attach_transport(&lpfc_transport_functions);
41379 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41380 index c88f59f..ff2a42f 100644
41381 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41382 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41383 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41384 uint32_t evt_posted;
41385
41386 spin_lock_irqsave(&phba->hbalock, flags);
41387 - atomic_inc(&phba->num_rsrc_err);
41388 + atomic_inc_unchecked(&phba->num_rsrc_err);
41389 phba->last_rsrc_error_time = jiffies;
41390
41391 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41392 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41393 unsigned long flags;
41394 struct lpfc_hba *phba = vport->phba;
41395 uint32_t evt_posted;
41396 - atomic_inc(&phba->num_cmd_success);
41397 + atomic_inc_unchecked(&phba->num_cmd_success);
41398
41399 if (vport->cfg_lun_queue_depth <= queue_depth)
41400 return;
41401 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41402 int i;
41403 struct lpfc_rport_data *rdata;
41404
41405 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41406 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41407 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41408 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41409
41410 vports = lpfc_create_vport_work_array(phba);
41411 if (vports != NULL)
41412 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41413 }
41414 }
41415 lpfc_destroy_vport_work_array(phba, vports);
41416 - atomic_set(&phba->num_rsrc_err, 0);
41417 - atomic_set(&phba->num_cmd_success, 0);
41418 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41419 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41420 }
41421
41422 /**
41423 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41424 }
41425 }
41426 lpfc_destroy_vport_work_array(phba, vports);
41427 - atomic_set(&phba->num_rsrc_err, 0);
41428 - atomic_set(&phba->num_cmd_success, 0);
41429 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41430 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41431 }
41432
41433 /**
41434 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41435 index 234f0b7..3020aea 100644
41436 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41437 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41438 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41439 int rval;
41440 int i;
41441
41442 + pax_track_stack();
41443 +
41444 // Allocate memory for the base list of scb for management module.
41445 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41446
41447 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41448 index 7a117c1..ee01e9e 100644
41449 --- a/drivers/scsi/osd/osd_initiator.c
41450 +++ b/drivers/scsi/osd/osd_initiator.c
41451 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41452 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41453 int ret;
41454
41455 + pax_track_stack();
41456 +
41457 or = osd_start_request(od, GFP_KERNEL);
41458 if (!or)
41459 return -ENOMEM;
41460 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41461 index 9ab8c86..9425ad3 100644
41462 --- a/drivers/scsi/pmcraid.c
41463 +++ b/drivers/scsi/pmcraid.c
41464 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41465 res->scsi_dev = scsi_dev;
41466 scsi_dev->hostdata = res;
41467 res->change_detected = 0;
41468 - atomic_set(&res->read_failures, 0);
41469 - atomic_set(&res->write_failures, 0);
41470 + atomic_set_unchecked(&res->read_failures, 0);
41471 + atomic_set_unchecked(&res->write_failures, 0);
41472 rc = 0;
41473 }
41474 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41475 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41476
41477 /* If this was a SCSI read/write command keep count of errors */
41478 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41479 - atomic_inc(&res->read_failures);
41480 + atomic_inc_unchecked(&res->read_failures);
41481 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41482 - atomic_inc(&res->write_failures);
41483 + atomic_inc_unchecked(&res->write_failures);
41484
41485 if (!RES_IS_GSCSI(res->cfg_entry) &&
41486 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41487 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41488
41489 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41490 /* add resources only after host is added into system */
41491 - if (!atomic_read(&pinstance->expose_resources))
41492 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41493 return;
41494
41495 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41496 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41497 init_waitqueue_head(&pinstance->reset_wait_q);
41498
41499 atomic_set(&pinstance->outstanding_cmds, 0);
41500 - atomic_set(&pinstance->expose_resources, 0);
41501 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41502
41503 INIT_LIST_HEAD(&pinstance->free_res_q);
41504 INIT_LIST_HEAD(&pinstance->used_res_q);
41505 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41506 /* Schedule worker thread to handle CCN and take care of adding and
41507 * removing devices to OS
41508 */
41509 - atomic_set(&pinstance->expose_resources, 1);
41510 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41511 schedule_work(&pinstance->worker_q);
41512 return rc;
41513
41514 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41515 index 3441b3f..6cbe8f7 100644
41516 --- a/drivers/scsi/pmcraid.h
41517 +++ b/drivers/scsi/pmcraid.h
41518 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41519 atomic_t outstanding_cmds;
41520
41521 /* should add/delete resources to mid-layer now ?*/
41522 - atomic_t expose_resources;
41523 + atomic_unchecked_t expose_resources;
41524
41525 /* Tasklet to handle deferred processing */
41526 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41527 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41528 struct list_head queue; /* link to "to be exposed" resources */
41529 struct pmcraid_config_table_entry cfg_entry;
41530 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41531 - atomic_t read_failures; /* count of failed READ commands */
41532 - atomic_t write_failures; /* count of failed WRITE commands */
41533 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41534 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41535
41536 /* To indicate add/delete/modify during CCN */
41537 u8 change_detected;
41538 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41539 index 2150618..7034215 100644
41540 --- a/drivers/scsi/qla2xxx/qla_def.h
41541 +++ b/drivers/scsi/qla2xxx/qla_def.h
41542 @@ -2089,7 +2089,7 @@ struct isp_operations {
41543
41544 int (*get_flash_version) (struct scsi_qla_host *, void *);
41545 int (*start_scsi) (srb_t *);
41546 -};
41547 +} __no_const;
41548
41549 /* MSI-X Support *************************************************************/
41550
41551 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41552 index 81b5f29..2ae1fad 100644
41553 --- a/drivers/scsi/qla4xxx/ql4_def.h
41554 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41555 @@ -240,7 +240,7 @@ struct ddb_entry {
41556 atomic_t retry_relogin_timer; /* Min Time between relogins
41557 * (4000 only) */
41558 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41559 - atomic_t relogin_retry_count; /* Num of times relogin has been
41560 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41561 * retried */
41562
41563 uint16_t port;
41564 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41565 index af8c323..515dd51 100644
41566 --- a/drivers/scsi/qla4xxx/ql4_init.c
41567 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41568 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41569 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41570 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41571 atomic_set(&ddb_entry->relogin_timer, 0);
41572 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41573 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41574 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41575 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41576 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41577 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41578 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41579 atomic_set(&ddb_entry->port_down_timer,
41580 ha->port_down_retry_count);
41581 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41582 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41583 atomic_set(&ddb_entry->relogin_timer, 0);
41584 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41585 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41586 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41587 index 83c8b5e..a82b348 100644
41588 --- a/drivers/scsi/qla4xxx/ql4_os.c
41589 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41590 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41591 ddb_entry->fw_ddb_device_state ==
41592 DDB_DS_SESSION_FAILED) {
41593 /* Reset retry relogin timer */
41594 - atomic_inc(&ddb_entry->relogin_retry_count);
41595 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41596 DEBUG2(printk("scsi%ld: index[%d] relogin"
41597 " timed out-retrying"
41598 " relogin (%d)\n",
41599 ha->host_no,
41600 ddb_entry->fw_ddb_index,
41601 - atomic_read(&ddb_entry->
41602 + atomic_read_unchecked(&ddb_entry->
41603 relogin_retry_count))
41604 );
41605 start_dpc++;
41606 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41607 index dd098ca..686ce01 100644
41608 --- a/drivers/scsi/scsi.c
41609 +++ b/drivers/scsi/scsi.c
41610 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41611 unsigned long timeout;
41612 int rtn = 0;
41613
41614 - atomic_inc(&cmd->device->iorequest_cnt);
41615 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41616
41617 /* check if the device is still usable */
41618 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41619 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41620 index bc3e363..e1a8e50 100644
41621 --- a/drivers/scsi/scsi_debug.c
41622 +++ b/drivers/scsi/scsi_debug.c
41623 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41624 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41625 unsigned char *cmd = (unsigned char *)scp->cmnd;
41626
41627 + pax_track_stack();
41628 +
41629 if ((errsts = check_readiness(scp, 1, devip)))
41630 return errsts;
41631 memset(arr, 0, sizeof(arr));
41632 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41633 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41634 unsigned char *cmd = (unsigned char *)scp->cmnd;
41635
41636 + pax_track_stack();
41637 +
41638 if ((errsts = check_readiness(scp, 1, devip)))
41639 return errsts;
41640 memset(arr, 0, sizeof(arr));
41641 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41642 index 8df12522..c4c1472 100644
41643 --- a/drivers/scsi/scsi_lib.c
41644 +++ b/drivers/scsi/scsi_lib.c
41645 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41646 shost = sdev->host;
41647 scsi_init_cmd_errh(cmd);
41648 cmd->result = DID_NO_CONNECT << 16;
41649 - atomic_inc(&cmd->device->iorequest_cnt);
41650 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41651
41652 /*
41653 * SCSI request completion path will do scsi_device_unbusy(),
41654 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41655 */
41656 cmd->serial_number = 0;
41657
41658 - atomic_inc(&cmd->device->iodone_cnt);
41659 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41660 if (cmd->result)
41661 - atomic_inc(&cmd->device->ioerr_cnt);
41662 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41663
41664 disposition = scsi_decide_disposition(cmd);
41665 if (disposition != SUCCESS &&
41666 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41667 index 91a93e0..eae0fe3 100644
41668 --- a/drivers/scsi/scsi_sysfs.c
41669 +++ b/drivers/scsi/scsi_sysfs.c
41670 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41671 char *buf) \
41672 { \
41673 struct scsi_device *sdev = to_scsi_device(dev); \
41674 - unsigned long long count = atomic_read(&sdev->field); \
41675 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41676 return snprintf(buf, 20, "0x%llx\n", count); \
41677 } \
41678 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41679 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41680 index 1030327..f91fd30 100644
41681 --- a/drivers/scsi/scsi_tgt_lib.c
41682 +++ b/drivers/scsi/scsi_tgt_lib.c
41683 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41684 int err;
41685
41686 dprintk("%lx %u\n", uaddr, len);
41687 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41688 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41689 if (err) {
41690 /*
41691 * TODO: need to fixup sg_tablesize, max_segment_size,
41692 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41693 index db02e31..1b42ea9 100644
41694 --- a/drivers/scsi/scsi_transport_fc.c
41695 +++ b/drivers/scsi/scsi_transport_fc.c
41696 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41697 * Netlink Infrastructure
41698 */
41699
41700 -static atomic_t fc_event_seq;
41701 +static atomic_unchecked_t fc_event_seq;
41702
41703 /**
41704 * fc_get_event_number - Obtain the next sequential FC event number
41705 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41706 u32
41707 fc_get_event_number(void)
41708 {
41709 - return atomic_add_return(1, &fc_event_seq);
41710 + return atomic_add_return_unchecked(1, &fc_event_seq);
41711 }
41712 EXPORT_SYMBOL(fc_get_event_number);
41713
41714 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41715 {
41716 int error;
41717
41718 - atomic_set(&fc_event_seq, 0);
41719 + atomic_set_unchecked(&fc_event_seq, 0);
41720
41721 error = transport_class_register(&fc_host_class);
41722 if (error)
41723 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41724 index de2f8c4..63c5278 100644
41725 --- a/drivers/scsi/scsi_transport_iscsi.c
41726 +++ b/drivers/scsi/scsi_transport_iscsi.c
41727 @@ -81,7 +81,7 @@ struct iscsi_internal {
41728 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41729 };
41730
41731 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41732 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41733 static struct workqueue_struct *iscsi_eh_timer_workq;
41734
41735 /*
41736 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41737 int err;
41738
41739 ihost = shost->shost_data;
41740 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41741 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41742
41743 if (id == ISCSI_MAX_TARGET) {
41744 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41745 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41746 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41747 ISCSI_TRANSPORT_VERSION);
41748
41749 - atomic_set(&iscsi_session_nr, 0);
41750 + atomic_set_unchecked(&iscsi_session_nr, 0);
41751
41752 err = class_register(&iscsi_transport_class);
41753 if (err)
41754 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41755 index 21a045e..ec89e03 100644
41756 --- a/drivers/scsi/scsi_transport_srp.c
41757 +++ b/drivers/scsi/scsi_transport_srp.c
41758 @@ -33,7 +33,7 @@
41759 #include "scsi_transport_srp_internal.h"
41760
41761 struct srp_host_attrs {
41762 - atomic_t next_port_id;
41763 + atomic_unchecked_t next_port_id;
41764 };
41765 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41766
41767 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41768 struct Scsi_Host *shost = dev_to_shost(dev);
41769 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41770
41771 - atomic_set(&srp_host->next_port_id, 0);
41772 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41773 return 0;
41774 }
41775
41776 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41777 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41778 rport->roles = ids->roles;
41779
41780 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41781 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41782 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41783
41784 transport_setup_device(&rport->dev);
41785 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41786 index 040f751..98a5ed2 100644
41787 --- a/drivers/scsi/sg.c
41788 +++ b/drivers/scsi/sg.c
41789 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41790 sdp->disk->disk_name,
41791 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41792 NULL,
41793 - (char *)arg);
41794 + (char __user *)arg);
41795 case BLKTRACESTART:
41796 return blk_trace_startstop(sdp->device->request_queue, 1);
41797 case BLKTRACESTOP:
41798 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41799 const struct file_operations * fops;
41800 };
41801
41802 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41803 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41804 {"allow_dio", &adio_fops},
41805 {"debug", &debug_fops},
41806 {"def_reserved_size", &dressz_fops},
41807 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41808 {
41809 int k, mask;
41810 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41811 - struct sg_proc_leaf * leaf;
41812 + const struct sg_proc_leaf * leaf;
41813
41814 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41815 if (!sg_proc_sgp)
41816 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41817 index c19ca5e..3eb5959 100644
41818 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41819 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41820 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41821 int do_iounmap = 0;
41822 int do_disable_device = 1;
41823
41824 + pax_track_stack();
41825 +
41826 memset(&sym_dev, 0, sizeof(sym_dev));
41827 memset(&nvram, 0, sizeof(nvram));
41828 sym_dev.pdev = pdev;
41829 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41830 index eadc1ab..2d81457 100644
41831 --- a/drivers/serial/kgdboc.c
41832 +++ b/drivers/serial/kgdboc.c
41833 @@ -18,7 +18,7 @@
41834
41835 #define MAX_CONFIG_LEN 40
41836
41837 -static struct kgdb_io kgdboc_io_ops;
41838 +static const struct kgdb_io kgdboc_io_ops;
41839
41840 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41841 static int configured = -1;
41842 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41843 module_put(THIS_MODULE);
41844 }
41845
41846 -static struct kgdb_io kgdboc_io_ops = {
41847 +static const struct kgdb_io kgdboc_io_ops = {
41848 .name = "kgdboc",
41849 .read_char = kgdboc_get_char,
41850 .write_char = kgdboc_put_char,
41851 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41852 index b76f246..7f41af7 100644
41853 --- a/drivers/spi/spi.c
41854 +++ b/drivers/spi/spi.c
41855 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41856 EXPORT_SYMBOL_GPL(spi_sync);
41857
41858 /* portable code must never pass more than 32 bytes */
41859 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41860 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41861
41862 static u8 *buf;
41863
41864 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41865 index b9b37ff..19dfa23 100644
41866 --- a/drivers/staging/android/binder.c
41867 +++ b/drivers/staging/android/binder.c
41868 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41869 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41870 }
41871
41872 -static struct vm_operations_struct binder_vm_ops = {
41873 +static const struct vm_operations_struct binder_vm_ops = {
41874 .open = binder_vma_open,
41875 .close = binder_vma_close,
41876 };
41877 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41878 index cda26bb..39fed3f 100644
41879 --- a/drivers/staging/b3dfg/b3dfg.c
41880 +++ b/drivers/staging/b3dfg/b3dfg.c
41881 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41882 return VM_FAULT_NOPAGE;
41883 }
41884
41885 -static struct vm_operations_struct b3dfg_vm_ops = {
41886 +static const struct vm_operations_struct b3dfg_vm_ops = {
41887 .fault = b3dfg_vma_fault,
41888 };
41889
41890 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41891 return r;
41892 }
41893
41894 -static struct file_operations b3dfg_fops = {
41895 +static const struct file_operations b3dfg_fops = {
41896 .owner = THIS_MODULE,
41897 .open = b3dfg_open,
41898 .release = b3dfg_release,
41899 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41900 index 908f25a..c9a579b 100644
41901 --- a/drivers/staging/comedi/comedi_fops.c
41902 +++ b/drivers/staging/comedi/comedi_fops.c
41903 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41904 mutex_unlock(&dev->mutex);
41905 }
41906
41907 -static struct vm_operations_struct comedi_vm_ops = {
41908 +static const struct vm_operations_struct comedi_vm_ops = {
41909 .close = comedi_unmap,
41910 };
41911
41912 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41913 index e55a0db..577b776 100644
41914 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41915 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41916 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41917 static dev_t adsp_devno;
41918 static struct class *adsp_class;
41919
41920 -static struct file_operations adsp_fops = {
41921 +static const struct file_operations adsp_fops = {
41922 .owner = THIS_MODULE,
41923 .open = adsp_open,
41924 .unlocked_ioctl = adsp_ioctl,
41925 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41926 index ad2390f..4116ee8 100644
41927 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41928 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41929 @@ -1022,7 +1022,7 @@ done:
41930 return rc;
41931 }
41932
41933 -static struct file_operations audio_aac_fops = {
41934 +static const struct file_operations audio_aac_fops = {
41935 .owner = THIS_MODULE,
41936 .open = audio_open,
41937 .release = audio_release,
41938 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41939 index cd818a5..870b37b 100644
41940 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41941 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41942 @@ -833,7 +833,7 @@ done:
41943 return rc;
41944 }
41945
41946 -static struct file_operations audio_amrnb_fops = {
41947 +static const struct file_operations audio_amrnb_fops = {
41948 .owner = THIS_MODULE,
41949 .open = audamrnb_open,
41950 .release = audamrnb_release,
41951 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41952 index 4b43e18..cedafda 100644
41953 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41954 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41955 @@ -805,7 +805,7 @@ dma_fail:
41956 return rc;
41957 }
41958
41959 -static struct file_operations audio_evrc_fops = {
41960 +static const struct file_operations audio_evrc_fops = {
41961 .owner = THIS_MODULE,
41962 .open = audevrc_open,
41963 .release = audevrc_release,
41964 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41965 index 3d950a2..9431118 100644
41966 --- a/drivers/staging/dream/qdsp5/audio_in.c
41967 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41968 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41969 return 0;
41970 }
41971
41972 -static struct file_operations audio_fops = {
41973 +static const struct file_operations audio_fops = {
41974 .owner = THIS_MODULE,
41975 .open = audio_in_open,
41976 .release = audio_in_release,
41977 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41978 .unlocked_ioctl = audio_in_ioctl,
41979 };
41980
41981 -static struct file_operations audpre_fops = {
41982 +static const struct file_operations audpre_fops = {
41983 .owner = THIS_MODULE,
41984 .open = audpre_open,
41985 .unlocked_ioctl = audpre_ioctl,
41986 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41987 index b95574f..286c2f4 100644
41988 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41989 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41990 @@ -941,7 +941,7 @@ done:
41991 return rc;
41992 }
41993
41994 -static struct file_operations audio_mp3_fops = {
41995 +static const struct file_operations audio_mp3_fops = {
41996 .owner = THIS_MODULE,
41997 .open = audio_open,
41998 .release = audio_release,
41999 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
42000 index d1adcf6..f8f9833 100644
42001 --- a/drivers/staging/dream/qdsp5/audio_out.c
42002 +++ b/drivers/staging/dream/qdsp5/audio_out.c
42003 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
42004 return 0;
42005 }
42006
42007 -static struct file_operations audio_fops = {
42008 +static const struct file_operations audio_fops = {
42009 .owner = THIS_MODULE,
42010 .open = audio_open,
42011 .release = audio_release,
42012 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
42013 .unlocked_ioctl = audio_ioctl,
42014 };
42015
42016 -static struct file_operations audpp_fops = {
42017 +static const struct file_operations audpp_fops = {
42018 .owner = THIS_MODULE,
42019 .open = audpp_open,
42020 .unlocked_ioctl = audpp_ioctl,
42021 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
42022 index f0f50e3..f6b9dbc 100644
42023 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
42024 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
42025 @@ -816,7 +816,7 @@ err:
42026 return rc;
42027 }
42028
42029 -static struct file_operations audio_qcelp_fops = {
42030 +static const struct file_operations audio_qcelp_fops = {
42031 .owner = THIS_MODULE,
42032 .open = audqcelp_open,
42033 .release = audqcelp_release,
42034 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42035 index 037d7ff..5469ec3 100644
42036 --- a/drivers/staging/dream/qdsp5/snd.c
42037 +++ b/drivers/staging/dream/qdsp5/snd.c
42038 @@ -242,7 +242,7 @@ err:
42039 return rc;
42040 }
42041
42042 -static struct file_operations snd_fops = {
42043 +static const struct file_operations snd_fops = {
42044 .owner = THIS_MODULE,
42045 .open = snd_open,
42046 .release = snd_release,
42047 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42048 index d4e7d88..0ea632a 100644
42049 --- a/drivers/staging/dream/smd/smd_qmi.c
42050 +++ b/drivers/staging/dream/smd/smd_qmi.c
42051 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42052 return 0;
42053 }
42054
42055 -static struct file_operations qmi_fops = {
42056 +static const struct file_operations qmi_fops = {
42057 .owner = THIS_MODULE,
42058 .read = qmi_read,
42059 .write = qmi_write,
42060 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42061 index cd3910b..ff053d3 100644
42062 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42063 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42064 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42065 return rc;
42066 }
42067
42068 -static struct file_operations rpcrouter_server_fops = {
42069 +static const struct file_operations rpcrouter_server_fops = {
42070 .owner = THIS_MODULE,
42071 .open = rpcrouter_open,
42072 .release = rpcrouter_release,
42073 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42074 .unlocked_ioctl = rpcrouter_ioctl,
42075 };
42076
42077 -static struct file_operations rpcrouter_router_fops = {
42078 +static const struct file_operations rpcrouter_router_fops = {
42079 .owner = THIS_MODULE,
42080 .open = rpcrouter_open,
42081 .release = rpcrouter_release,
42082 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42083 index c24e4e0..07665be 100644
42084 --- a/drivers/staging/dst/dcore.c
42085 +++ b/drivers/staging/dst/dcore.c
42086 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42087 return 0;
42088 }
42089
42090 -static struct block_device_operations dst_blk_ops = {
42091 +static const struct block_device_operations dst_blk_ops = {
42092 .open = dst_bdev_open,
42093 .release = dst_bdev_release,
42094 .owner = THIS_MODULE,
42095 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42096 n->size = ctl->size;
42097
42098 atomic_set(&n->refcnt, 1);
42099 - atomic_long_set(&n->gen, 0);
42100 + atomic_long_set_unchecked(&n->gen, 0);
42101 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42102
42103 err = dst_node_sysfs_init(n);
42104 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42105 index 557d372..8d84422 100644
42106 --- a/drivers/staging/dst/trans.c
42107 +++ b/drivers/staging/dst/trans.c
42108 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42109 t->error = 0;
42110 t->retries = 0;
42111 atomic_set(&t->refcnt, 1);
42112 - t->gen = atomic_long_inc_return(&n->gen);
42113 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
42114
42115 t->enc = bio_data_dir(bio);
42116 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42117 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42118 index 94f7752..d051514 100644
42119 --- a/drivers/staging/et131x/et1310_tx.c
42120 +++ b/drivers/staging/et131x/et1310_tx.c
42121 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42122 struct net_device_stats *stats = &etdev->net_stats;
42123
42124 if (pMpTcb->Flags & fMP_DEST_BROAD)
42125 - atomic_inc(&etdev->Stats.brdcstxmt);
42126 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42127 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42128 - atomic_inc(&etdev->Stats.multixmt);
42129 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42130 else
42131 - atomic_inc(&etdev->Stats.unixmt);
42132 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42133
42134 if (pMpTcb->Packet) {
42135 stats->tx_bytes += pMpTcb->Packet->len;
42136 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42137 index 1dfe06f..f469b4d 100644
42138 --- a/drivers/staging/et131x/et131x_adapter.h
42139 +++ b/drivers/staging/et131x/et131x_adapter.h
42140 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42141 * operations
42142 */
42143 u32 unircv; /* # multicast packets received */
42144 - atomic_t unixmt; /* # multicast packets for Tx */
42145 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42146 u32 multircv; /* # multicast packets received */
42147 - atomic_t multixmt; /* # multicast packets for Tx */
42148 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42149 u32 brdcstrcv; /* # broadcast packets received */
42150 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42151 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42152 u32 norcvbuf; /* # Rx packets discarded */
42153 u32 noxmtbuf; /* # Tx packets discarded */
42154
42155 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42156 index 4bd353a..e28f455 100644
42157 --- a/drivers/staging/go7007/go7007-v4l2.c
42158 +++ b/drivers/staging/go7007/go7007-v4l2.c
42159 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42160 return 0;
42161 }
42162
42163 -static struct vm_operations_struct go7007_vm_ops = {
42164 +static const struct vm_operations_struct go7007_vm_ops = {
42165 .open = go7007_vm_open,
42166 .close = go7007_vm_close,
42167 .fault = go7007_vm_fault,
42168 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42169 index 366dc95..b974d87 100644
42170 --- a/drivers/staging/hv/Channel.c
42171 +++ b/drivers/staging/hv/Channel.c
42172 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42173
42174 DPRINT_ENTER(VMBUS);
42175
42176 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42177 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42178 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42179 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42180
42181 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42182 ASSERT(msgInfo != NULL);
42183 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42184 index b12237f..01ae28a 100644
42185 --- a/drivers/staging/hv/Hv.c
42186 +++ b/drivers/staging/hv/Hv.c
42187 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42188 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42189 u32 outputAddressHi = outputAddress >> 32;
42190 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42191 - volatile void *hypercallPage = gHvContext.HypercallPage;
42192 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42193
42194 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42195 Control, Input, Output);
42196 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42197 index d089bb1..2ebc158 100644
42198 --- a/drivers/staging/hv/VmbusApi.h
42199 +++ b/drivers/staging/hv/VmbusApi.h
42200 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42201 u32 *GpadlHandle);
42202 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42203 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42204 -};
42205 +} __no_const;
42206
42207 /* Base driver object */
42208 struct hv_driver {
42209 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42210 index 5a37cce..6ecc88c 100644
42211 --- a/drivers/staging/hv/VmbusPrivate.h
42212 +++ b/drivers/staging/hv/VmbusPrivate.h
42213 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42214 struct VMBUS_CONNECTION {
42215 enum VMBUS_CONNECT_STATE ConnectState;
42216
42217 - atomic_t NextGpadlHandle;
42218 + atomic_unchecked_t NextGpadlHandle;
42219
42220 /*
42221 * Represents channel interrupts. Each bit position represents a
42222 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42223 index 871a202..ca50ddf 100644
42224 --- a/drivers/staging/hv/blkvsc_drv.c
42225 +++ b/drivers/staging/hv/blkvsc_drv.c
42226 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42227 /* The one and only one */
42228 static struct blkvsc_driver_context g_blkvsc_drv;
42229
42230 -static struct block_device_operations block_ops = {
42231 +static const struct block_device_operations block_ops = {
42232 .owner = THIS_MODULE,
42233 .open = blkvsc_open,
42234 .release = blkvsc_release,
42235 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42236 index 6acc49a..fbc8d46 100644
42237 --- a/drivers/staging/hv/vmbus_drv.c
42238 +++ b/drivers/staging/hv/vmbus_drv.c
42239 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42240 to_device_context(root_device_obj);
42241 struct device_context *child_device_ctx =
42242 to_device_context(child_device_obj);
42243 - static atomic_t device_num = ATOMIC_INIT(0);
42244 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42245
42246 DPRINT_ENTER(VMBUS_DRV);
42247
42248 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42249
42250 /* Set the device name. Otherwise, device_register() will fail. */
42251 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42252 - atomic_inc_return(&device_num));
42253 + atomic_inc_return_unchecked(&device_num));
42254
42255 /* The new device belongs to this bus */
42256 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42257 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42258 index d926189..17b19fd 100644
42259 --- a/drivers/staging/iio/ring_generic.h
42260 +++ b/drivers/staging/iio/ring_generic.h
42261 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42262
42263 int (*is_enabled)(struct iio_ring_buffer *ring);
42264 int (*enable)(struct iio_ring_buffer *ring);
42265 -};
42266 +} __no_const;
42267
42268 /**
42269 * struct iio_ring_buffer - general ring buffer structure
42270 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42271 index 1b237b7..88c624e 100644
42272 --- a/drivers/staging/octeon/ethernet-rx.c
42273 +++ b/drivers/staging/octeon/ethernet-rx.c
42274 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42275 /* Increment RX stats for virtual ports */
42276 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42277 #ifdef CONFIG_64BIT
42278 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42279 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42280 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42281 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42282 #else
42283 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42284 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42285 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42286 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42287 #endif
42288 }
42289 netif_receive_skb(skb);
42290 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42291 dev->name);
42292 */
42293 #ifdef CONFIG_64BIT
42294 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42295 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42296 #else
42297 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42298 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42299 #endif
42300 dev_kfree_skb_irq(skb);
42301 }
42302 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42303 index 492c502..d9909f1 100644
42304 --- a/drivers/staging/octeon/ethernet.c
42305 +++ b/drivers/staging/octeon/ethernet.c
42306 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42307 * since the RX tasklet also increments it.
42308 */
42309 #ifdef CONFIG_64BIT
42310 - atomic64_add(rx_status.dropped_packets,
42311 - (atomic64_t *)&priv->stats.rx_dropped);
42312 + atomic64_add_unchecked(rx_status.dropped_packets,
42313 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42314 #else
42315 - atomic_add(rx_status.dropped_packets,
42316 - (atomic_t *)&priv->stats.rx_dropped);
42317 + atomic_add_unchecked(rx_status.dropped_packets,
42318 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42319 #endif
42320 }
42321
42322 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42323 index a35bd5d..28fff45 100644
42324 --- a/drivers/staging/otus/80211core/pub_zfi.h
42325 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42326 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42327 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42328
42329 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42330 -};
42331 +} __no_const;
42332
42333 extern void zfZeroMemory(u8_t* va, u16_t length);
42334 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42335 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42336 index c39a25f..696f5aa 100644
42337 --- a/drivers/staging/panel/panel.c
42338 +++ b/drivers/staging/panel/panel.c
42339 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42340 return 0;
42341 }
42342
42343 -static struct file_operations lcd_fops = {
42344 +static const struct file_operations lcd_fops = {
42345 .write = lcd_write,
42346 .open = lcd_open,
42347 .release = lcd_release,
42348 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42349 return 0;
42350 }
42351
42352 -static struct file_operations keypad_fops = {
42353 +static const struct file_operations keypad_fops = {
42354 .read = keypad_read, /* read */
42355 .open = keypad_open, /* open */
42356 .release = keypad_release, /* close */
42357 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42358 index 270ebcb..37e46af 100644
42359 --- a/drivers/staging/phison/phison.c
42360 +++ b/drivers/staging/phison/phison.c
42361 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42362 ATA_BMDMA_SHT(DRV_NAME),
42363 };
42364
42365 -static struct ata_port_operations phison_ops = {
42366 +static const struct ata_port_operations phison_ops = {
42367 .inherits = &ata_bmdma_port_ops,
42368 .prereset = phison_pre_reset,
42369 };
42370 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42371 index 2eb8e3d..57616a7 100644
42372 --- a/drivers/staging/poch/poch.c
42373 +++ b/drivers/staging/poch/poch.c
42374 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42375 return 0;
42376 }
42377
42378 -static struct file_operations poch_fops = {
42379 +static const struct file_operations poch_fops = {
42380 .owner = THIS_MODULE,
42381 .open = poch_open,
42382 .release = poch_release,
42383 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42384 index c94de31..19402bc 100644
42385 --- a/drivers/staging/pohmelfs/inode.c
42386 +++ b/drivers/staging/pohmelfs/inode.c
42387 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42388 mutex_init(&psb->mcache_lock);
42389 psb->mcache_root = RB_ROOT;
42390 psb->mcache_timeout = msecs_to_jiffies(5000);
42391 - atomic_long_set(&psb->mcache_gen, 0);
42392 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42393
42394 psb->trans_max_pages = 100;
42395
42396 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42397 INIT_LIST_HEAD(&psb->crypto_ready_list);
42398 INIT_LIST_HEAD(&psb->crypto_active_list);
42399
42400 - atomic_set(&psb->trans_gen, 1);
42401 + atomic_set_unchecked(&psb->trans_gen, 1);
42402 atomic_long_set(&psb->total_inodes, 0);
42403
42404 mutex_init(&psb->state_lock);
42405 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42406 index e22665c..a2a9390 100644
42407 --- a/drivers/staging/pohmelfs/mcache.c
42408 +++ b/drivers/staging/pohmelfs/mcache.c
42409 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42410 m->data = data;
42411 m->start = start;
42412 m->size = size;
42413 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42414 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42415
42416 mutex_lock(&psb->mcache_lock);
42417 err = pohmelfs_mcache_insert(psb, m);
42418 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42419 index 623a07d..4035c19 100644
42420 --- a/drivers/staging/pohmelfs/netfs.h
42421 +++ b/drivers/staging/pohmelfs/netfs.h
42422 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42423 struct pohmelfs_sb {
42424 struct rb_root mcache_root;
42425 struct mutex mcache_lock;
42426 - atomic_long_t mcache_gen;
42427 + atomic_long_unchecked_t mcache_gen;
42428 unsigned long mcache_timeout;
42429
42430 unsigned int idx;
42431
42432 unsigned int trans_retries;
42433
42434 - atomic_t trans_gen;
42435 + atomic_unchecked_t trans_gen;
42436
42437 unsigned int crypto_attached_size;
42438 unsigned int crypto_align_size;
42439 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42440 index 36a2535..0591bf4 100644
42441 --- a/drivers/staging/pohmelfs/trans.c
42442 +++ b/drivers/staging/pohmelfs/trans.c
42443 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42444 int err;
42445 struct netfs_cmd *cmd = t->iovec.iov_base;
42446
42447 - t->gen = atomic_inc_return(&psb->trans_gen);
42448 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42449
42450 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42451 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42452 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42453 index f890a16..509ece8 100644
42454 --- a/drivers/staging/sep/sep_driver.c
42455 +++ b/drivers/staging/sep/sep_driver.c
42456 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42457 static dev_t sep_devno;
42458
42459 /* the files operations structure of the driver */
42460 -static struct file_operations sep_file_operations = {
42461 +static const struct file_operations sep_file_operations = {
42462 .owner = THIS_MODULE,
42463 .ioctl = sep_ioctl,
42464 .poll = sep_poll,
42465 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42466 index 5e16bc3..7655b10 100644
42467 --- a/drivers/staging/usbip/usbip_common.h
42468 +++ b/drivers/staging/usbip/usbip_common.h
42469 @@ -374,7 +374,7 @@ struct usbip_device {
42470 void (*shutdown)(struct usbip_device *);
42471 void (*reset)(struct usbip_device *);
42472 void (*unusable)(struct usbip_device *);
42473 - } eh_ops;
42474 + } __no_const eh_ops;
42475 };
42476
42477
42478 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42479 index 57f7946..d9df23d 100644
42480 --- a/drivers/staging/usbip/vhci.h
42481 +++ b/drivers/staging/usbip/vhci.h
42482 @@ -92,7 +92,7 @@ struct vhci_hcd {
42483 unsigned resuming:1;
42484 unsigned long re_timeout;
42485
42486 - atomic_t seqnum;
42487 + atomic_unchecked_t seqnum;
42488
42489 /*
42490 * NOTE:
42491 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42492 index 20cd7db..c2693ff 100644
42493 --- a/drivers/staging/usbip/vhci_hcd.c
42494 +++ b/drivers/staging/usbip/vhci_hcd.c
42495 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42496 return;
42497 }
42498
42499 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42500 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42501 if (priv->seqnum == 0xffff)
42502 usbip_uinfo("seqnum max\n");
42503
42504 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42505 return -ENOMEM;
42506 }
42507
42508 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42509 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42510 if (unlink->seqnum == 0xffff)
42511 usbip_uinfo("seqnum max\n");
42512
42513 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42514 vdev->rhport = rhport;
42515 }
42516
42517 - atomic_set(&vhci->seqnum, 0);
42518 + atomic_set_unchecked(&vhci->seqnum, 0);
42519 spin_lock_init(&vhci->lock);
42520
42521
42522 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42523 index 7fd76fe..673695a 100644
42524 --- a/drivers/staging/usbip/vhci_rx.c
42525 +++ b/drivers/staging/usbip/vhci_rx.c
42526 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42527 usbip_uerr("cannot find a urb of seqnum %u\n",
42528 pdu->base.seqnum);
42529 usbip_uinfo("max seqnum %d\n",
42530 - atomic_read(&the_controller->seqnum));
42531 + atomic_read_unchecked(&the_controller->seqnum));
42532 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42533 return;
42534 }
42535 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42536 index 7891288..8e31300 100644
42537 --- a/drivers/staging/vme/devices/vme_user.c
42538 +++ b/drivers/staging/vme/devices/vme_user.c
42539 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42540 static int __init vme_user_probe(struct device *, int, int);
42541 static int __exit vme_user_remove(struct device *, int, int);
42542
42543 -static struct file_operations vme_user_fops = {
42544 +static const struct file_operations vme_user_fops = {
42545 .open = vme_user_open,
42546 .release = vme_user_release,
42547 .read = vme_user_read,
42548 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42549 index 58abf44..00c1fc8 100644
42550 --- a/drivers/staging/vt6655/hostap.c
42551 +++ b/drivers/staging/vt6655/hostap.c
42552 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42553 PSDevice apdev_priv;
42554 struct net_device *dev = pDevice->dev;
42555 int ret;
42556 - const struct net_device_ops apdev_netdev_ops = {
42557 + net_device_ops_no_const apdev_netdev_ops = {
42558 .ndo_start_xmit = pDevice->tx_80211,
42559 };
42560
42561 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42562 index 0c8267a..db1f363 100644
42563 --- a/drivers/staging/vt6656/hostap.c
42564 +++ b/drivers/staging/vt6656/hostap.c
42565 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42566 PSDevice apdev_priv;
42567 struct net_device *dev = pDevice->dev;
42568 int ret;
42569 - const struct net_device_ops apdev_netdev_ops = {
42570 + net_device_ops_no_const apdev_netdev_ops = {
42571 .ndo_start_xmit = pDevice->tx_80211,
42572 };
42573
42574 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42575 index 925678b..da7f5ed 100644
42576 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42577 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42578 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42579
42580 struct usbctlx_completor {
42581 int (*complete) (struct usbctlx_completor *);
42582 -};
42583 +} __no_const;
42584 typedef struct usbctlx_completor usbctlx_completor_t;
42585
42586 static int
42587 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42588 index 40de151..924f268 100644
42589 --- a/drivers/telephony/ixj.c
42590 +++ b/drivers/telephony/ixj.c
42591 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42592 bool mContinue;
42593 char *pIn, *pOut;
42594
42595 + pax_track_stack();
42596 +
42597 if (!SCI_Prepare(j))
42598 return 0;
42599
42600 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42601 index e941367..b631f5a 100644
42602 --- a/drivers/uio/uio.c
42603 +++ b/drivers/uio/uio.c
42604 @@ -23,6 +23,7 @@
42605 #include <linux/string.h>
42606 #include <linux/kobject.h>
42607 #include <linux/uio_driver.h>
42608 +#include <asm/local.h>
42609
42610 #define UIO_MAX_DEVICES 255
42611
42612 @@ -30,10 +31,10 @@ struct uio_device {
42613 struct module *owner;
42614 struct device *dev;
42615 int minor;
42616 - atomic_t event;
42617 + atomic_unchecked_t event;
42618 struct fasync_struct *async_queue;
42619 wait_queue_head_t wait;
42620 - int vma_count;
42621 + local_t vma_count;
42622 struct uio_info *info;
42623 struct kobject *map_dir;
42624 struct kobject *portio_dir;
42625 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42626 return entry->show(mem, buf);
42627 }
42628
42629 -static struct sysfs_ops map_sysfs_ops = {
42630 +static const struct sysfs_ops map_sysfs_ops = {
42631 .show = map_type_show,
42632 };
42633
42634 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42635 return entry->show(port, buf);
42636 }
42637
42638 -static struct sysfs_ops portio_sysfs_ops = {
42639 +static const struct sysfs_ops portio_sysfs_ops = {
42640 .show = portio_type_show,
42641 };
42642
42643 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42644 struct uio_device *idev = dev_get_drvdata(dev);
42645 if (idev)
42646 return sprintf(buf, "%u\n",
42647 - (unsigned int)atomic_read(&idev->event));
42648 + (unsigned int)atomic_read_unchecked(&idev->event));
42649 else
42650 return -ENODEV;
42651 }
42652 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42653 {
42654 struct uio_device *idev = info->uio_dev;
42655
42656 - atomic_inc(&idev->event);
42657 + atomic_inc_unchecked(&idev->event);
42658 wake_up_interruptible(&idev->wait);
42659 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42660 }
42661 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42662 }
42663
42664 listener->dev = idev;
42665 - listener->event_count = atomic_read(&idev->event);
42666 + listener->event_count = atomic_read_unchecked(&idev->event);
42667 filep->private_data = listener;
42668
42669 if (idev->info->open) {
42670 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42671 return -EIO;
42672
42673 poll_wait(filep, &idev->wait, wait);
42674 - if (listener->event_count != atomic_read(&idev->event))
42675 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42676 return POLLIN | POLLRDNORM;
42677 return 0;
42678 }
42679 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42680 do {
42681 set_current_state(TASK_INTERRUPTIBLE);
42682
42683 - event_count = atomic_read(&idev->event);
42684 + event_count = atomic_read_unchecked(&idev->event);
42685 if (event_count != listener->event_count) {
42686 if (copy_to_user(buf, &event_count, count))
42687 retval = -EFAULT;
42688 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42689 static void uio_vma_open(struct vm_area_struct *vma)
42690 {
42691 struct uio_device *idev = vma->vm_private_data;
42692 - idev->vma_count++;
42693 + local_inc(&idev->vma_count);
42694 }
42695
42696 static void uio_vma_close(struct vm_area_struct *vma)
42697 {
42698 struct uio_device *idev = vma->vm_private_data;
42699 - idev->vma_count--;
42700 + local_dec(&idev->vma_count);
42701 }
42702
42703 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42704 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42705 idev->owner = owner;
42706 idev->info = info;
42707 init_waitqueue_head(&idev->wait);
42708 - atomic_set(&idev->event, 0);
42709 + atomic_set_unchecked(&idev->event, 0);
42710
42711 ret = uio_get_minor(idev);
42712 if (ret)
42713 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42714 index fbea856..06efea6 100644
42715 --- a/drivers/usb/atm/usbatm.c
42716 +++ b/drivers/usb/atm/usbatm.c
42717 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42718 if (printk_ratelimit())
42719 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42720 __func__, vpi, vci);
42721 - atomic_inc(&vcc->stats->rx_err);
42722 + atomic_inc_unchecked(&vcc->stats->rx_err);
42723 return;
42724 }
42725
42726 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42727 if (length > ATM_MAX_AAL5_PDU) {
42728 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42729 __func__, length, vcc);
42730 - atomic_inc(&vcc->stats->rx_err);
42731 + atomic_inc_unchecked(&vcc->stats->rx_err);
42732 goto out;
42733 }
42734
42735 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42736 if (sarb->len < pdu_length) {
42737 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42738 __func__, pdu_length, sarb->len, vcc);
42739 - atomic_inc(&vcc->stats->rx_err);
42740 + atomic_inc_unchecked(&vcc->stats->rx_err);
42741 goto out;
42742 }
42743
42744 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42745 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42746 __func__, vcc);
42747 - atomic_inc(&vcc->stats->rx_err);
42748 + atomic_inc_unchecked(&vcc->stats->rx_err);
42749 goto out;
42750 }
42751
42752 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42753 if (printk_ratelimit())
42754 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42755 __func__, length);
42756 - atomic_inc(&vcc->stats->rx_drop);
42757 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42758 goto out;
42759 }
42760
42761 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42762
42763 vcc->push(vcc, skb);
42764
42765 - atomic_inc(&vcc->stats->rx);
42766 + atomic_inc_unchecked(&vcc->stats->rx);
42767 out:
42768 skb_trim(sarb, 0);
42769 }
42770 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42771 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42772
42773 usbatm_pop(vcc, skb);
42774 - atomic_inc(&vcc->stats->tx);
42775 + atomic_inc_unchecked(&vcc->stats->tx);
42776
42777 skb = skb_dequeue(&instance->sndqueue);
42778 }
42779 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42780 if (!left--)
42781 return sprintf(page,
42782 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42783 - atomic_read(&atm_dev->stats.aal5.tx),
42784 - atomic_read(&atm_dev->stats.aal5.tx_err),
42785 - atomic_read(&atm_dev->stats.aal5.rx),
42786 - atomic_read(&atm_dev->stats.aal5.rx_err),
42787 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42788 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42789 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42790 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42791 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42792 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42793
42794 if (!left--) {
42795 if (instance->disconnected)
42796 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42797 index 24e6205..fe5a5d4 100644
42798 --- a/drivers/usb/core/hcd.c
42799 +++ b/drivers/usb/core/hcd.c
42800 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42801
42802 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42803
42804 -struct usb_mon_operations *mon_ops;
42805 +const struct usb_mon_operations *mon_ops;
42806
42807 /*
42808 * The registration is unlocked.
42809 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42810 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42811 */
42812
42813 -int usb_mon_register (struct usb_mon_operations *ops)
42814 +int usb_mon_register (const struct usb_mon_operations *ops)
42815 {
42816
42817 if (mon_ops)
42818 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42819 index bcbe104..9cfd1c6 100644
42820 --- a/drivers/usb/core/hcd.h
42821 +++ b/drivers/usb/core/hcd.h
42822 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42823 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42824
42825 struct usb_mon_operations {
42826 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42827 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42828 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42829 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42830 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42831 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42832 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42833 };
42834
42835 -extern struct usb_mon_operations *mon_ops;
42836 +extern const struct usb_mon_operations *mon_ops;
42837
42838 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42839 {
42840 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42841 (*mon_ops->urb_complete)(bus, urb, status);
42842 }
42843
42844 -int usb_mon_register(struct usb_mon_operations *ops);
42845 +int usb_mon_register(const struct usb_mon_operations *ops);
42846 void usb_mon_deregister(void);
42847
42848 #else
42849 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42850 index 409cc94..a673bad 100644
42851 --- a/drivers/usb/core/message.c
42852 +++ b/drivers/usb/core/message.c
42853 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42854 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42855 if (buf) {
42856 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42857 - if (len > 0) {
42858 - smallbuf = kmalloc(++len, GFP_NOIO);
42859 + if (len++ > 0) {
42860 + smallbuf = kmalloc(len, GFP_NOIO);
42861 if (!smallbuf)
42862 return buf;
42863 memcpy(smallbuf, buf, len);
42864 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42865 index 62ff5e7..530b74e 100644
42866 --- a/drivers/usb/misc/appledisplay.c
42867 +++ b/drivers/usb/misc/appledisplay.c
42868 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42869 return pdata->msgdata[1];
42870 }
42871
42872 -static struct backlight_ops appledisplay_bl_data = {
42873 +static const struct backlight_ops appledisplay_bl_data = {
42874 .get_brightness = appledisplay_bl_get_brightness,
42875 .update_status = appledisplay_bl_update_status,
42876 };
42877 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42878 index e0c2db3..bd8cb66 100644
42879 --- a/drivers/usb/mon/mon_main.c
42880 +++ b/drivers/usb/mon/mon_main.c
42881 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42882 /*
42883 * Ops
42884 */
42885 -static struct usb_mon_operations mon_ops_0 = {
42886 +static const struct usb_mon_operations mon_ops_0 = {
42887 .urb_submit = mon_submit,
42888 .urb_submit_error = mon_submit_error,
42889 .urb_complete = mon_complete,
42890 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42891 index d6bea3e..60b250e 100644
42892 --- a/drivers/usb/wusbcore/wa-hc.h
42893 +++ b/drivers/usb/wusbcore/wa-hc.h
42894 @@ -192,7 +192,7 @@ struct wahc {
42895 struct list_head xfer_delayed_list;
42896 spinlock_t xfer_list_lock;
42897 struct work_struct xfer_work;
42898 - atomic_t xfer_id_count;
42899 + atomic_unchecked_t xfer_id_count;
42900 };
42901
42902
42903 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42904 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42905 spin_lock_init(&wa->xfer_list_lock);
42906 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42907 - atomic_set(&wa->xfer_id_count, 1);
42908 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42909 }
42910
42911 /**
42912 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42913 index 613a5fc..3174865 100644
42914 --- a/drivers/usb/wusbcore/wa-xfer.c
42915 +++ b/drivers/usb/wusbcore/wa-xfer.c
42916 @@ -293,7 +293,7 @@ out:
42917 */
42918 static void wa_xfer_id_init(struct wa_xfer *xfer)
42919 {
42920 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42921 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42922 }
42923
42924 /*
42925 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42926 index aa42fce..f8a828c 100644
42927 --- a/drivers/uwb/wlp/messages.c
42928 +++ b/drivers/uwb/wlp/messages.c
42929 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42930 size_t len = skb->len;
42931 size_t used;
42932 ssize_t result;
42933 - struct wlp_nonce enonce, rnonce;
42934 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42935 enum wlp_assc_error assc_err;
42936 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42937 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42938 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42939 index 0370399..6627c94 100644
42940 --- a/drivers/uwb/wlp/sysfs.c
42941 +++ b/drivers/uwb/wlp/sysfs.c
42942 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42943 return ret;
42944 }
42945
42946 -static
42947 -struct sysfs_ops wss_sysfs_ops = {
42948 +static const struct sysfs_ops wss_sysfs_ops = {
42949 .show = wlp_wss_attr_show,
42950 .store = wlp_wss_attr_store,
42951 };
42952 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42953 index 8c5e432..5ee90ea 100644
42954 --- a/drivers/video/atmel_lcdfb.c
42955 +++ b/drivers/video/atmel_lcdfb.c
42956 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42957 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42958 }
42959
42960 -static struct backlight_ops atmel_lcdc_bl_ops = {
42961 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42962 .update_status = atmel_bl_update_status,
42963 .get_brightness = atmel_bl_get_brightness,
42964 };
42965 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42966 index e4e4d43..66bcbcc 100644
42967 --- a/drivers/video/aty/aty128fb.c
42968 +++ b/drivers/video/aty/aty128fb.c
42969 @@ -149,7 +149,7 @@ enum {
42970 };
42971
42972 /* Must match above enum */
42973 -static const char *r128_family[] __devinitdata = {
42974 +static const char *r128_family[] __devinitconst = {
42975 "AGP",
42976 "PCI",
42977 "PRO AGP",
42978 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42979 return bd->props.brightness;
42980 }
42981
42982 -static struct backlight_ops aty128_bl_data = {
42983 +static const struct backlight_ops aty128_bl_data = {
42984 .get_brightness = aty128_bl_get_brightness,
42985 .update_status = aty128_bl_update_status,
42986 };
42987 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42988 index 913b4a4..9295a38 100644
42989 --- a/drivers/video/aty/atyfb_base.c
42990 +++ b/drivers/video/aty/atyfb_base.c
42991 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42992 return bd->props.brightness;
42993 }
42994
42995 -static struct backlight_ops aty_bl_data = {
42996 +static const struct backlight_ops aty_bl_data = {
42997 .get_brightness = aty_bl_get_brightness,
42998 .update_status = aty_bl_update_status,
42999 };
43000 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
43001 index 1a056ad..221bd6a 100644
43002 --- a/drivers/video/aty/radeon_backlight.c
43003 +++ b/drivers/video/aty/radeon_backlight.c
43004 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
43005 return bd->props.brightness;
43006 }
43007
43008 -static struct backlight_ops radeon_bl_data = {
43009 +static const struct backlight_ops radeon_bl_data = {
43010 .get_brightness = radeon_bl_get_brightness,
43011 .update_status = radeon_bl_update_status,
43012 };
43013 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
43014 index ad05da5..3cb2cb9 100644
43015 --- a/drivers/video/backlight/adp5520_bl.c
43016 +++ b/drivers/video/backlight/adp5520_bl.c
43017 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
43018 return error ? data->current_brightness : reg_val;
43019 }
43020
43021 -static struct backlight_ops adp5520_bl_ops = {
43022 +static const struct backlight_ops adp5520_bl_ops = {
43023 .update_status = adp5520_bl_update_status,
43024 .get_brightness = adp5520_bl_get_brightness,
43025 };
43026 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
43027 index 2c3bdfc..d769b0b 100644
43028 --- a/drivers/video/backlight/adx_bl.c
43029 +++ b/drivers/video/backlight/adx_bl.c
43030 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
43031 return 1;
43032 }
43033
43034 -static struct backlight_ops adx_backlight_ops = {
43035 +static const struct backlight_ops adx_backlight_ops = {
43036 .options = 0,
43037 .update_status = adx_backlight_update_status,
43038 .get_brightness = adx_backlight_get_brightness,
43039 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43040 index 505c082..6b6b3cc 100644
43041 --- a/drivers/video/backlight/atmel-pwm-bl.c
43042 +++ b/drivers/video/backlight/atmel-pwm-bl.c
43043 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43044 return pwm_channel_enable(&pwmbl->pwmc);
43045 }
43046
43047 -static struct backlight_ops atmel_pwm_bl_ops = {
43048 +static const struct backlight_ops atmel_pwm_bl_ops = {
43049 .get_brightness = atmel_pwm_bl_get_intensity,
43050 .update_status = atmel_pwm_bl_set_intensity,
43051 };
43052 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43053 index 5e20e6e..89025e6 100644
43054 --- a/drivers/video/backlight/backlight.c
43055 +++ b/drivers/video/backlight/backlight.c
43056 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43057 * ERR_PTR() or a pointer to the newly allocated device.
43058 */
43059 struct backlight_device *backlight_device_register(const char *name,
43060 - struct device *parent, void *devdata, struct backlight_ops *ops)
43061 + struct device *parent, void *devdata, const struct backlight_ops *ops)
43062 {
43063 struct backlight_device *new_bd;
43064 int rc;
43065 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43066 index 9677494..b4bcf80 100644
43067 --- a/drivers/video/backlight/corgi_lcd.c
43068 +++ b/drivers/video/backlight/corgi_lcd.c
43069 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43070 }
43071 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43072
43073 -static struct backlight_ops corgi_bl_ops = {
43074 +static const struct backlight_ops corgi_bl_ops = {
43075 .get_brightness = corgi_bl_get_intensity,
43076 .update_status = corgi_bl_update_status,
43077 };
43078 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43079 index b9fe62b..2914bf1 100644
43080 --- a/drivers/video/backlight/cr_bllcd.c
43081 +++ b/drivers/video/backlight/cr_bllcd.c
43082 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43083 return intensity;
43084 }
43085
43086 -static struct backlight_ops cr_backlight_ops = {
43087 +static const struct backlight_ops cr_backlight_ops = {
43088 .get_brightness = cr_backlight_get_intensity,
43089 .update_status = cr_backlight_set_intensity,
43090 };
43091 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43092 index 701a108..feacfd5 100644
43093 --- a/drivers/video/backlight/da903x_bl.c
43094 +++ b/drivers/video/backlight/da903x_bl.c
43095 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43096 return data->current_brightness;
43097 }
43098
43099 -static struct backlight_ops da903x_backlight_ops = {
43100 +static const struct backlight_ops da903x_backlight_ops = {
43101 .update_status = da903x_backlight_update_status,
43102 .get_brightness = da903x_backlight_get_brightness,
43103 };
43104 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43105 index 6d27f62..e6d348e 100644
43106 --- a/drivers/video/backlight/generic_bl.c
43107 +++ b/drivers/video/backlight/generic_bl.c
43108 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43109 }
43110 EXPORT_SYMBOL(corgibl_limit_intensity);
43111
43112 -static struct backlight_ops genericbl_ops = {
43113 +static const struct backlight_ops genericbl_ops = {
43114 .options = BL_CORE_SUSPENDRESUME,
43115 .get_brightness = genericbl_get_intensity,
43116 .update_status = genericbl_send_intensity,
43117 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43118 index 7fb4eef..f7cc528 100644
43119 --- a/drivers/video/backlight/hp680_bl.c
43120 +++ b/drivers/video/backlight/hp680_bl.c
43121 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43122 return current_intensity;
43123 }
43124
43125 -static struct backlight_ops hp680bl_ops = {
43126 +static const struct backlight_ops hp680bl_ops = {
43127 .get_brightness = hp680bl_get_intensity,
43128 .update_status = hp680bl_set_intensity,
43129 };
43130 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43131 index 7aed256..db9071f 100644
43132 --- a/drivers/video/backlight/jornada720_bl.c
43133 +++ b/drivers/video/backlight/jornada720_bl.c
43134 @@ -93,7 +93,7 @@ out:
43135 return ret;
43136 }
43137
43138 -static struct backlight_ops jornada_bl_ops = {
43139 +static const struct backlight_ops jornada_bl_ops = {
43140 .get_brightness = jornada_bl_get_brightness,
43141 .update_status = jornada_bl_update_status,
43142 .options = BL_CORE_SUSPENDRESUME,
43143 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43144 index a38fda1..939e7b8 100644
43145 --- a/drivers/video/backlight/kb3886_bl.c
43146 +++ b/drivers/video/backlight/kb3886_bl.c
43147 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43148 return kb3886bl_intensity;
43149 }
43150
43151 -static struct backlight_ops kb3886bl_ops = {
43152 +static const struct backlight_ops kb3886bl_ops = {
43153 .get_brightness = kb3886bl_get_intensity,
43154 .update_status = kb3886bl_send_intensity,
43155 };
43156 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43157 index 6b488b8..00a9591 100644
43158 --- a/drivers/video/backlight/locomolcd.c
43159 +++ b/drivers/video/backlight/locomolcd.c
43160 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43161 return current_intensity;
43162 }
43163
43164 -static struct backlight_ops locomobl_data = {
43165 +static const struct backlight_ops locomobl_data = {
43166 .get_brightness = locomolcd_get_intensity,
43167 .update_status = locomolcd_set_intensity,
43168 };
43169 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43170 index 99bdfa8..3dac448 100644
43171 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43172 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43173 @@ -33,7 +33,7 @@ struct dmi_match_data {
43174 unsigned long iostart;
43175 unsigned long iolen;
43176 /* Backlight operations structure. */
43177 - struct backlight_ops backlight_ops;
43178 + const struct backlight_ops backlight_ops;
43179 };
43180
43181 /* Module parameters. */
43182 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43183 index cbad67e..3cf900e 100644
43184 --- a/drivers/video/backlight/omap1_bl.c
43185 +++ b/drivers/video/backlight/omap1_bl.c
43186 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43187 return bl->current_intensity;
43188 }
43189
43190 -static struct backlight_ops omapbl_ops = {
43191 +static const struct backlight_ops omapbl_ops = {
43192 .get_brightness = omapbl_get_intensity,
43193 .update_status = omapbl_update_status,
43194 };
43195 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43196 index 9edaf24..075786e 100644
43197 --- a/drivers/video/backlight/progear_bl.c
43198 +++ b/drivers/video/backlight/progear_bl.c
43199 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43200 return intensity - HW_LEVEL_MIN;
43201 }
43202
43203 -static struct backlight_ops progearbl_ops = {
43204 +static const struct backlight_ops progearbl_ops = {
43205 .get_brightness = progearbl_get_intensity,
43206 .update_status = progearbl_set_intensity,
43207 };
43208 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43209 index 8871662..df9e0b3 100644
43210 --- a/drivers/video/backlight/pwm_bl.c
43211 +++ b/drivers/video/backlight/pwm_bl.c
43212 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43213 return bl->props.brightness;
43214 }
43215
43216 -static struct backlight_ops pwm_backlight_ops = {
43217 +static const struct backlight_ops pwm_backlight_ops = {
43218 .update_status = pwm_backlight_update_status,
43219 .get_brightness = pwm_backlight_get_brightness,
43220 };
43221 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43222 index 43edbad..e14ce4d 100644
43223 --- a/drivers/video/backlight/tosa_bl.c
43224 +++ b/drivers/video/backlight/tosa_bl.c
43225 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43226 return props->brightness;
43227 }
43228
43229 -static struct backlight_ops bl_ops = {
43230 +static const struct backlight_ops bl_ops = {
43231 .get_brightness = tosa_bl_get_brightness,
43232 .update_status = tosa_bl_update_status,
43233 };
43234 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43235 index 467bdb7..e32add3 100644
43236 --- a/drivers/video/backlight/wm831x_bl.c
43237 +++ b/drivers/video/backlight/wm831x_bl.c
43238 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43239 return data->current_brightness;
43240 }
43241
43242 -static struct backlight_ops wm831x_backlight_ops = {
43243 +static const struct backlight_ops wm831x_backlight_ops = {
43244 .options = BL_CORE_SUSPENDRESUME,
43245 .update_status = wm831x_backlight_update_status,
43246 .get_brightness = wm831x_backlight_get_brightness,
43247 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43248 index e49ae5e..db4e6f7 100644
43249 --- a/drivers/video/bf54x-lq043fb.c
43250 +++ b/drivers/video/bf54x-lq043fb.c
43251 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43252 return 0;
43253 }
43254
43255 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43256 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43257 .get_brightness = bl_get_brightness,
43258 };
43259
43260 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43261 index 2c72a7c..d523e52 100644
43262 --- a/drivers/video/bfin-t350mcqb-fb.c
43263 +++ b/drivers/video/bfin-t350mcqb-fb.c
43264 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43265 return 0;
43266 }
43267
43268 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43269 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43270 .get_brightness = bl_get_brightness,
43271 };
43272
43273 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43274 index f53b9f1..958bf4e 100644
43275 --- a/drivers/video/fbcmap.c
43276 +++ b/drivers/video/fbcmap.c
43277 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43278 rc = -ENODEV;
43279 goto out;
43280 }
43281 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43282 - !info->fbops->fb_setcmap)) {
43283 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43284 rc = -EINVAL;
43285 goto out1;
43286 }
43287 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43288 index 99bbd28..ad3829e 100644
43289 --- a/drivers/video/fbmem.c
43290 +++ b/drivers/video/fbmem.c
43291 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43292 image->dx += image->width + 8;
43293 }
43294 } else if (rotate == FB_ROTATE_UD) {
43295 - for (x = 0; x < num && image->dx >= 0; x++) {
43296 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43297 info->fbops->fb_imageblit(info, image);
43298 image->dx -= image->width + 8;
43299 }
43300 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43301 image->dy += image->height + 8;
43302 }
43303 } else if (rotate == FB_ROTATE_CCW) {
43304 - for (x = 0; x < num && image->dy >= 0; x++) {
43305 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43306 info->fbops->fb_imageblit(info, image);
43307 image->dy -= image->height + 8;
43308 }
43309 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43310 int flags = info->flags;
43311 int ret = 0;
43312
43313 + pax_track_stack();
43314 +
43315 if (var->activate & FB_ACTIVATE_INV_MODE) {
43316 struct fb_videomode mode1, mode2;
43317
43318 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43319 void __user *argp = (void __user *)arg;
43320 long ret = 0;
43321
43322 + pax_track_stack();
43323 +
43324 switch (cmd) {
43325 case FBIOGET_VSCREENINFO:
43326 if (!lock_fb_info(info))
43327 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43328 return -EFAULT;
43329 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43330 return -EINVAL;
43331 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43332 + if (con2fb.framebuffer >= FB_MAX)
43333 return -EINVAL;
43334 if (!registered_fb[con2fb.framebuffer])
43335 request_module("fb%d", con2fb.framebuffer);
43336 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43337 index f20eff8..3e4f622 100644
43338 --- a/drivers/video/geode/gx1fb_core.c
43339 +++ b/drivers/video/geode/gx1fb_core.c
43340 @@ -30,7 +30,7 @@ static int crt_option = 1;
43341 static char panel_option[32] = "";
43342
43343 /* Modes relevant to the GX1 (taken from modedb.c) */
43344 -static const struct fb_videomode __initdata gx1_modedb[] = {
43345 +static const struct fb_videomode __initconst gx1_modedb[] = {
43346 /* 640x480-60 VESA */
43347 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43348 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43349 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43350 index 896e53d..4d87d0b 100644
43351 --- a/drivers/video/gxt4500.c
43352 +++ b/drivers/video/gxt4500.c
43353 @@ -156,7 +156,7 @@ struct gxt4500_par {
43354 static char *mode_option;
43355
43356 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43357 -static const struct fb_videomode defaultmode __devinitdata = {
43358 +static const struct fb_videomode defaultmode __devinitconst = {
43359 .refresh = 60,
43360 .xres = 1280,
43361 .yres = 1024,
43362 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43363 return 0;
43364 }
43365
43366 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43367 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43368 .id = "IBM GXT4500P",
43369 .type = FB_TYPE_PACKED_PIXELS,
43370 .visual = FB_VISUAL_PSEUDOCOLOR,
43371 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43372 index f5bedee..28c6028 100644
43373 --- a/drivers/video/i810/i810_accel.c
43374 +++ b/drivers/video/i810/i810_accel.c
43375 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43376 }
43377 }
43378 printk("ringbuffer lockup!!!\n");
43379 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43380 i810_report_error(mmio);
43381 par->dev_flags |= LOCKUP;
43382 info->pixmap.scan_align = 1;
43383 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43384 index 5743ea2..457f82c 100644
43385 --- a/drivers/video/i810/i810_main.c
43386 +++ b/drivers/video/i810/i810_main.c
43387 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43388 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43389
43390 /* PCI */
43391 -static const char *i810_pci_list[] __devinitdata = {
43392 +static const char *i810_pci_list[] __devinitconst = {
43393 "Intel(R) 810 Framebuffer Device" ,
43394 "Intel(R) 810-DC100 Framebuffer Device" ,
43395 "Intel(R) 810E Framebuffer Device" ,
43396 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43397 index 3c14e43..eafa544 100644
43398 --- a/drivers/video/logo/logo_linux_clut224.ppm
43399 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43400 @@ -1,1604 +1,1123 @@
43401 P3
43402 -# Standard 224-color Linux logo
43403 80 80
43404 255
43405 - 0 0 0 0 0 0 0 0 0 0 0 0
43406 - 0 0 0 0 0 0 0 0 0 0 0 0
43407 - 0 0 0 0 0 0 0 0 0 0 0 0
43408 - 0 0 0 0 0 0 0 0 0 0 0 0
43409 - 0 0 0 0 0 0 0 0 0 0 0 0
43410 - 0 0 0 0 0 0 0 0 0 0 0 0
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 0 0 0 0 0 0 0 0 0 0 0 0
43413 - 0 0 0 0 0 0 0 0 0 0 0 0
43414 - 6 6 6 6 6 6 10 10 10 10 10 10
43415 - 10 10 10 6 6 6 6 6 6 6 6 6
43416 - 0 0 0 0 0 0 0 0 0 0 0 0
43417 - 0 0 0 0 0 0 0 0 0 0 0 0
43418 - 0 0 0 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 0 0 0 0 0 0 0 0 0 0 0 0
43421 - 0 0 0 0 0 0 0 0 0 0 0 0
43422 - 0 0 0 0 0 0 0 0 0 0 0 0
43423 - 0 0 0 0 0 0 0 0 0 0 0 0
43424 - 0 0 0 0 0 0 0 0 0 0 0 0
43425 - 0 0 0 0 0 0 0 0 0 0 0 0
43426 - 0 0 0 0 0 0 0 0 0 0 0 0
43427 - 0 0 0 0 0 0 0 0 0 0 0 0
43428 - 0 0 0 0 0 0 0 0 0 0 0 0
43429 - 0 0 0 0 0 0 0 0 0 0 0 0
43430 - 0 0 0 0 0 0 0 0 0 0 0 0
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 0 0 0 0 0 0 0 0 0 0 0 0
43433 - 0 0 0 6 6 6 10 10 10 14 14 14
43434 - 22 22 22 26 26 26 30 30 30 34 34 34
43435 - 30 30 30 30 30 30 26 26 26 18 18 18
43436 - 14 14 14 10 10 10 6 6 6 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 0 0 0 0 0 0 0 0 0 0 0 0
43441 - 0 0 0 0 0 0 0 0 0 0 0 0
43442 - 0 0 0 0 0 0 0 0 0 0 0 0
43443 - 0 0 0 0 0 0 0 0 0 0 0 0
43444 - 0 0 0 0 0 0 0 0 0 0 0 0
43445 - 0 0 0 0 0 0 0 0 0 0 0 0
43446 - 0 0 0 0 0 1 0 0 1 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 6 6 6 14 14 14 26 26 26 42 42 42
43454 - 54 54 54 66 66 66 78 78 78 78 78 78
43455 - 78 78 78 74 74 74 66 66 66 54 54 54
43456 - 42 42 42 26 26 26 18 18 18 10 10 10
43457 - 6 6 6 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 0 0 0 0 0 0 0 0 0 0 0 0
43461 - 0 0 0 0 0 0 0 0 0 0 0 0
43462 - 0 0 0 0 0 0 0 0 0 0 0 0
43463 - 0 0 0 0 0 0 0 0 0 0 0 0
43464 - 0 0 0 0 0 0 0 0 0 0 0 0
43465 - 0 0 0 0 0 0 0 0 0 0 0 0
43466 - 0 0 1 0 0 0 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 10 10 10
43473 - 22 22 22 42 42 42 66 66 66 86 86 86
43474 - 66 66 66 38 38 38 38 38 38 22 22 22
43475 - 26 26 26 34 34 34 54 54 54 66 66 66
43476 - 86 86 86 70 70 70 46 46 46 26 26 26
43477 - 14 14 14 6 6 6 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 0 0 0
43480 - 0 0 0 0 0 0 0 0 0 0 0 0
43481 - 0 0 0 0 0 0 0 0 0 0 0 0
43482 - 0 0 0 0 0 0 0 0 0 0 0 0
43483 - 0 0 0 0 0 0 0 0 0 0 0 0
43484 - 0 0 0 0 0 0 0 0 0 0 0 0
43485 - 0 0 0 0 0 0 0 0 0 0 0 0
43486 - 0 0 1 0 0 1 0 0 1 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 10 10 10 26 26 26
43493 - 50 50 50 82 82 82 58 58 58 6 6 6
43494 - 2 2 6 2 2 6 2 2 6 2 2 6
43495 - 2 2 6 2 2 6 2 2 6 2 2 6
43496 - 6 6 6 54 54 54 86 86 86 66 66 66
43497 - 38 38 38 18 18 18 6 6 6 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 0 0 0
43500 - 0 0 0 0 0 0 0 0 0 0 0 0
43501 - 0 0 0 0 0 0 0 0 0 0 0 0
43502 - 0 0 0 0 0 0 0 0 0 0 0 0
43503 - 0 0 0 0 0 0 0 0 0 0 0 0
43504 - 0 0 0 0 0 0 0 0 0 0 0 0
43505 - 0 0 0 0 0 0 0 0 0 0 0 0
43506 - 0 0 0 0 0 0 0 0 0 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 6 6 6 22 22 22 50 50 50
43513 - 78 78 78 34 34 34 2 2 6 2 2 6
43514 - 2 2 6 2 2 6 2 2 6 2 2 6
43515 - 2 2 6 2 2 6 2 2 6 2 2 6
43516 - 2 2 6 2 2 6 6 6 6 70 70 70
43517 - 78 78 78 46 46 46 22 22 22 6 6 6
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 0 0 0
43520 - 0 0 0 0 0 0 0 0 0 0 0 0
43521 - 0 0 0 0 0 0 0 0 0 0 0 0
43522 - 0 0 0 0 0 0 0 0 0 0 0 0
43523 - 0 0 0 0 0 0 0 0 0 0 0 0
43524 - 0 0 0 0 0 0 0 0 0 0 0 0
43525 - 0 0 0 0 0 0 0 0 0 0 0 0
43526 - 0 0 1 0 0 1 0 0 1 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 0 0 0
43532 - 6 6 6 18 18 18 42 42 42 82 82 82
43533 - 26 26 26 2 2 6 2 2 6 2 2 6
43534 - 2 2 6 2 2 6 2 2 6 2 2 6
43535 - 2 2 6 2 2 6 2 2 6 14 14 14
43536 - 46 46 46 34 34 34 6 6 6 2 2 6
43537 - 42 42 42 78 78 78 42 42 42 18 18 18
43538 - 6 6 6 0 0 0 0 0 0 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 0 0 0
43540 - 0 0 0 0 0 0 0 0 0 0 0 0
43541 - 0 0 0 0 0 0 0 0 0 0 0 0
43542 - 0 0 0 0 0 0 0 0 0 0 0 0
43543 - 0 0 0 0 0 0 0 0 0 0 0 0
43544 - 0 0 0 0 0 0 0 0 0 0 0 0
43545 - 0 0 0 0 0 0 0 0 0 0 0 0
43546 - 0 0 1 0 0 0 0 0 1 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 0 0 0
43552 - 10 10 10 30 30 30 66 66 66 58 58 58
43553 - 2 2 6 2 2 6 2 2 6 2 2 6
43554 - 2 2 6 2 2 6 2 2 6 2 2 6
43555 - 2 2 6 2 2 6 2 2 6 26 26 26
43556 - 86 86 86 101 101 101 46 46 46 10 10 10
43557 - 2 2 6 58 58 58 70 70 70 34 34 34
43558 - 10 10 10 0 0 0 0 0 0 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 0 0 0
43560 - 0 0 0 0 0 0 0 0 0 0 0 0
43561 - 0 0 0 0 0 0 0 0 0 0 0 0
43562 - 0 0 0 0 0 0 0 0 0 0 0 0
43563 - 0 0 0 0 0 0 0 0 0 0 0 0
43564 - 0 0 0 0 0 0 0 0 0 0 0 0
43565 - 0 0 0 0 0 0 0 0 0 0 0 0
43566 - 0 0 1 0 0 1 0 0 1 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 0 0 0
43572 - 14 14 14 42 42 42 86 86 86 10 10 10
43573 - 2 2 6 2 2 6 2 2 6 2 2 6
43574 - 2 2 6 2 2 6 2 2 6 2 2 6
43575 - 2 2 6 2 2 6 2 2 6 30 30 30
43576 - 94 94 94 94 94 94 58 58 58 26 26 26
43577 - 2 2 6 6 6 6 78 78 78 54 54 54
43578 - 22 22 22 6 6 6 0 0 0 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 0 0 0
43580 - 0 0 0 0 0 0 0 0 0 0 0 0
43581 - 0 0 0 0 0 0 0 0 0 0 0 0
43582 - 0 0 0 0 0 0 0 0 0 0 0 0
43583 - 0 0 0 0 0 0 0 0 0 0 0 0
43584 - 0 0 0 0 0 0 0 0 0 0 0 0
43585 - 0 0 0 0 0 0 0 0 0 0 0 0
43586 - 0 0 0 0 0 0 0 0 0 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 6 6 6
43592 - 22 22 22 62 62 62 62 62 62 2 2 6
43593 - 2 2 6 2 2 6 2 2 6 2 2 6
43594 - 2 2 6 2 2 6 2 2 6 2 2 6
43595 - 2 2 6 2 2 6 2 2 6 26 26 26
43596 - 54 54 54 38 38 38 18 18 18 10 10 10
43597 - 2 2 6 2 2 6 34 34 34 82 82 82
43598 - 38 38 38 14 14 14 0 0 0 0 0 0
43599 - 0 0 0 0 0 0 0 0 0 0 0 0
43600 - 0 0 0 0 0 0 0 0 0 0 0 0
43601 - 0 0 0 0 0 0 0 0 0 0 0 0
43602 - 0 0 0 0 0 0 0 0 0 0 0 0
43603 - 0 0 0 0 0 0 0 0 0 0 0 0
43604 - 0 0 0 0 0 0 0 0 0 0 0 0
43605 - 0 0 0 0 0 0 0 0 0 0 0 0
43606 - 0 0 0 0 0 1 0 0 1 0 0 0
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 6 6 6
43612 - 30 30 30 78 78 78 30 30 30 2 2 6
43613 - 2 2 6 2 2 6 2 2 6 2 2 6
43614 - 2 2 6 2 2 6 2 2 6 2 2 6
43615 - 2 2 6 2 2 6 2 2 6 10 10 10
43616 - 10 10 10 2 2 6 2 2 6 2 2 6
43617 - 2 2 6 2 2 6 2 2 6 78 78 78
43618 - 50 50 50 18 18 18 6 6 6 0 0 0
43619 - 0 0 0 0 0 0 0 0 0 0 0 0
43620 - 0 0 0 0 0 0 0 0 0 0 0 0
43621 - 0 0 0 0 0 0 0 0 0 0 0 0
43622 - 0 0 0 0 0 0 0 0 0 0 0 0
43623 - 0 0 0 0 0 0 0 0 0 0 0 0
43624 - 0 0 0 0 0 0 0 0 0 0 0 0
43625 - 0 0 0 0 0 0 0 0 0 0 0 0
43626 - 0 0 1 0 0 0 0 0 0 0 0 0
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 10 10 10
43632 - 38 38 38 86 86 86 14 14 14 2 2 6
43633 - 2 2 6 2 2 6 2 2 6 2 2 6
43634 - 2 2 6 2 2 6 2 2 6 2 2 6
43635 - 2 2 6 2 2 6 2 2 6 2 2 6
43636 - 2 2 6 2 2 6 2 2 6 2 2 6
43637 - 2 2 6 2 2 6 2 2 6 54 54 54
43638 - 66 66 66 26 26 26 6 6 6 0 0 0
43639 - 0 0 0 0 0 0 0 0 0 0 0 0
43640 - 0 0 0 0 0 0 0 0 0 0 0 0
43641 - 0 0 0 0 0 0 0 0 0 0 0 0
43642 - 0 0 0 0 0 0 0 0 0 0 0 0
43643 - 0 0 0 0 0 0 0 0 0 0 0 0
43644 - 0 0 0 0 0 0 0 0 0 0 0 0
43645 - 0 0 0 0 0 0 0 0 0 0 0 0
43646 - 0 0 0 0 0 1 0 0 1 0 0 0
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 14 14 14
43652 - 42 42 42 82 82 82 2 2 6 2 2 6
43653 - 2 2 6 6 6 6 10 10 10 2 2 6
43654 - 2 2 6 2 2 6 2 2 6 2 2 6
43655 - 2 2 6 2 2 6 2 2 6 6 6 6
43656 - 14 14 14 10 10 10 2 2 6 2 2 6
43657 - 2 2 6 2 2 6 2 2 6 18 18 18
43658 - 82 82 82 34 34 34 10 10 10 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 0 0 0
43660 - 0 0 0 0 0 0 0 0 0 0 0 0
43661 - 0 0 0 0 0 0 0 0 0 0 0 0
43662 - 0 0 0 0 0 0 0 0 0 0 0 0
43663 - 0 0 0 0 0 0 0 0 0 0 0 0
43664 - 0 0 0 0 0 0 0 0 0 0 0 0
43665 - 0 0 0 0 0 0 0 0 0 0 0 0
43666 - 0 0 1 0 0 0 0 0 0 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 14 14 14
43672 - 46 46 46 86 86 86 2 2 6 2 2 6
43673 - 6 6 6 6 6 6 22 22 22 34 34 34
43674 - 6 6 6 2 2 6 2 2 6 2 2 6
43675 - 2 2 6 2 2 6 18 18 18 34 34 34
43676 - 10 10 10 50 50 50 22 22 22 2 2 6
43677 - 2 2 6 2 2 6 2 2 6 10 10 10
43678 - 86 86 86 42 42 42 14 14 14 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 0 0 0
43680 - 0 0 0 0 0 0 0 0 0 0 0 0
43681 - 0 0 0 0 0 0 0 0 0 0 0 0
43682 - 0 0 0 0 0 0 0 0 0 0 0 0
43683 - 0 0 0 0 0 0 0 0 0 0 0 0
43684 - 0 0 0 0 0 0 0 0 0 0 0 0
43685 - 0 0 0 0 0 0 0 0 0 0 0 0
43686 - 0 0 1 0 0 1 0 0 1 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 14 14 14
43692 - 46 46 46 86 86 86 2 2 6 2 2 6
43693 - 38 38 38 116 116 116 94 94 94 22 22 22
43694 - 22 22 22 2 2 6 2 2 6 2 2 6
43695 - 14 14 14 86 86 86 138 138 138 162 162 162
43696 -154 154 154 38 38 38 26 26 26 6 6 6
43697 - 2 2 6 2 2 6 2 2 6 2 2 6
43698 - 86 86 86 46 46 46 14 14 14 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 0 0 0
43700 - 0 0 0 0 0 0 0 0 0 0 0 0
43701 - 0 0 0 0 0 0 0 0 0 0 0 0
43702 - 0 0 0 0 0 0 0 0 0 0 0 0
43703 - 0 0 0 0 0 0 0 0 0 0 0 0
43704 - 0 0 0 0 0 0 0 0 0 0 0 0
43705 - 0 0 0 0 0 0 0 0 0 0 0 0
43706 - 0 0 0 0 0 0 0 0 0 0 0 0
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 14 14 14
43712 - 46 46 46 86 86 86 2 2 6 14 14 14
43713 -134 134 134 198 198 198 195 195 195 116 116 116
43714 - 10 10 10 2 2 6 2 2 6 6 6 6
43715 -101 98 89 187 187 187 210 210 210 218 218 218
43716 -214 214 214 134 134 134 14 14 14 6 6 6
43717 - 2 2 6 2 2 6 2 2 6 2 2 6
43718 - 86 86 86 50 50 50 18 18 18 6 6 6
43719 - 0 0 0 0 0 0 0 0 0 0 0 0
43720 - 0 0 0 0 0 0 0 0 0 0 0 0
43721 - 0 0 0 0 0 0 0 0 0 0 0 0
43722 - 0 0 0 0 0 0 0 0 0 0 0 0
43723 - 0 0 0 0 0 0 0 0 0 0 0 0
43724 - 0 0 0 0 0 0 0 0 0 0 0 0
43725 - 0 0 0 0 0 0 0 0 1 0 0 0
43726 - 0 0 1 0 0 1 0 0 1 0 0 0
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 14 14 14
43732 - 46 46 46 86 86 86 2 2 6 54 54 54
43733 -218 218 218 195 195 195 226 226 226 246 246 246
43734 - 58 58 58 2 2 6 2 2 6 30 30 30
43735 -210 210 210 253 253 253 174 174 174 123 123 123
43736 -221 221 221 234 234 234 74 74 74 2 2 6
43737 - 2 2 6 2 2 6 2 2 6 2 2 6
43738 - 70 70 70 58 58 58 22 22 22 6 6 6
43739 - 0 0 0 0 0 0 0 0 0 0 0 0
43740 - 0 0 0 0 0 0 0 0 0 0 0 0
43741 - 0 0 0 0 0 0 0 0 0 0 0 0
43742 - 0 0 0 0 0 0 0 0 0 0 0 0
43743 - 0 0 0 0 0 0 0 0 0 0 0 0
43744 - 0 0 0 0 0 0 0 0 0 0 0 0
43745 - 0 0 0 0 0 0 0 0 0 0 0 0
43746 - 0 0 0 0 0 0 0 0 0 0 0 0
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 14 14 14
43752 - 46 46 46 82 82 82 2 2 6 106 106 106
43753 -170 170 170 26 26 26 86 86 86 226 226 226
43754 -123 123 123 10 10 10 14 14 14 46 46 46
43755 -231 231 231 190 190 190 6 6 6 70 70 70
43756 - 90 90 90 238 238 238 158 158 158 2 2 6
43757 - 2 2 6 2 2 6 2 2 6 2 2 6
43758 - 70 70 70 58 58 58 22 22 22 6 6 6
43759 - 0 0 0 0 0 0 0 0 0 0 0 0
43760 - 0 0 0 0 0 0 0 0 0 0 0 0
43761 - 0 0 0 0 0 0 0 0 0 0 0 0
43762 - 0 0 0 0 0 0 0 0 0 0 0 0
43763 - 0 0 0 0 0 0 0 0 0 0 0 0
43764 - 0 0 0 0 0 0 0 0 0 0 0 0
43765 - 0 0 0 0 0 0 0 0 1 0 0 0
43766 - 0 0 1 0 0 1 0 0 1 0 0 0
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 14 14 14
43772 - 42 42 42 86 86 86 6 6 6 116 116 116
43773 -106 106 106 6 6 6 70 70 70 149 149 149
43774 -128 128 128 18 18 18 38 38 38 54 54 54
43775 -221 221 221 106 106 106 2 2 6 14 14 14
43776 - 46 46 46 190 190 190 198 198 198 2 2 6
43777 - 2 2 6 2 2 6 2 2 6 2 2 6
43778 - 74 74 74 62 62 62 22 22 22 6 6 6
43779 - 0 0 0 0 0 0 0 0 0 0 0 0
43780 - 0 0 0 0 0 0 0 0 0 0 0 0
43781 - 0 0 0 0 0 0 0 0 0 0 0 0
43782 - 0 0 0 0 0 0 0 0 0 0 0 0
43783 - 0 0 0 0 0 0 0 0 0 0 0 0
43784 - 0 0 0 0 0 0 0 0 0 0 0 0
43785 - 0 0 0 0 0 0 0 0 1 0 0 0
43786 - 0 0 1 0 0 0 0 0 1 0 0 0
43787 - 0 0 0 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 14 14 14
43792 - 42 42 42 94 94 94 14 14 14 101 101 101
43793 -128 128 128 2 2 6 18 18 18 116 116 116
43794 -118 98 46 121 92 8 121 92 8 98 78 10
43795 -162 162 162 106 106 106 2 2 6 2 2 6
43796 - 2 2 6 195 195 195 195 195 195 6 6 6
43797 - 2 2 6 2 2 6 2 2 6 2 2 6
43798 - 74 74 74 62 62 62 22 22 22 6 6 6
43799 - 0 0 0 0 0 0 0 0 0 0 0 0
43800 - 0 0 0 0 0 0 0 0 0 0 0 0
43801 - 0 0 0 0 0 0 0 0 0 0 0 0
43802 - 0 0 0 0 0 0 0 0 0 0 0 0
43803 - 0 0 0 0 0 0 0 0 0 0 0 0
43804 - 0 0 0 0 0 0 0 0 0 0 0 0
43805 - 0 0 0 0 0 0 0 0 1 0 0 1
43806 - 0 0 1 0 0 0 0 0 1 0 0 0
43807 - 0 0 0 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 10 10 10
43812 - 38 38 38 90 90 90 14 14 14 58 58 58
43813 -210 210 210 26 26 26 54 38 6 154 114 10
43814 -226 170 11 236 186 11 225 175 15 184 144 12
43815 -215 174 15 175 146 61 37 26 9 2 2 6
43816 - 70 70 70 246 246 246 138 138 138 2 2 6
43817 - 2 2 6 2 2 6 2 2 6 2 2 6
43818 - 70 70 70 66 66 66 26 26 26 6 6 6
43819 - 0 0 0 0 0 0 0 0 0 0 0 0
43820 - 0 0 0 0 0 0 0 0 0 0 0 0
43821 - 0 0 0 0 0 0 0 0 0 0 0 0
43822 - 0 0 0 0 0 0 0 0 0 0 0 0
43823 - 0 0 0 0 0 0 0 0 0 0 0 0
43824 - 0 0 0 0 0 0 0 0 0 0 0 0
43825 - 0 0 0 0 0 0 0 0 0 0 0 0
43826 - 0 0 0 0 0 0 0 0 0 0 0 0
43827 - 0 0 0 0 0 0 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 10 10 10
43832 - 38 38 38 86 86 86 14 14 14 10 10 10
43833 -195 195 195 188 164 115 192 133 9 225 175 15
43834 -239 182 13 234 190 10 232 195 16 232 200 30
43835 -245 207 45 241 208 19 232 195 16 184 144 12
43836 -218 194 134 211 206 186 42 42 42 2 2 6
43837 - 2 2 6 2 2 6 2 2 6 2 2 6
43838 - 50 50 50 74 74 74 30 30 30 6 6 6
43839 - 0 0 0 0 0 0 0 0 0 0 0 0
43840 - 0 0 0 0 0 0 0 0 0 0 0 0
43841 - 0 0 0 0 0 0 0 0 0 0 0 0
43842 - 0 0 0 0 0 0 0 0 0 0 0 0
43843 - 0 0 0 0 0 0 0 0 0 0 0 0
43844 - 0 0 0 0 0 0 0 0 0 0 0 0
43845 - 0 0 0 0 0 0 0 0 0 0 0 0
43846 - 0 0 0 0 0 0 0 0 0 0 0 0
43847 - 0 0 0 0 0 0 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 0 0 0 10 10 10
43852 - 34 34 34 86 86 86 14 14 14 2 2 6
43853 -121 87 25 192 133 9 219 162 10 239 182 13
43854 -236 186 11 232 195 16 241 208 19 244 214 54
43855 -246 218 60 246 218 38 246 215 20 241 208 19
43856 -241 208 19 226 184 13 121 87 25 2 2 6
43857 - 2 2 6 2 2 6 2 2 6 2 2 6
43858 - 50 50 50 82 82 82 34 34 34 10 10 10
43859 - 0 0 0 0 0 0 0 0 0 0 0 0
43860 - 0 0 0 0 0 0 0 0 0 0 0 0
43861 - 0 0 0 0 0 0 0 0 0 0 0 0
43862 - 0 0 0 0 0 0 0 0 0 0 0 0
43863 - 0 0 0 0 0 0 0 0 0 0 0 0
43864 - 0 0 0 0 0 0 0 0 0 0 0 0
43865 - 0 0 0 0 0 0 0 0 0 0 0 0
43866 - 0 0 0 0 0 0 0 0 0 0 0 0
43867 - 0 0 0 0 0 0 0 0 0 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 0 0 0 10 10 10
43872 - 34 34 34 82 82 82 30 30 30 61 42 6
43873 -180 123 7 206 145 10 230 174 11 239 182 13
43874 -234 190 10 238 202 15 241 208 19 246 218 74
43875 -246 218 38 246 215 20 246 215 20 246 215 20
43876 -226 184 13 215 174 15 184 144 12 6 6 6
43877 - 2 2 6 2 2 6 2 2 6 2 2 6
43878 - 26 26 26 94 94 94 42 42 42 14 14 14
43879 - 0 0 0 0 0 0 0 0 0 0 0 0
43880 - 0 0 0 0 0 0 0 0 0 0 0 0
43881 - 0 0 0 0 0 0 0 0 0 0 0 0
43882 - 0 0 0 0 0 0 0 0 0 0 0 0
43883 - 0 0 0 0 0 0 0 0 0 0 0 0
43884 - 0 0 0 0 0 0 0 0 0 0 0 0
43885 - 0 0 0 0 0 0 0 0 0 0 0 0
43886 - 0 0 0 0 0 0 0 0 0 0 0 0
43887 - 0 0 0 0 0 0 0 0 0 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 0 0 0 0 0 0 10 10 10
43892 - 30 30 30 78 78 78 50 50 50 104 69 6
43893 -192 133 9 216 158 10 236 178 12 236 186 11
43894 -232 195 16 241 208 19 244 214 54 245 215 43
43895 -246 215 20 246 215 20 241 208 19 198 155 10
43896 -200 144 11 216 158 10 156 118 10 2 2 6
43897 - 2 2 6 2 2 6 2 2 6 2 2 6
43898 - 6 6 6 90 90 90 54 54 54 18 18 18
43899 - 6 6 6 0 0 0 0 0 0 0 0 0
43900 - 0 0 0 0 0 0 0 0 0 0 0 0
43901 - 0 0 0 0 0 0 0 0 0 0 0 0
43902 - 0 0 0 0 0 0 0 0 0 0 0 0
43903 - 0 0 0 0 0 0 0 0 0 0 0 0
43904 - 0 0 0 0 0 0 0 0 0 0 0 0
43905 - 0 0 0 0 0 0 0 0 0 0 0 0
43906 - 0 0 0 0 0 0 0 0 0 0 0 0
43907 - 0 0 0 0 0 0 0 0 0 0 0 0
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 0 0 0 0 0 0 0 0 0 10 10 10
43912 - 30 30 30 78 78 78 46 46 46 22 22 22
43913 -137 92 6 210 162 10 239 182 13 238 190 10
43914 -238 202 15 241 208 19 246 215 20 246 215 20
43915 -241 208 19 203 166 17 185 133 11 210 150 10
43916 -216 158 10 210 150 10 102 78 10 2 2 6
43917 - 6 6 6 54 54 54 14 14 14 2 2 6
43918 - 2 2 6 62 62 62 74 74 74 30 30 30
43919 - 10 10 10 0 0 0 0 0 0 0 0 0
43920 - 0 0 0 0 0 0 0 0 0 0 0 0
43921 - 0 0 0 0 0 0 0 0 0 0 0 0
43922 - 0 0 0 0 0 0 0 0 0 0 0 0
43923 - 0 0 0 0 0 0 0 0 0 0 0 0
43924 - 0 0 0 0 0 0 0 0 0 0 0 0
43925 - 0 0 0 0 0 0 0 0 0 0 0 0
43926 - 0 0 0 0 0 0 0 0 0 0 0 0
43927 - 0 0 0 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 0 0 0 0 0 0 0 0 0 10 10 10
43932 - 34 34 34 78 78 78 50 50 50 6 6 6
43933 - 94 70 30 139 102 15 190 146 13 226 184 13
43934 -232 200 30 232 195 16 215 174 15 190 146 13
43935 -168 122 10 192 133 9 210 150 10 213 154 11
43936 -202 150 34 182 157 106 101 98 89 2 2 6
43937 - 2 2 6 78 78 78 116 116 116 58 58 58
43938 - 2 2 6 22 22 22 90 90 90 46 46 46
43939 - 18 18 18 6 6 6 0 0 0 0 0 0
43940 - 0 0 0 0 0 0 0 0 0 0 0 0
43941 - 0 0 0 0 0 0 0 0 0 0 0 0
43942 - 0 0 0 0 0 0 0 0 0 0 0 0
43943 - 0 0 0 0 0 0 0 0 0 0 0 0
43944 - 0 0 0 0 0 0 0 0 0 0 0 0
43945 - 0 0 0 0 0 0 0 0 0 0 0 0
43946 - 0 0 0 0 0 0 0 0 0 0 0 0
43947 - 0 0 0 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 0 0 0
43951 - 0 0 0 0 0 0 0 0 0 10 10 10
43952 - 38 38 38 86 86 86 50 50 50 6 6 6
43953 -128 128 128 174 154 114 156 107 11 168 122 10
43954 -198 155 10 184 144 12 197 138 11 200 144 11
43955 -206 145 10 206 145 10 197 138 11 188 164 115
43956 -195 195 195 198 198 198 174 174 174 14 14 14
43957 - 2 2 6 22 22 22 116 116 116 116 116 116
43958 - 22 22 22 2 2 6 74 74 74 70 70 70
43959 - 30 30 30 10 10 10 0 0 0 0 0 0
43960 - 0 0 0 0 0 0 0 0 0 0 0 0
43961 - 0 0 0 0 0 0 0 0 0 0 0 0
43962 - 0 0 0 0 0 0 0 0 0 0 0 0
43963 - 0 0 0 0 0 0 0 0 0 0 0 0
43964 - 0 0 0 0 0 0 0 0 0 0 0 0
43965 - 0 0 0 0 0 0 0 0 0 0 0 0
43966 - 0 0 0 0 0 0 0 0 0 0 0 0
43967 - 0 0 0 0 0 0 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 0 0 0 0 0 0
43971 - 0 0 0 0 0 0 6 6 6 18 18 18
43972 - 50 50 50 101 101 101 26 26 26 10 10 10
43973 -138 138 138 190 190 190 174 154 114 156 107 11
43974 -197 138 11 200 144 11 197 138 11 192 133 9
43975 -180 123 7 190 142 34 190 178 144 187 187 187
43976 -202 202 202 221 221 221 214 214 214 66 66 66
43977 - 2 2 6 2 2 6 50 50 50 62 62 62
43978 - 6 6 6 2 2 6 10 10 10 90 90 90
43979 - 50 50 50 18 18 18 6 6 6 0 0 0
43980 - 0 0 0 0 0 0 0 0 0 0 0 0
43981 - 0 0 0 0 0 0 0 0 0 0 0 0
43982 - 0 0 0 0 0 0 0 0 0 0 0 0
43983 - 0 0 0 0 0 0 0 0 0 0 0 0
43984 - 0 0 0 0 0 0 0 0 0 0 0 0
43985 - 0 0 0 0 0 0 0 0 0 0 0 0
43986 - 0 0 0 0 0 0 0 0 0 0 0 0
43987 - 0 0 0 0 0 0 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 0 0 0 0 0 0 0 0 0
43991 - 0 0 0 0 0 0 10 10 10 34 34 34
43992 - 74 74 74 74 74 74 2 2 6 6 6 6
43993 -144 144 144 198 198 198 190 190 190 178 166 146
43994 -154 121 60 156 107 11 156 107 11 168 124 44
43995 -174 154 114 187 187 187 190 190 190 210 210 210
43996 -246 246 246 253 253 253 253 253 253 182 182 182
43997 - 6 6 6 2 2 6 2 2 6 2 2 6
43998 - 2 2 6 2 2 6 2 2 6 62 62 62
43999 - 74 74 74 34 34 34 14 14 14 0 0 0
44000 - 0 0 0 0 0 0 0 0 0 0 0 0
44001 - 0 0 0 0 0 0 0 0 0 0 0 0
44002 - 0 0 0 0 0 0 0 0 0 0 0 0
44003 - 0 0 0 0 0 0 0 0 0 0 0 0
44004 - 0 0 0 0 0 0 0 0 0 0 0 0
44005 - 0 0 0 0 0 0 0 0 0 0 0 0
44006 - 0 0 0 0 0 0 0 0 0 0 0 0
44007 - 0 0 0 0 0 0 0 0 0 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 0 0 0 0 0 0 0 0 0 0 0 0
44011 - 0 0 0 10 10 10 22 22 22 54 54 54
44012 - 94 94 94 18 18 18 2 2 6 46 46 46
44013 -234 234 234 221 221 221 190 190 190 190 190 190
44014 -190 190 190 187 187 187 187 187 187 190 190 190
44015 -190 190 190 195 195 195 214 214 214 242 242 242
44016 -253 253 253 253 253 253 253 253 253 253 253 253
44017 - 82 82 82 2 2 6 2 2 6 2 2 6
44018 - 2 2 6 2 2 6 2 2 6 14 14 14
44019 - 86 86 86 54 54 54 22 22 22 6 6 6
44020 - 0 0 0 0 0 0 0 0 0 0 0 0
44021 - 0 0 0 0 0 0 0 0 0 0 0 0
44022 - 0 0 0 0 0 0 0 0 0 0 0 0
44023 - 0 0 0 0 0 0 0 0 0 0 0 0
44024 - 0 0 0 0 0 0 0 0 0 0 0 0
44025 - 0 0 0 0 0 0 0 0 0 0 0 0
44026 - 0 0 0 0 0 0 0 0 0 0 0 0
44027 - 0 0 0 0 0 0 0 0 0 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 0 0 0 0 0 0 0 0 0
44030 - 0 0 0 0 0 0 0 0 0 0 0 0
44031 - 6 6 6 18 18 18 46 46 46 90 90 90
44032 - 46 46 46 18 18 18 6 6 6 182 182 182
44033 -253 253 253 246 246 246 206 206 206 190 190 190
44034 -190 190 190 190 190 190 190 190 190 190 190 190
44035 -206 206 206 231 231 231 250 250 250 253 253 253
44036 -253 253 253 253 253 253 253 253 253 253 253 253
44037 -202 202 202 14 14 14 2 2 6 2 2 6
44038 - 2 2 6 2 2 6 2 2 6 2 2 6
44039 - 42 42 42 86 86 86 42 42 42 18 18 18
44040 - 6 6 6 0 0 0 0 0 0 0 0 0
44041 - 0 0 0 0 0 0 0 0 0 0 0 0
44042 - 0 0 0 0 0 0 0 0 0 0 0 0
44043 - 0 0 0 0 0 0 0 0 0 0 0 0
44044 - 0 0 0 0 0 0 0 0 0 0 0 0
44045 - 0 0 0 0 0 0 0 0 0 0 0 0
44046 - 0 0 0 0 0 0 0 0 0 0 0 0
44047 - 0 0 0 0 0 0 0 0 0 0 0 0
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 0 0 0 0 0 0 0 0 0 0 0 0
44050 - 0 0 0 0 0 0 0 0 0 6 6 6
44051 - 14 14 14 38 38 38 74 74 74 66 66 66
44052 - 2 2 6 6 6 6 90 90 90 250 250 250
44053 -253 253 253 253 253 253 238 238 238 198 198 198
44054 -190 190 190 190 190 190 195 195 195 221 221 221
44055 -246 246 246 253 253 253 253 253 253 253 253 253
44056 -253 253 253 253 253 253 253 253 253 253 253 253
44057 -253 253 253 82 82 82 2 2 6 2 2 6
44058 - 2 2 6 2 2 6 2 2 6 2 2 6
44059 - 2 2 6 78 78 78 70 70 70 34 34 34
44060 - 14 14 14 6 6 6 0 0 0 0 0 0
44061 - 0 0 0 0 0 0 0 0 0 0 0 0
44062 - 0 0 0 0 0 0 0 0 0 0 0 0
44063 - 0 0 0 0 0 0 0 0 0 0 0 0
44064 - 0 0 0 0 0 0 0 0 0 0 0 0
44065 - 0 0 0 0 0 0 0 0 0 0 0 0
44066 - 0 0 0 0 0 0 0 0 0 0 0 0
44067 - 0 0 0 0 0 0 0 0 0 0 0 0
44068 - 0 0 0 0 0 0 0 0 0 0 0 0
44069 - 0 0 0 0 0 0 0 0 0 0 0 0
44070 - 0 0 0 0 0 0 0 0 0 14 14 14
44071 - 34 34 34 66 66 66 78 78 78 6 6 6
44072 - 2 2 6 18 18 18 218 218 218 253 253 253
44073 -253 253 253 253 253 253 253 253 253 246 246 246
44074 -226 226 226 231 231 231 246 246 246 253 253 253
44075 -253 253 253 253 253 253 253 253 253 253 253 253
44076 -253 253 253 253 253 253 253 253 253 253 253 253
44077 -253 253 253 178 178 178 2 2 6 2 2 6
44078 - 2 2 6 2 2 6 2 2 6 2 2 6
44079 - 2 2 6 18 18 18 90 90 90 62 62 62
44080 - 30 30 30 10 10 10 0 0 0 0 0 0
44081 - 0 0 0 0 0 0 0 0 0 0 0 0
44082 - 0 0 0 0 0 0 0 0 0 0 0 0
44083 - 0 0 0 0 0 0 0 0 0 0 0 0
44084 - 0 0 0 0 0 0 0 0 0 0 0 0
44085 - 0 0 0 0 0 0 0 0 0 0 0 0
44086 - 0 0 0 0 0 0 0 0 0 0 0 0
44087 - 0 0 0 0 0 0 0 0 0 0 0 0
44088 - 0 0 0 0 0 0 0 0 0 0 0 0
44089 - 0 0 0 0 0 0 0 0 0 0 0 0
44090 - 0 0 0 0 0 0 10 10 10 26 26 26
44091 - 58 58 58 90 90 90 18 18 18 2 2 6
44092 - 2 2 6 110 110 110 253 253 253 253 253 253
44093 -253 253 253 253 253 253 253 253 253 253 253 253
44094 -250 250 250 253 253 253 253 253 253 253 253 253
44095 -253 253 253 253 253 253 253 253 253 253 253 253
44096 -253 253 253 253 253 253 253 253 253 253 253 253
44097 -253 253 253 231 231 231 18 18 18 2 2 6
44098 - 2 2 6 2 2 6 2 2 6 2 2 6
44099 - 2 2 6 2 2 6 18 18 18 94 94 94
44100 - 54 54 54 26 26 26 10 10 10 0 0 0
44101 - 0 0 0 0 0 0 0 0 0 0 0 0
44102 - 0 0 0 0 0 0 0 0 0 0 0 0
44103 - 0 0 0 0 0 0 0 0 0 0 0 0
44104 - 0 0 0 0 0 0 0 0 0 0 0 0
44105 - 0 0 0 0 0 0 0 0 0 0 0 0
44106 - 0 0 0 0 0 0 0 0 0 0 0 0
44107 - 0 0 0 0 0 0 0 0 0 0 0 0
44108 - 0 0 0 0 0 0 0 0 0 0 0 0
44109 - 0 0 0 0 0 0 0 0 0 0 0 0
44110 - 0 0 0 6 6 6 22 22 22 50 50 50
44111 - 90 90 90 26 26 26 2 2 6 2 2 6
44112 - 14 14 14 195 195 195 250 250 250 253 253 253
44113 -253 253 253 253 253 253 253 253 253 253 253 253
44114 -253 253 253 253 253 253 253 253 253 253 253 253
44115 -253 253 253 253 253 253 253 253 253 253 253 253
44116 -253 253 253 253 253 253 253 253 253 253 253 253
44117 -250 250 250 242 242 242 54 54 54 2 2 6
44118 - 2 2 6 2 2 6 2 2 6 2 2 6
44119 - 2 2 6 2 2 6 2 2 6 38 38 38
44120 - 86 86 86 50 50 50 22 22 22 6 6 6
44121 - 0 0 0 0 0 0 0 0 0 0 0 0
44122 - 0 0 0 0 0 0 0 0 0 0 0 0
44123 - 0 0 0 0 0 0 0 0 0 0 0 0
44124 - 0 0 0 0 0 0 0 0 0 0 0 0
44125 - 0 0 0 0 0 0 0 0 0 0 0 0
44126 - 0 0 0 0 0 0 0 0 0 0 0 0
44127 - 0 0 0 0 0 0 0 0 0 0 0 0
44128 - 0 0 0 0 0 0 0 0 0 0 0 0
44129 - 0 0 0 0 0 0 0 0 0 0 0 0
44130 - 6 6 6 14 14 14 38 38 38 82 82 82
44131 - 34 34 34 2 2 6 2 2 6 2 2 6
44132 - 42 42 42 195 195 195 246 246 246 253 253 253
44133 -253 253 253 253 253 253 253 253 253 250 250 250
44134 -242 242 242 242 242 242 250 250 250 253 253 253
44135 -253 253 253 253 253 253 253 253 253 253 253 253
44136 -253 253 253 250 250 250 246 246 246 238 238 238
44137 -226 226 226 231 231 231 101 101 101 6 6 6
44138 - 2 2 6 2 2 6 2 2 6 2 2 6
44139 - 2 2 6 2 2 6 2 2 6 2 2 6
44140 - 38 38 38 82 82 82 42 42 42 14 14 14
44141 - 6 6 6 0 0 0 0 0 0 0 0 0
44142 - 0 0 0 0 0 0 0 0 0 0 0 0
44143 - 0 0 0 0 0 0 0 0 0 0 0 0
44144 - 0 0 0 0 0 0 0 0 0 0 0 0
44145 - 0 0 0 0 0 0 0 0 0 0 0 0
44146 - 0 0 0 0 0 0 0 0 0 0 0 0
44147 - 0 0 0 0 0 0 0 0 0 0 0 0
44148 - 0 0 0 0 0 0 0 0 0 0 0 0
44149 - 0 0 0 0 0 0 0 0 0 0 0 0
44150 - 10 10 10 26 26 26 62 62 62 66 66 66
44151 - 2 2 6 2 2 6 2 2 6 6 6 6
44152 - 70 70 70 170 170 170 206 206 206 234 234 234
44153 -246 246 246 250 250 250 250 250 250 238 238 238
44154 -226 226 226 231 231 231 238 238 238 250 250 250
44155 -250 250 250 250 250 250 246 246 246 231 231 231
44156 -214 214 214 206 206 206 202 202 202 202 202 202
44157 -198 198 198 202 202 202 182 182 182 18 18 18
44158 - 2 2 6 2 2 6 2 2 6 2 2 6
44159 - 2 2 6 2 2 6 2 2 6 2 2 6
44160 - 2 2 6 62 62 62 66 66 66 30 30 30
44161 - 10 10 10 0 0 0 0 0 0 0 0 0
44162 - 0 0 0 0 0 0 0 0 0 0 0 0
44163 - 0 0 0 0 0 0 0 0 0 0 0 0
44164 - 0 0 0 0 0 0 0 0 0 0 0 0
44165 - 0 0 0 0 0 0 0 0 0 0 0 0
44166 - 0 0 0 0 0 0 0 0 0 0 0 0
44167 - 0 0 0 0 0 0 0 0 0 0 0 0
44168 - 0 0 0 0 0 0 0 0 0 0 0 0
44169 - 0 0 0 0 0 0 0 0 0 0 0 0
44170 - 14 14 14 42 42 42 82 82 82 18 18 18
44171 - 2 2 6 2 2 6 2 2 6 10 10 10
44172 - 94 94 94 182 182 182 218 218 218 242 242 242
44173 -250 250 250 253 253 253 253 253 253 250 250 250
44174 -234 234 234 253 253 253 253 253 253 253 253 253
44175 -253 253 253 253 253 253 253 253 253 246 246 246
44176 -238 238 238 226 226 226 210 210 210 202 202 202
44177 -195 195 195 195 195 195 210 210 210 158 158 158
44178 - 6 6 6 14 14 14 50 50 50 14 14 14
44179 - 2 2 6 2 2 6 2 2 6 2 2 6
44180 - 2 2 6 6 6 6 86 86 86 46 46 46
44181 - 18 18 18 6 6 6 0 0 0 0 0 0
44182 - 0 0 0 0 0 0 0 0 0 0 0 0
44183 - 0 0 0 0 0 0 0 0 0 0 0 0
44184 - 0 0 0 0 0 0 0 0 0 0 0 0
44185 - 0 0 0 0 0 0 0 0 0 0 0 0
44186 - 0 0 0 0 0 0 0 0 0 0 0 0
44187 - 0 0 0 0 0 0 0 0 0 0 0 0
44188 - 0 0 0 0 0 0 0 0 0 0 0 0
44189 - 0 0 0 0 0 0 0 0 0 6 6 6
44190 - 22 22 22 54 54 54 70 70 70 2 2 6
44191 - 2 2 6 10 10 10 2 2 6 22 22 22
44192 -166 166 166 231 231 231 250 250 250 253 253 253
44193 -253 253 253 253 253 253 253 253 253 250 250 250
44194 -242 242 242 253 253 253 253 253 253 253 253 253
44195 -253 253 253 253 253 253 253 253 253 253 253 253
44196 -253 253 253 253 253 253 253 253 253 246 246 246
44197 -231 231 231 206 206 206 198 198 198 226 226 226
44198 - 94 94 94 2 2 6 6 6 6 38 38 38
44199 - 30 30 30 2 2 6 2 2 6 2 2 6
44200 - 2 2 6 2 2 6 62 62 62 66 66 66
44201 - 26 26 26 10 10 10 0 0 0 0 0 0
44202 - 0 0 0 0 0 0 0 0 0 0 0 0
44203 - 0 0 0 0 0 0 0 0 0 0 0 0
44204 - 0 0 0 0 0 0 0 0 0 0 0 0
44205 - 0 0 0 0 0 0 0 0 0 0 0 0
44206 - 0 0 0 0 0 0 0 0 0 0 0 0
44207 - 0 0 0 0 0 0 0 0 0 0 0 0
44208 - 0 0 0 0 0 0 0 0 0 0 0 0
44209 - 0 0 0 0 0 0 0 0 0 10 10 10
44210 - 30 30 30 74 74 74 50 50 50 2 2 6
44211 - 26 26 26 26 26 26 2 2 6 106 106 106
44212 -238 238 238 253 253 253 253 253 253 253 253 253
44213 -253 253 253 253 253 253 253 253 253 253 253 253
44214 -253 253 253 253 253 253 253 253 253 253 253 253
44215 -253 253 253 253 253 253 253 253 253 253 253 253
44216 -253 253 253 253 253 253 253 253 253 253 253 253
44217 -253 253 253 246 246 246 218 218 218 202 202 202
44218 -210 210 210 14 14 14 2 2 6 2 2 6
44219 - 30 30 30 22 22 22 2 2 6 2 2 6
44220 - 2 2 6 2 2 6 18 18 18 86 86 86
44221 - 42 42 42 14 14 14 0 0 0 0 0 0
44222 - 0 0 0 0 0 0 0 0 0 0 0 0
44223 - 0 0 0 0 0 0 0 0 0 0 0 0
44224 - 0 0 0 0 0 0 0 0 0 0 0 0
44225 - 0 0 0 0 0 0 0 0 0 0 0 0
44226 - 0 0 0 0 0 0 0 0 0 0 0 0
44227 - 0 0 0 0 0 0 0 0 0 0 0 0
44228 - 0 0 0 0 0 0 0 0 0 0 0 0
44229 - 0 0 0 0 0 0 0 0 0 14 14 14
44230 - 42 42 42 90 90 90 22 22 22 2 2 6
44231 - 42 42 42 2 2 6 18 18 18 218 218 218
44232 -253 253 253 253 253 253 253 253 253 253 253 253
44233 -253 253 253 253 253 253 253 253 253 253 253 253
44234 -253 253 253 253 253 253 253 253 253 253 253 253
44235 -253 253 253 253 253 253 253 253 253 253 253 253
44236 -253 253 253 253 253 253 253 253 253 253 253 253
44237 -253 253 253 253 253 253 250 250 250 221 221 221
44238 -218 218 218 101 101 101 2 2 6 14 14 14
44239 - 18 18 18 38 38 38 10 10 10 2 2 6
44240 - 2 2 6 2 2 6 2 2 6 78 78 78
44241 - 58 58 58 22 22 22 6 6 6 0 0 0
44242 - 0 0 0 0 0 0 0 0 0 0 0 0
44243 - 0 0 0 0 0 0 0 0 0 0 0 0
44244 - 0 0 0 0 0 0 0 0 0 0 0 0
44245 - 0 0 0 0 0 0 0 0 0 0 0 0
44246 - 0 0 0 0 0 0 0 0 0 0 0 0
44247 - 0 0 0 0 0 0 0 0 0 0 0 0
44248 - 0 0 0 0 0 0 0 0 0 0 0 0
44249 - 0 0 0 0 0 0 6 6 6 18 18 18
44250 - 54 54 54 82 82 82 2 2 6 26 26 26
44251 - 22 22 22 2 2 6 123 123 123 253 253 253
44252 -253 253 253 253 253 253 253 253 253 253 253 253
44253 -253 253 253 253 253 253 253 253 253 253 253 253
44254 -253 253 253 253 253 253 253 253 253 253 253 253
44255 -253 253 253 253 253 253 253 253 253 253 253 253
44256 -253 253 253 253 253 253 253 253 253 253 253 253
44257 -253 253 253 253 253 253 253 253 253 250 250 250
44258 -238 238 238 198 198 198 6 6 6 38 38 38
44259 - 58 58 58 26 26 26 38 38 38 2 2 6
44260 - 2 2 6 2 2 6 2 2 6 46 46 46
44261 - 78 78 78 30 30 30 10 10 10 0 0 0
44262 - 0 0 0 0 0 0 0 0 0 0 0 0
44263 - 0 0 0 0 0 0 0 0 0 0 0 0
44264 - 0 0 0 0 0 0 0 0 0 0 0 0
44265 - 0 0 0 0 0 0 0 0 0 0 0 0
44266 - 0 0 0 0 0 0 0 0 0 0 0 0
44267 - 0 0 0 0 0 0 0 0 0 0 0 0
44268 - 0 0 0 0 0 0 0 0 0 0 0 0
44269 - 0 0 0 0 0 0 10 10 10 30 30 30
44270 - 74 74 74 58 58 58 2 2 6 42 42 42
44271 - 2 2 6 22 22 22 231 231 231 253 253 253
44272 -253 253 253 253 253 253 253 253 253 253 253 253
44273 -253 253 253 253 253 253 253 253 253 250 250 250
44274 -253 253 253 253 253 253 253 253 253 253 253 253
44275 -253 253 253 253 253 253 253 253 253 253 253 253
44276 -253 253 253 253 253 253 253 253 253 253 253 253
44277 -253 253 253 253 253 253 253 253 253 253 253 253
44278 -253 253 253 246 246 246 46 46 46 38 38 38
44279 - 42 42 42 14 14 14 38 38 38 14 14 14
44280 - 2 2 6 2 2 6 2 2 6 6 6 6
44281 - 86 86 86 46 46 46 14 14 14 0 0 0
44282 - 0 0 0 0 0 0 0 0 0 0 0 0
44283 - 0 0 0 0 0 0 0 0 0 0 0 0
44284 - 0 0 0 0 0 0 0 0 0 0 0 0
44285 - 0 0 0 0 0 0 0 0 0 0 0 0
44286 - 0 0 0 0 0 0 0 0 0 0 0 0
44287 - 0 0 0 0 0 0 0 0 0 0 0 0
44288 - 0 0 0 0 0 0 0 0 0 0 0 0
44289 - 0 0 0 6 6 6 14 14 14 42 42 42
44290 - 90 90 90 18 18 18 18 18 18 26 26 26
44291 - 2 2 6 116 116 116 253 253 253 253 253 253
44292 -253 253 253 253 253 253 253 253 253 253 253 253
44293 -253 253 253 253 253 253 250 250 250 238 238 238
44294 -253 253 253 253 253 253 253 253 253 253 253 253
44295 -253 253 253 253 253 253 253 253 253 253 253 253
44296 -253 253 253 253 253 253 253 253 253 253 253 253
44297 -253 253 253 253 253 253 253 253 253 253 253 253
44298 -253 253 253 253 253 253 94 94 94 6 6 6
44299 - 2 2 6 2 2 6 10 10 10 34 34 34
44300 - 2 2 6 2 2 6 2 2 6 2 2 6
44301 - 74 74 74 58 58 58 22 22 22 6 6 6
44302 - 0 0 0 0 0 0 0 0 0 0 0 0
44303 - 0 0 0 0 0 0 0 0 0 0 0 0
44304 - 0 0 0 0 0 0 0 0 0 0 0 0
44305 - 0 0 0 0 0 0 0 0 0 0 0 0
44306 - 0 0 0 0 0 0 0 0 0 0 0 0
44307 - 0 0 0 0 0 0 0 0 0 0 0 0
44308 - 0 0 0 0 0 0 0 0 0 0 0 0
44309 - 0 0 0 10 10 10 26 26 26 66 66 66
44310 - 82 82 82 2 2 6 38 38 38 6 6 6
44311 - 14 14 14 210 210 210 253 253 253 253 253 253
44312 -253 253 253 253 253 253 253 253 253 253 253 253
44313 -253 253 253 253 253 253 246 246 246 242 242 242
44314 -253 253 253 253 253 253 253 253 253 253 253 253
44315 -253 253 253 253 253 253 253 253 253 253 253 253
44316 -253 253 253 253 253 253 253 253 253 253 253 253
44317 -253 253 253 253 253 253 253 253 253 253 253 253
44318 -253 253 253 253 253 253 144 144 144 2 2 6
44319 - 2 2 6 2 2 6 2 2 6 46 46 46
44320 - 2 2 6 2 2 6 2 2 6 2 2 6
44321 - 42 42 42 74 74 74 30 30 30 10 10 10
44322 - 0 0 0 0 0 0 0 0 0 0 0 0
44323 - 0 0 0 0 0 0 0 0 0 0 0 0
44324 - 0 0 0 0 0 0 0 0 0 0 0 0
44325 - 0 0 0 0 0 0 0 0 0 0 0 0
44326 - 0 0 0 0 0 0 0 0 0 0 0 0
44327 - 0 0 0 0 0 0 0 0 0 0 0 0
44328 - 0 0 0 0 0 0 0 0 0 0 0 0
44329 - 6 6 6 14 14 14 42 42 42 90 90 90
44330 - 26 26 26 6 6 6 42 42 42 2 2 6
44331 - 74 74 74 250 250 250 253 253 253 253 253 253
44332 -253 253 253 253 253 253 253 253 253 253 253 253
44333 -253 253 253 253 253 253 242 242 242 242 242 242
44334 -253 253 253 253 253 253 253 253 253 253 253 253
44335 -253 253 253 253 253 253 253 253 253 253 253 253
44336 -253 253 253 253 253 253 253 253 253 253 253 253
44337 -253 253 253 253 253 253 253 253 253 253 253 253
44338 -253 253 253 253 253 253 182 182 182 2 2 6
44339 - 2 2 6 2 2 6 2 2 6 46 46 46
44340 - 2 2 6 2 2 6 2 2 6 2 2 6
44341 - 10 10 10 86 86 86 38 38 38 10 10 10
44342 - 0 0 0 0 0 0 0 0 0 0 0 0
44343 - 0 0 0 0 0 0 0 0 0 0 0 0
44344 - 0 0 0 0 0 0 0 0 0 0 0 0
44345 - 0 0 0 0 0 0 0 0 0 0 0 0
44346 - 0 0 0 0 0 0 0 0 0 0 0 0
44347 - 0 0 0 0 0 0 0 0 0 0 0 0
44348 - 0 0 0 0 0 0 0 0 0 0 0 0
44349 - 10 10 10 26 26 26 66 66 66 82 82 82
44350 - 2 2 6 22 22 22 18 18 18 2 2 6
44351 -149 149 149 253 253 253 253 253 253 253 253 253
44352 -253 253 253 253 253 253 253 253 253 253 253 253
44353 -253 253 253 253 253 253 234 234 234 242 242 242
44354 -253 253 253 253 253 253 253 253 253 253 253 253
44355 -253 253 253 253 253 253 253 253 253 253 253 253
44356 -253 253 253 253 253 253 253 253 253 253 253 253
44357 -253 253 253 253 253 253 253 253 253 253 253 253
44358 -253 253 253 253 253 253 206 206 206 2 2 6
44359 - 2 2 6 2 2 6 2 2 6 38 38 38
44360 - 2 2 6 2 2 6 2 2 6 2 2 6
44361 - 6 6 6 86 86 86 46 46 46 14 14 14
44362 - 0 0 0 0 0 0 0 0 0 0 0 0
44363 - 0 0 0 0 0 0 0 0 0 0 0 0
44364 - 0 0 0 0 0 0 0 0 0 0 0 0
44365 - 0 0 0 0 0 0 0 0 0 0 0 0
44366 - 0 0 0 0 0 0 0 0 0 0 0 0
44367 - 0 0 0 0 0 0 0 0 0 0 0 0
44368 - 0 0 0 0 0 0 0 0 0 6 6 6
44369 - 18 18 18 46 46 46 86 86 86 18 18 18
44370 - 2 2 6 34 34 34 10 10 10 6 6 6
44371 -210 210 210 253 253 253 253 253 253 253 253 253
44372 -253 253 253 253 253 253 253 253 253 253 253 253
44373 -253 253 253 253 253 253 234 234 234 242 242 242
44374 -253 253 253 253 253 253 253 253 253 253 253 253
44375 -253 253 253 253 253 253 253 253 253 253 253 253
44376 -253 253 253 253 253 253 253 253 253 253 253 253
44377 -253 253 253 253 253 253 253 253 253 253 253 253
44378 -253 253 253 253 253 253 221 221 221 6 6 6
44379 - 2 2 6 2 2 6 6 6 6 30 30 30
44380 - 2 2 6 2 2 6 2 2 6 2 2 6
44381 - 2 2 6 82 82 82 54 54 54 18 18 18
44382 - 6 6 6 0 0 0 0 0 0 0 0 0
44383 - 0 0 0 0 0 0 0 0 0 0 0 0
44384 - 0 0 0 0 0 0 0 0 0 0 0 0
44385 - 0 0 0 0 0 0 0 0 0 0 0 0
44386 - 0 0 0 0 0 0 0 0 0 0 0 0
44387 - 0 0 0 0 0 0 0 0 0 0 0 0
44388 - 0 0 0 0 0 0 0 0 0 10 10 10
44389 - 26 26 26 66 66 66 62 62 62 2 2 6
44390 - 2 2 6 38 38 38 10 10 10 26 26 26
44391 -238 238 238 253 253 253 253 253 253 253 253 253
44392 -253 253 253 253 253 253 253 253 253 253 253 253
44393 -253 253 253 253 253 253 231 231 231 238 238 238
44394 -253 253 253 253 253 253 253 253 253 253 253 253
44395 -253 253 253 253 253 253 253 253 253 253 253 253
44396 -253 253 253 253 253 253 253 253 253 253 253 253
44397 -253 253 253 253 253 253 253 253 253 253 253 253
44398 -253 253 253 253 253 253 231 231 231 6 6 6
44399 - 2 2 6 2 2 6 10 10 10 30 30 30
44400 - 2 2 6 2 2 6 2 2 6 2 2 6
44401 - 2 2 6 66 66 66 58 58 58 22 22 22
44402 - 6 6 6 0 0 0 0 0 0 0 0 0
44403 - 0 0 0 0 0 0 0 0 0 0 0 0
44404 - 0 0 0 0 0 0 0 0 0 0 0 0
44405 - 0 0 0 0 0 0 0 0 0 0 0 0
44406 - 0 0 0 0 0 0 0 0 0 0 0 0
44407 - 0 0 0 0 0 0 0 0 0 0 0 0
44408 - 0 0 0 0 0 0 0 0 0 10 10 10
44409 - 38 38 38 78 78 78 6 6 6 2 2 6
44410 - 2 2 6 46 46 46 14 14 14 42 42 42
44411 -246 246 246 253 253 253 253 253 253 253 253 253
44412 -253 253 253 253 253 253 253 253 253 253 253 253
44413 -253 253 253 253 253 253 231 231 231 242 242 242
44414 -253 253 253 253 253 253 253 253 253 253 253 253
44415 -253 253 253 253 253 253 253 253 253 253 253 253
44416 -253 253 253 253 253 253 253 253 253 253 253 253
44417 -253 253 253 253 253 253 253 253 253 253 253 253
44418 -253 253 253 253 253 253 234 234 234 10 10 10
44419 - 2 2 6 2 2 6 22 22 22 14 14 14
44420 - 2 2 6 2 2 6 2 2 6 2 2 6
44421 - 2 2 6 66 66 66 62 62 62 22 22 22
44422 - 6 6 6 0 0 0 0 0 0 0 0 0
44423 - 0 0 0 0 0 0 0 0 0 0 0 0
44424 - 0 0 0 0 0 0 0 0 0 0 0 0
44425 - 0 0 0 0 0 0 0 0 0 0 0 0
44426 - 0 0 0 0 0 0 0 0 0 0 0 0
44427 - 0 0 0 0 0 0 0 0 0 0 0 0
44428 - 0 0 0 0 0 0 6 6 6 18 18 18
44429 - 50 50 50 74 74 74 2 2 6 2 2 6
44430 - 14 14 14 70 70 70 34 34 34 62 62 62
44431 -250 250 250 253 253 253 253 253 253 253 253 253
44432 -253 253 253 253 253 253 253 253 253 253 253 253
44433 -253 253 253 253 253 253 231 231 231 246 246 246
44434 -253 253 253 253 253 253 253 253 253 253 253 253
44435 -253 253 253 253 253 253 253 253 253 253 253 253
44436 -253 253 253 253 253 253 253 253 253 253 253 253
44437 -253 253 253 253 253 253 253 253 253 253 253 253
44438 -253 253 253 253 253 253 234 234 234 14 14 14
44439 - 2 2 6 2 2 6 30 30 30 2 2 6
44440 - 2 2 6 2 2 6 2 2 6 2 2 6
44441 - 2 2 6 66 66 66 62 62 62 22 22 22
44442 - 6 6 6 0 0 0 0 0 0 0 0 0
44443 - 0 0 0 0 0 0 0 0 0 0 0 0
44444 - 0 0 0 0 0 0 0 0 0 0 0 0
44445 - 0 0 0 0 0 0 0 0 0 0 0 0
44446 - 0 0 0 0 0 0 0 0 0 0 0 0
44447 - 0 0 0 0 0 0 0 0 0 0 0 0
44448 - 0 0 0 0 0 0 6 6 6 18 18 18
44449 - 54 54 54 62 62 62 2 2 6 2 2 6
44450 - 2 2 6 30 30 30 46 46 46 70 70 70
44451 -250 250 250 253 253 253 253 253 253 253 253 253
44452 -253 253 253 253 253 253 253 253 253 253 253 253
44453 -253 253 253 253 253 253 231 231 231 246 246 246
44454 -253 253 253 253 253 253 253 253 253 253 253 253
44455 -253 253 253 253 253 253 253 253 253 253 253 253
44456 -253 253 253 253 253 253 253 253 253 253 253 253
44457 -253 253 253 253 253 253 253 253 253 253 253 253
44458 -253 253 253 253 253 253 226 226 226 10 10 10
44459 - 2 2 6 6 6 6 30 30 30 2 2 6
44460 - 2 2 6 2 2 6 2 2 6 2 2 6
44461 - 2 2 6 66 66 66 58 58 58 22 22 22
44462 - 6 6 6 0 0 0 0 0 0 0 0 0
44463 - 0 0 0 0 0 0 0 0 0 0 0 0
44464 - 0 0 0 0 0 0 0 0 0 0 0 0
44465 - 0 0 0 0 0 0 0 0 0 0 0 0
44466 - 0 0 0 0 0 0 0 0 0 0 0 0
44467 - 0 0 0 0 0 0 0 0 0 0 0 0
44468 - 0 0 0 0 0 0 6 6 6 22 22 22
44469 - 58 58 58 62 62 62 2 2 6 2 2 6
44470 - 2 2 6 2 2 6 30 30 30 78 78 78
44471 -250 250 250 253 253 253 253 253 253 253 253 253
44472 -253 253 253 253 253 253 253 253 253 253 253 253
44473 -253 253 253 253 253 253 231 231 231 246 246 246
44474 -253 253 253 253 253 253 253 253 253 253 253 253
44475 -253 253 253 253 253 253 253 253 253 253 253 253
44476 -253 253 253 253 253 253 253 253 253 253 253 253
44477 -253 253 253 253 253 253 253 253 253 253 253 253
44478 -253 253 253 253 253 253 206 206 206 2 2 6
44479 - 22 22 22 34 34 34 18 14 6 22 22 22
44480 - 26 26 26 18 18 18 6 6 6 2 2 6
44481 - 2 2 6 82 82 82 54 54 54 18 18 18
44482 - 6 6 6 0 0 0 0 0 0 0 0 0
44483 - 0 0 0 0 0 0 0 0 0 0 0 0
44484 - 0 0 0 0 0 0 0 0 0 0 0 0
44485 - 0 0 0 0 0 0 0 0 0 0 0 0
44486 - 0 0 0 0 0 0 0 0 0 0 0 0
44487 - 0 0 0 0 0 0 0 0 0 0 0 0
44488 - 0 0 0 0 0 0 6 6 6 26 26 26
44489 - 62 62 62 106 106 106 74 54 14 185 133 11
44490 -210 162 10 121 92 8 6 6 6 62 62 62
44491 -238 238 238 253 253 253 253 253 253 253 253 253
44492 -253 253 253 253 253 253 253 253 253 253 253 253
44493 -253 253 253 253 253 253 231 231 231 246 246 246
44494 -253 253 253 253 253 253 253 253 253 253 253 253
44495 -253 253 253 253 253 253 253 253 253 253 253 253
44496 -253 253 253 253 253 253 253 253 253 253 253 253
44497 -253 253 253 253 253 253 253 253 253 253 253 253
44498 -253 253 253 253 253 253 158 158 158 18 18 18
44499 - 14 14 14 2 2 6 2 2 6 2 2 6
44500 - 6 6 6 18 18 18 66 66 66 38 38 38
44501 - 6 6 6 94 94 94 50 50 50 18 18 18
44502 - 6 6 6 0 0 0 0 0 0 0 0 0
44503 - 0 0 0 0 0 0 0 0 0 0 0 0
44504 - 0 0 0 0 0 0 0 0 0 0 0 0
44505 - 0 0 0 0 0 0 0 0 0 0 0 0
44506 - 0 0 0 0 0 0 0 0 0 0 0 0
44507 - 0 0 0 0 0 0 0 0 0 6 6 6
44508 - 10 10 10 10 10 10 18 18 18 38 38 38
44509 - 78 78 78 142 134 106 216 158 10 242 186 14
44510 -246 190 14 246 190 14 156 118 10 10 10 10
44511 - 90 90 90 238 238 238 253 253 253 253 253 253
44512 -253 253 253 253 253 253 253 253 253 253 253 253
44513 -253 253 253 253 253 253 231 231 231 250 250 250
44514 -253 253 253 253 253 253 253 253 253 253 253 253
44515 -253 253 253 253 253 253 253 253 253 253 253 253
44516 -253 253 253 253 253 253 253 253 253 253 253 253
44517 -253 253 253 253 253 253 253 253 253 246 230 190
44518 -238 204 91 238 204 91 181 142 44 37 26 9
44519 - 2 2 6 2 2 6 2 2 6 2 2 6
44520 - 2 2 6 2 2 6 38 38 38 46 46 46
44521 - 26 26 26 106 106 106 54 54 54 18 18 18
44522 - 6 6 6 0 0 0 0 0 0 0 0 0
44523 - 0 0 0 0 0 0 0 0 0 0 0 0
44524 - 0 0 0 0 0 0 0 0 0 0 0 0
44525 - 0 0 0 0 0 0 0 0 0 0 0 0
44526 - 0 0 0 0 0 0 0 0 0 0 0 0
44527 - 0 0 0 6 6 6 14 14 14 22 22 22
44528 - 30 30 30 38 38 38 50 50 50 70 70 70
44529 -106 106 106 190 142 34 226 170 11 242 186 14
44530 -246 190 14 246 190 14 246 190 14 154 114 10
44531 - 6 6 6 74 74 74 226 226 226 253 253 253
44532 -253 253 253 253 253 253 253 253 253 253 253 253
44533 -253 253 253 253 253 253 231 231 231 250 250 250
44534 -253 253 253 253 253 253 253 253 253 253 253 253
44535 -253 253 253 253 253 253 253 253 253 253 253 253
44536 -253 253 253 253 253 253 253 253 253 253 253 253
44537 -253 253 253 253 253 253 253 253 253 228 184 62
44538 -241 196 14 241 208 19 232 195 16 38 30 10
44539 - 2 2 6 2 2 6 2 2 6 2 2 6
44540 - 2 2 6 6 6 6 30 30 30 26 26 26
44541 -203 166 17 154 142 90 66 66 66 26 26 26
44542 - 6 6 6 0 0 0 0 0 0 0 0 0
44543 - 0 0 0 0 0 0 0 0 0 0 0 0
44544 - 0 0 0 0 0 0 0 0 0 0 0 0
44545 - 0 0 0 0 0 0 0 0 0 0 0 0
44546 - 0 0 0 0 0 0 0 0 0 0 0 0
44547 - 6 6 6 18 18 18 38 38 38 58 58 58
44548 - 78 78 78 86 86 86 101 101 101 123 123 123
44549 -175 146 61 210 150 10 234 174 13 246 186 14
44550 -246 190 14 246 190 14 246 190 14 238 190 10
44551 -102 78 10 2 2 6 46 46 46 198 198 198
44552 -253 253 253 253 253 253 253 253 253 253 253 253
44553 -253 253 253 253 253 253 234 234 234 242 242 242
44554 -253 253 253 253 253 253 253 253 253 253 253 253
44555 -253 253 253 253 253 253 253 253 253 253 253 253
44556 -253 253 253 253 253 253 253 253 253 253 253 253
44557 -253 253 253 253 253 253 253 253 253 224 178 62
44558 -242 186 14 241 196 14 210 166 10 22 18 6
44559 - 2 2 6 2 2 6 2 2 6 2 2 6
44560 - 2 2 6 2 2 6 6 6 6 121 92 8
44561 -238 202 15 232 195 16 82 82 82 34 34 34
44562 - 10 10 10 0 0 0 0 0 0 0 0 0
44563 - 0 0 0 0 0 0 0 0 0 0 0 0
44564 - 0 0 0 0 0 0 0 0 0 0 0 0
44565 - 0 0 0 0 0 0 0 0 0 0 0 0
44566 - 0 0 0 0 0 0 0 0 0 0 0 0
44567 - 14 14 14 38 38 38 70 70 70 154 122 46
44568 -190 142 34 200 144 11 197 138 11 197 138 11
44569 -213 154 11 226 170 11 242 186 14 246 190 14
44570 -246 190 14 246 190 14 246 190 14 246 190 14
44571 -225 175 15 46 32 6 2 2 6 22 22 22
44572 -158 158 158 250 250 250 253 253 253 253 253 253
44573 -253 253 253 253 253 253 253 253 253 253 253 253
44574 -253 253 253 253 253 253 253 253 253 253 253 253
44575 -253 253 253 253 253 253 253 253 253 253 253 253
44576 -253 253 253 253 253 253 253 253 253 253 253 253
44577 -253 253 253 250 250 250 242 242 242 224 178 62
44578 -239 182 13 236 186 11 213 154 11 46 32 6
44579 - 2 2 6 2 2 6 2 2 6 2 2 6
44580 - 2 2 6 2 2 6 61 42 6 225 175 15
44581 -238 190 10 236 186 11 112 100 78 42 42 42
44582 - 14 14 14 0 0 0 0 0 0 0 0 0
44583 - 0 0 0 0 0 0 0 0 0 0 0 0
44584 - 0 0 0 0 0 0 0 0 0 0 0 0
44585 - 0 0 0 0 0 0 0 0 0 0 0 0
44586 - 0 0 0 0 0 0 0 0 0 6 6 6
44587 - 22 22 22 54 54 54 154 122 46 213 154 11
44588 -226 170 11 230 174 11 226 170 11 226 170 11
44589 -236 178 12 242 186 14 246 190 14 246 190 14
44590 -246 190 14 246 190 14 246 190 14 246 190 14
44591 -241 196 14 184 144 12 10 10 10 2 2 6
44592 - 6 6 6 116 116 116 242 242 242 253 253 253
44593 -253 253 253 253 253 253 253 253 253 253 253 253
44594 -253 253 253 253 253 253 253 253 253 253 253 253
44595 -253 253 253 253 253 253 253 253 253 253 253 253
44596 -253 253 253 253 253 253 253 253 253 253 253 253
44597 -253 253 253 231 231 231 198 198 198 214 170 54
44598 -236 178 12 236 178 12 210 150 10 137 92 6
44599 - 18 14 6 2 2 6 2 2 6 2 2 6
44600 - 6 6 6 70 47 6 200 144 11 236 178 12
44601 -239 182 13 239 182 13 124 112 88 58 58 58
44602 - 22 22 22 6 6 6 0 0 0 0 0 0
44603 - 0 0 0 0 0 0 0 0 0 0 0 0
44604 - 0 0 0 0 0 0 0 0 0 0 0 0
44605 - 0 0 0 0 0 0 0 0 0 0 0 0
44606 - 0 0 0 0 0 0 0 0 0 10 10 10
44607 - 30 30 30 70 70 70 180 133 36 226 170 11
44608 -239 182 13 242 186 14 242 186 14 246 186 14
44609 -246 190 14 246 190 14 246 190 14 246 190 14
44610 -246 190 14 246 190 14 246 190 14 246 190 14
44611 -246 190 14 232 195 16 98 70 6 2 2 6
44612 - 2 2 6 2 2 6 66 66 66 221 221 221
44613 -253 253 253 253 253 253 253 253 253 253 253 253
44614 -253 253 253 253 253 253 253 253 253 253 253 253
44615 -253 253 253 253 253 253 253 253 253 253 253 253
44616 -253 253 253 253 253 253 253 253 253 253 253 253
44617 -253 253 253 206 206 206 198 198 198 214 166 58
44618 -230 174 11 230 174 11 216 158 10 192 133 9
44619 -163 110 8 116 81 8 102 78 10 116 81 8
44620 -167 114 7 197 138 11 226 170 11 239 182 13
44621 -242 186 14 242 186 14 162 146 94 78 78 78
44622 - 34 34 34 14 14 14 6 6 6 0 0 0
44623 - 0 0 0 0 0 0 0 0 0 0 0 0
44624 - 0 0 0 0 0 0 0 0 0 0 0 0
44625 - 0 0 0 0 0 0 0 0 0 0 0 0
44626 - 0 0 0 0 0 0 0 0 0 6 6 6
44627 - 30 30 30 78 78 78 190 142 34 226 170 11
44628 -239 182 13 246 190 14 246 190 14 246 190 14
44629 -246 190 14 246 190 14 246 190 14 246 190 14
44630 -246 190 14 246 190 14 246 190 14 246 190 14
44631 -246 190 14 241 196 14 203 166 17 22 18 6
44632 - 2 2 6 2 2 6 2 2 6 38 38 38
44633 -218 218 218 253 253 253 253 253 253 253 253 253
44634 -253 253 253 253 253 253 253 253 253 253 253 253
44635 -253 253 253 253 253 253 253 253 253 253 253 253
44636 -253 253 253 253 253 253 253 253 253 253 253 253
44637 -250 250 250 206 206 206 198 198 198 202 162 69
44638 -226 170 11 236 178 12 224 166 10 210 150 10
44639 -200 144 11 197 138 11 192 133 9 197 138 11
44640 -210 150 10 226 170 11 242 186 14 246 190 14
44641 -246 190 14 246 186 14 225 175 15 124 112 88
44642 - 62 62 62 30 30 30 14 14 14 6 6 6
44643 - 0 0 0 0 0 0 0 0 0 0 0 0
44644 - 0 0 0 0 0 0 0 0 0 0 0 0
44645 - 0 0 0 0 0 0 0 0 0 0 0 0
44646 - 0 0 0 0 0 0 0 0 0 10 10 10
44647 - 30 30 30 78 78 78 174 135 50 224 166 10
44648 -239 182 13 246 190 14 246 190 14 246 190 14
44649 -246 190 14 246 190 14 246 190 14 246 190 14
44650 -246 190 14 246 190 14 246 190 14 246 190 14
44651 -246 190 14 246 190 14 241 196 14 139 102 15
44652 - 2 2 6 2 2 6 2 2 6 2 2 6
44653 - 78 78 78 250 250 250 253 253 253 253 253 253
44654 -253 253 253 253 253 253 253 253 253 253 253 253
44655 -253 253 253 253 253 253 253 253 253 253 253 253
44656 -253 253 253 253 253 253 253 253 253 253 253 253
44657 -250 250 250 214 214 214 198 198 198 190 150 46
44658 -219 162 10 236 178 12 234 174 13 224 166 10
44659 -216 158 10 213 154 11 213 154 11 216 158 10
44660 -226 170 11 239 182 13 246 190 14 246 190 14
44661 -246 190 14 246 190 14 242 186 14 206 162 42
44662 -101 101 101 58 58 58 30 30 30 14 14 14
44663 - 6 6 6 0 0 0 0 0 0 0 0 0
44664 - 0 0 0 0 0 0 0 0 0 0 0 0
44665 - 0 0 0 0 0 0 0 0 0 0 0 0
44666 - 0 0 0 0 0 0 0 0 0 10 10 10
44667 - 30 30 30 74 74 74 174 135 50 216 158 10
44668 -236 178 12 246 190 14 246 190 14 246 190 14
44669 -246 190 14 246 190 14 246 190 14 246 190 14
44670 -246 190 14 246 190 14 246 190 14 246 190 14
44671 -246 190 14 246 190 14 241 196 14 226 184 13
44672 - 61 42 6 2 2 6 2 2 6 2 2 6
44673 - 22 22 22 238 238 238 253 253 253 253 253 253
44674 -253 253 253 253 253 253 253 253 253 253 253 253
44675 -253 253 253 253 253 253 253 253 253 253 253 253
44676 -253 253 253 253 253 253 253 253 253 253 253 253
44677 -253 253 253 226 226 226 187 187 187 180 133 36
44678 -216 158 10 236 178 12 239 182 13 236 178 12
44679 -230 174 11 226 170 11 226 170 11 230 174 11
44680 -236 178 12 242 186 14 246 190 14 246 190 14
44681 -246 190 14 246 190 14 246 186 14 239 182 13
44682 -206 162 42 106 106 106 66 66 66 34 34 34
44683 - 14 14 14 6 6 6 0 0 0 0 0 0
44684 - 0 0 0 0 0 0 0 0 0 0 0 0
44685 - 0 0 0 0 0 0 0 0 0 0 0 0
44686 - 0 0 0 0 0 0 0 0 0 6 6 6
44687 - 26 26 26 70 70 70 163 133 67 213 154 11
44688 -236 178 12 246 190 14 246 190 14 246 190 14
44689 -246 190 14 246 190 14 246 190 14 246 190 14
44690 -246 190 14 246 190 14 246 190 14 246 190 14
44691 -246 190 14 246 190 14 246 190 14 241 196 14
44692 -190 146 13 18 14 6 2 2 6 2 2 6
44693 - 46 46 46 246 246 246 253 253 253 253 253 253
44694 -253 253 253 253 253 253 253 253 253 253 253 253
44695 -253 253 253 253 253 253 253 253 253 253 253 253
44696 -253 253 253 253 253 253 253 253 253 253 253 253
44697 -253 253 253 221 221 221 86 86 86 156 107 11
44698 -216 158 10 236 178 12 242 186 14 246 186 14
44699 -242 186 14 239 182 13 239 182 13 242 186 14
44700 -242 186 14 246 186 14 246 190 14 246 190 14
44701 -246 190 14 246 190 14 246 190 14 246 190 14
44702 -242 186 14 225 175 15 142 122 72 66 66 66
44703 - 30 30 30 10 10 10 0 0 0 0 0 0
44704 - 0 0 0 0 0 0 0 0 0 0 0 0
44705 - 0 0 0 0 0 0 0 0 0 0 0 0
44706 - 0 0 0 0 0 0 0 0 0 6 6 6
44707 - 26 26 26 70 70 70 163 133 67 210 150 10
44708 -236 178 12 246 190 14 246 190 14 246 190 14
44709 -246 190 14 246 190 14 246 190 14 246 190 14
44710 -246 190 14 246 190 14 246 190 14 246 190 14
44711 -246 190 14 246 190 14 246 190 14 246 190 14
44712 -232 195 16 121 92 8 34 34 34 106 106 106
44713 -221 221 221 253 253 253 253 253 253 253 253 253
44714 -253 253 253 253 253 253 253 253 253 253 253 253
44715 -253 253 253 253 253 253 253 253 253 253 253 253
44716 -253 253 253 253 253 253 253 253 253 253 253 253
44717 -242 242 242 82 82 82 18 14 6 163 110 8
44718 -216 158 10 236 178 12 242 186 14 246 190 14
44719 -246 190 14 246 190 14 246 190 14 246 190 14
44720 -246 190 14 246 190 14 246 190 14 246 190 14
44721 -246 190 14 246 190 14 246 190 14 246 190 14
44722 -246 190 14 246 190 14 242 186 14 163 133 67
44723 - 46 46 46 18 18 18 6 6 6 0 0 0
44724 - 0 0 0 0 0 0 0 0 0 0 0 0
44725 - 0 0 0 0 0 0 0 0 0 0 0 0
44726 - 0 0 0 0 0 0 0 0 0 10 10 10
44727 - 30 30 30 78 78 78 163 133 67 210 150 10
44728 -236 178 12 246 186 14 246 190 14 246 190 14
44729 -246 190 14 246 190 14 246 190 14 246 190 14
44730 -246 190 14 246 190 14 246 190 14 246 190 14
44731 -246 190 14 246 190 14 246 190 14 246 190 14
44732 -241 196 14 215 174 15 190 178 144 253 253 253
44733 -253 253 253 253 253 253 253 253 253 253 253 253
44734 -253 253 253 253 253 253 253 253 253 253 253 253
44735 -253 253 253 253 253 253 253 253 253 253 253 253
44736 -253 253 253 253 253 253 253 253 253 218 218 218
44737 - 58 58 58 2 2 6 22 18 6 167 114 7
44738 -216 158 10 236 178 12 246 186 14 246 190 14
44739 -246 190 14 246 190 14 246 190 14 246 190 14
44740 -246 190 14 246 190 14 246 190 14 246 190 14
44741 -246 190 14 246 190 14 246 190 14 246 190 14
44742 -246 190 14 246 186 14 242 186 14 190 150 46
44743 - 54 54 54 22 22 22 6 6 6 0 0 0
44744 - 0 0 0 0 0 0 0 0 0 0 0 0
44745 - 0 0 0 0 0 0 0 0 0 0 0 0
44746 - 0 0 0 0 0 0 0 0 0 14 14 14
44747 - 38 38 38 86 86 86 180 133 36 213 154 11
44748 -236 178 12 246 186 14 246 190 14 246 190 14
44749 -246 190 14 246 190 14 246 190 14 246 190 14
44750 -246 190 14 246 190 14 246 190 14 246 190 14
44751 -246 190 14 246 190 14 246 190 14 246 190 14
44752 -246 190 14 232 195 16 190 146 13 214 214 214
44753 -253 253 253 253 253 253 253 253 253 253 253 253
44754 -253 253 253 253 253 253 253 253 253 253 253 253
44755 -253 253 253 253 253 253 253 253 253 253 253 253
44756 -253 253 253 250 250 250 170 170 170 26 26 26
44757 - 2 2 6 2 2 6 37 26 9 163 110 8
44758 -219 162 10 239 182 13 246 186 14 246 190 14
44759 -246 190 14 246 190 14 246 190 14 246 190 14
44760 -246 190 14 246 190 14 246 190 14 246 190 14
44761 -246 190 14 246 190 14 246 190 14 246 190 14
44762 -246 186 14 236 178 12 224 166 10 142 122 72
44763 - 46 46 46 18 18 18 6 6 6 0 0 0
44764 - 0 0 0 0 0 0 0 0 0 0 0 0
44765 - 0 0 0 0 0 0 0 0 0 0 0 0
44766 - 0 0 0 0 0 0 6 6 6 18 18 18
44767 - 50 50 50 109 106 95 192 133 9 224 166 10
44768 -242 186 14 246 190 14 246 190 14 246 190 14
44769 -246 190 14 246 190 14 246 190 14 246 190 14
44770 -246 190 14 246 190 14 246 190 14 246 190 14
44771 -246 190 14 246 190 14 246 190 14 246 190 14
44772 -242 186 14 226 184 13 210 162 10 142 110 46
44773 -226 226 226 253 253 253 253 253 253 253 253 253
44774 -253 253 253 253 253 253 253 253 253 253 253 253
44775 -253 253 253 253 253 253 253 253 253 253 253 253
44776 -198 198 198 66 66 66 2 2 6 2 2 6
44777 - 2 2 6 2 2 6 50 34 6 156 107 11
44778 -219 162 10 239 182 13 246 186 14 246 190 14
44779 -246 190 14 246 190 14 246 190 14 246 190 14
44780 -246 190 14 246 190 14 246 190 14 246 190 14
44781 -246 190 14 246 190 14 246 190 14 242 186 14
44782 -234 174 13 213 154 11 154 122 46 66 66 66
44783 - 30 30 30 10 10 10 0 0 0 0 0 0
44784 - 0 0 0 0 0 0 0 0 0 0 0 0
44785 - 0 0 0 0 0 0 0 0 0 0 0 0
44786 - 0 0 0 0 0 0 6 6 6 22 22 22
44787 - 58 58 58 154 121 60 206 145 10 234 174 13
44788 -242 186 14 246 186 14 246 190 14 246 190 14
44789 -246 190 14 246 190 14 246 190 14 246 190 14
44790 -246 190 14 246 190 14 246 190 14 246 190 14
44791 -246 190 14 246 190 14 246 190 14 246 190 14
44792 -246 186 14 236 178 12 210 162 10 163 110 8
44793 - 61 42 6 138 138 138 218 218 218 250 250 250
44794 -253 253 253 253 253 253 253 253 253 250 250 250
44795 -242 242 242 210 210 210 144 144 144 66 66 66
44796 - 6 6 6 2 2 6 2 2 6 2 2 6
44797 - 2 2 6 2 2 6 61 42 6 163 110 8
44798 -216 158 10 236 178 12 246 190 14 246 190 14
44799 -246 190 14 246 190 14 246 190 14 246 190 14
44800 -246 190 14 246 190 14 246 190 14 246 190 14
44801 -246 190 14 239 182 13 230 174 11 216 158 10
44802 -190 142 34 124 112 88 70 70 70 38 38 38
44803 - 18 18 18 6 6 6 0 0 0 0 0 0
44804 - 0 0 0 0 0 0 0 0 0 0 0 0
44805 - 0 0 0 0 0 0 0 0 0 0 0 0
44806 - 0 0 0 0 0 0 6 6 6 22 22 22
44807 - 62 62 62 168 124 44 206 145 10 224 166 10
44808 -236 178 12 239 182 13 242 186 14 242 186 14
44809 -246 186 14 246 190 14 246 190 14 246 190 14
44810 -246 190 14 246 190 14 246 190 14 246 190 14
44811 -246 190 14 246 190 14 246 190 14 246 190 14
44812 -246 190 14 236 178 12 216 158 10 175 118 6
44813 - 80 54 7 2 2 6 6 6 6 30 30 30
44814 - 54 54 54 62 62 62 50 50 50 38 38 38
44815 - 14 14 14 2 2 6 2 2 6 2 2 6
44816 - 2 2 6 2 2 6 2 2 6 2 2 6
44817 - 2 2 6 6 6 6 80 54 7 167 114 7
44818 -213 154 11 236 178 12 246 190 14 246 190 14
44819 -246 190 14 246 190 14 246 190 14 246 190 14
44820 -246 190 14 242 186 14 239 182 13 239 182 13
44821 -230 174 11 210 150 10 174 135 50 124 112 88
44822 - 82 82 82 54 54 54 34 34 34 18 18 18
44823 - 6 6 6 0 0 0 0 0 0 0 0 0
44824 - 0 0 0 0 0 0 0 0 0 0 0 0
44825 - 0 0 0 0 0 0 0 0 0 0 0 0
44826 - 0 0 0 0 0 0 6 6 6 18 18 18
44827 - 50 50 50 158 118 36 192 133 9 200 144 11
44828 -216 158 10 219 162 10 224 166 10 226 170 11
44829 -230 174 11 236 178 12 239 182 13 239 182 13
44830 -242 186 14 246 186 14 246 190 14 246 190 14
44831 -246 190 14 246 190 14 246 190 14 246 190 14
44832 -246 186 14 230 174 11 210 150 10 163 110 8
44833 -104 69 6 10 10 10 2 2 6 2 2 6
44834 - 2 2 6 2 2 6 2 2 6 2 2 6
44835 - 2 2 6 2 2 6 2 2 6 2 2 6
44836 - 2 2 6 2 2 6 2 2 6 2 2 6
44837 - 2 2 6 6 6 6 91 60 6 167 114 7
44838 -206 145 10 230 174 11 242 186 14 246 190 14
44839 -246 190 14 246 190 14 246 186 14 242 186 14
44840 -239 182 13 230 174 11 224 166 10 213 154 11
44841 -180 133 36 124 112 88 86 86 86 58 58 58
44842 - 38 38 38 22 22 22 10 10 10 6 6 6
44843 - 0 0 0 0 0 0 0 0 0 0 0 0
44844 - 0 0 0 0 0 0 0 0 0 0 0 0
44845 - 0 0 0 0 0 0 0 0 0 0 0 0
44846 - 0 0 0 0 0 0 0 0 0 14 14 14
44847 - 34 34 34 70 70 70 138 110 50 158 118 36
44848 -167 114 7 180 123 7 192 133 9 197 138 11
44849 -200 144 11 206 145 10 213 154 11 219 162 10
44850 -224 166 10 230 174 11 239 182 13 242 186 14
44851 -246 186 14 246 186 14 246 186 14 246 186 14
44852 -239 182 13 216 158 10 185 133 11 152 99 6
44853 -104 69 6 18 14 6 2 2 6 2 2 6
44854 - 2 2 6 2 2 6 2 2 6 2 2 6
44855 - 2 2 6 2 2 6 2 2 6 2 2 6
44856 - 2 2 6 2 2 6 2 2 6 2 2 6
44857 - 2 2 6 6 6 6 80 54 7 152 99 6
44858 -192 133 9 219 162 10 236 178 12 239 182 13
44859 -246 186 14 242 186 14 239 182 13 236 178 12
44860 -224 166 10 206 145 10 192 133 9 154 121 60
44861 - 94 94 94 62 62 62 42 42 42 22 22 22
44862 - 14 14 14 6 6 6 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 0 0 0
44864 - 0 0 0 0 0 0 0 0 0 0 0 0
44865 - 0 0 0 0 0 0 0 0 0 0 0 0
44866 - 0 0 0 0 0 0 0 0 0 6 6 6
44867 - 18 18 18 34 34 34 58 58 58 78 78 78
44868 -101 98 89 124 112 88 142 110 46 156 107 11
44869 -163 110 8 167 114 7 175 118 6 180 123 7
44870 -185 133 11 197 138 11 210 150 10 219 162 10
44871 -226 170 11 236 178 12 236 178 12 234 174 13
44872 -219 162 10 197 138 11 163 110 8 130 83 6
44873 - 91 60 6 10 10 10 2 2 6 2 2 6
44874 - 18 18 18 38 38 38 38 38 38 38 38 38
44875 - 38 38 38 38 38 38 38 38 38 38 38 38
44876 - 38 38 38 38 38 38 26 26 26 2 2 6
44877 - 2 2 6 6 6 6 70 47 6 137 92 6
44878 -175 118 6 200 144 11 219 162 10 230 174 11
44879 -234 174 13 230 174 11 219 162 10 210 150 10
44880 -192 133 9 163 110 8 124 112 88 82 82 82
44881 - 50 50 50 30 30 30 14 14 14 6 6 6
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 0 0 0
44884 - 0 0 0 0 0 0 0 0 0 0 0 0
44885 - 0 0 0 0 0 0 0 0 0 0 0 0
44886 - 0 0 0 0 0 0 0 0 0 0 0 0
44887 - 6 6 6 14 14 14 22 22 22 34 34 34
44888 - 42 42 42 58 58 58 74 74 74 86 86 86
44889 -101 98 89 122 102 70 130 98 46 121 87 25
44890 -137 92 6 152 99 6 163 110 8 180 123 7
44891 -185 133 11 197 138 11 206 145 10 200 144 11
44892 -180 123 7 156 107 11 130 83 6 104 69 6
44893 - 50 34 6 54 54 54 110 110 110 101 98 89
44894 - 86 86 86 82 82 82 78 78 78 78 78 78
44895 - 78 78 78 78 78 78 78 78 78 78 78 78
44896 - 78 78 78 82 82 82 86 86 86 94 94 94
44897 -106 106 106 101 101 101 86 66 34 124 80 6
44898 -156 107 11 180 123 7 192 133 9 200 144 11
44899 -206 145 10 200 144 11 192 133 9 175 118 6
44900 -139 102 15 109 106 95 70 70 70 42 42 42
44901 - 22 22 22 10 10 10 0 0 0 0 0 0
44902 - 0 0 0 0 0 0 0 0 0 0 0 0
44903 - 0 0 0 0 0 0 0 0 0 0 0 0
44904 - 0 0 0 0 0 0 0 0 0 0 0 0
44905 - 0 0 0 0 0 0 0 0 0 0 0 0
44906 - 0 0 0 0 0 0 0 0 0 0 0 0
44907 - 0 0 0 0 0 0 6 6 6 10 10 10
44908 - 14 14 14 22 22 22 30 30 30 38 38 38
44909 - 50 50 50 62 62 62 74 74 74 90 90 90
44910 -101 98 89 112 100 78 121 87 25 124 80 6
44911 -137 92 6 152 99 6 152 99 6 152 99 6
44912 -138 86 6 124 80 6 98 70 6 86 66 30
44913 -101 98 89 82 82 82 58 58 58 46 46 46
44914 - 38 38 38 34 34 34 34 34 34 34 34 34
44915 - 34 34 34 34 34 34 34 34 34 34 34 34
44916 - 34 34 34 34 34 34 38 38 38 42 42 42
44917 - 54 54 54 82 82 82 94 86 76 91 60 6
44918 -134 86 6 156 107 11 167 114 7 175 118 6
44919 -175 118 6 167 114 7 152 99 6 121 87 25
44920 -101 98 89 62 62 62 34 34 34 18 18 18
44921 - 6 6 6 0 0 0 0 0 0 0 0 0
44922 - 0 0 0 0 0 0 0 0 0 0 0 0
44923 - 0 0 0 0 0 0 0 0 0 0 0 0
44924 - 0 0 0 0 0 0 0 0 0 0 0 0
44925 - 0 0 0 0 0 0 0 0 0 0 0 0
44926 - 0 0 0 0 0 0 0 0 0 0 0 0
44927 - 0 0 0 0 0 0 0 0 0 0 0 0
44928 - 0 0 0 6 6 6 6 6 6 10 10 10
44929 - 18 18 18 22 22 22 30 30 30 42 42 42
44930 - 50 50 50 66 66 66 86 86 86 101 98 89
44931 -106 86 58 98 70 6 104 69 6 104 69 6
44932 -104 69 6 91 60 6 82 62 34 90 90 90
44933 - 62 62 62 38 38 38 22 22 22 14 14 14
44934 - 10 10 10 10 10 10 10 10 10 10 10 10
44935 - 10 10 10 10 10 10 6 6 6 10 10 10
44936 - 10 10 10 10 10 10 10 10 10 14 14 14
44937 - 22 22 22 42 42 42 70 70 70 89 81 66
44938 - 80 54 7 104 69 6 124 80 6 137 92 6
44939 -134 86 6 116 81 8 100 82 52 86 86 86
44940 - 58 58 58 30 30 30 14 14 14 6 6 6
44941 - 0 0 0 0 0 0 0 0 0 0 0 0
44942 - 0 0 0 0 0 0 0 0 0 0 0 0
44943 - 0 0 0 0 0 0 0 0 0 0 0 0
44944 - 0 0 0 0 0 0 0 0 0 0 0 0
44945 - 0 0 0 0 0 0 0 0 0 0 0 0
44946 - 0 0 0 0 0 0 0 0 0 0 0 0
44947 - 0 0 0 0 0 0 0 0 0 0 0 0
44948 - 0 0 0 0 0 0 0 0 0 0 0 0
44949 - 0 0 0 6 6 6 10 10 10 14 14 14
44950 - 18 18 18 26 26 26 38 38 38 54 54 54
44951 - 70 70 70 86 86 86 94 86 76 89 81 66
44952 - 89 81 66 86 86 86 74 74 74 50 50 50
44953 - 30 30 30 14 14 14 6 6 6 0 0 0
44954 - 0 0 0 0 0 0 0 0 0 0 0 0
44955 - 0 0 0 0 0 0 0 0 0 0 0 0
44956 - 0 0 0 0 0 0 0 0 0 0 0 0
44957 - 6 6 6 18 18 18 34 34 34 58 58 58
44958 - 82 82 82 89 81 66 89 81 66 89 81 66
44959 - 94 86 66 94 86 76 74 74 74 50 50 50
44960 - 26 26 26 14 14 14 6 6 6 0 0 0
44961 - 0 0 0 0 0 0 0 0 0 0 0 0
44962 - 0 0 0 0 0 0 0 0 0 0 0 0
44963 - 0 0 0 0 0 0 0 0 0 0 0 0
44964 - 0 0 0 0 0 0 0 0 0 0 0 0
44965 - 0 0 0 0 0 0 0 0 0 0 0 0
44966 - 0 0 0 0 0 0 0 0 0 0 0 0
44967 - 0 0 0 0 0 0 0 0 0 0 0 0
44968 - 0 0 0 0 0 0 0 0 0 0 0 0
44969 - 0 0 0 0 0 0 0 0 0 0 0 0
44970 - 6 6 6 6 6 6 14 14 14 18 18 18
44971 - 30 30 30 38 38 38 46 46 46 54 54 54
44972 - 50 50 50 42 42 42 30 30 30 18 18 18
44973 - 10 10 10 0 0 0 0 0 0 0 0 0
44974 - 0 0 0 0 0 0 0 0 0 0 0 0
44975 - 0 0 0 0 0 0 0 0 0 0 0 0
44976 - 0 0 0 0 0 0 0 0 0 0 0 0
44977 - 0 0 0 6 6 6 14 14 14 26 26 26
44978 - 38 38 38 50 50 50 58 58 58 58 58 58
44979 - 54 54 54 42 42 42 30 30 30 18 18 18
44980 - 10 10 10 0 0 0 0 0 0 0 0 0
44981 - 0 0 0 0 0 0 0 0 0 0 0 0
44982 - 0 0 0 0 0 0 0 0 0 0 0 0
44983 - 0 0 0 0 0 0 0 0 0 0 0 0
44984 - 0 0 0 0 0 0 0 0 0 0 0 0
44985 - 0 0 0 0 0 0 0 0 0 0 0 0
44986 - 0 0 0 0 0 0 0 0 0 0 0 0
44987 - 0 0 0 0 0 0 0 0 0 0 0 0
44988 - 0 0 0 0 0 0 0 0 0 0 0 0
44989 - 0 0 0 0 0 0 0 0 0 0 0 0
44990 - 0 0 0 0 0 0 0 0 0 6 6 6
44991 - 6 6 6 10 10 10 14 14 14 18 18 18
44992 - 18 18 18 14 14 14 10 10 10 6 6 6
44993 - 0 0 0 0 0 0 0 0 0 0 0 0
44994 - 0 0 0 0 0 0 0 0 0 0 0 0
44995 - 0 0 0 0 0 0 0 0 0 0 0 0
44996 - 0 0 0 0 0 0 0 0 0 0 0 0
44997 - 0 0 0 0 0 0 0 0 0 6 6 6
44998 - 14 14 14 18 18 18 22 22 22 22 22 22
44999 - 18 18 18 14 14 14 10 10 10 6 6 6
45000 - 0 0 0 0 0 0 0 0 0 0 0 0
45001 - 0 0 0 0 0 0 0 0 0 0 0 0
45002 - 0 0 0 0 0 0 0 0 0 0 0 0
45003 - 0 0 0 0 0 0 0 0 0 0 0 0
45004 - 0 0 0 0 0 0 0 0 0 0 0 0
45005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45018 +4 4 4 4 4 4
45019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45032 +4 4 4 4 4 4
45033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046 +4 4 4 4 4 4
45047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060 +4 4 4 4 4 4
45061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074 +4 4 4 4 4 4
45075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45088 +4 4 4 4 4 4
45089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45094 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45098 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45099 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45100 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45102 +4 4 4 4 4 4
45103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45108 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45109 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45112 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45113 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45114 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45115 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45116 +4 4 4 4 4 4
45117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45122 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45123 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45126 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45127 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45128 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45129 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45130 +4 4 4 4 4 4
45131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45135 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45136 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45137 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45139 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45140 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45141 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45142 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45143 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45144 +4 4 4 4 4 4
45145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45149 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45150 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45151 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45152 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45153 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45154 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45155 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45156 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45157 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45158 +4 4 4 4 4 4
45159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45162 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45163 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45164 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45165 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45166 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45167 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45168 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45169 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45170 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45171 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45172 +4 4 4 4 4 4
45173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45175 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45176 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45177 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45178 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45179 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45180 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45181 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45182 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45183 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45184 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45185 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45186 +4 4 4 4 4 4
45187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45189 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45190 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45191 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45192 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45193 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45194 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45195 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45196 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45197 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45198 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45199 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45200 +4 4 4 4 4 4
45201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45203 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45204 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45205 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45206 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45207 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45208 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45209 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45210 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45211 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45212 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45213 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45214 +4 4 4 4 4 4
45215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45217 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45218 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45219 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45220 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45221 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45222 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45223 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45224 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45225 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45226 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45227 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45228 +4 4 4 4 4 4
45229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45230 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45231 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45232 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45233 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45234 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45235 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45236 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45237 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45238 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45239 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45240 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45241 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45242 +4 4 4 4 4 4
45243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45244 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45245 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45246 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45247 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45248 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45249 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45250 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45251 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45252 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45253 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45254 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45255 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45256 +0 0 0 4 4 4
45257 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45258 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45259 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45260 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45261 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45262 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45263 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45264 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45265 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45266 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45267 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45268 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45269 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45270 +2 0 0 0 0 0
45271 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45272 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45273 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45274 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45275 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45276 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45277 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45278 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45279 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45280 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45281 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45282 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45283 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45284 +37 38 37 0 0 0
45285 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45286 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45287 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45288 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45289 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45290 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45291 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45292 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45293 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45294 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45295 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45296 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45297 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45298 +85 115 134 4 0 0
45299 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45300 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45301 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45302 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45303 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45304 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45305 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45306 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45307 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45308 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45309 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45310 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45311 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45312 +60 73 81 4 0 0
45313 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45314 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45315 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45316 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45317 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45318 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45319 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45320 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45321 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45322 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45323 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45324 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45325 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45326 +16 19 21 4 0 0
45327 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45328 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45329 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45330 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45331 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45332 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45333 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45334 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45335 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45336 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45337 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45338 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45339 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45340 +4 0 0 4 3 3
45341 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45342 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45343 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45345 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45346 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45347 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45348 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45349 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45350 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45351 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45352 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45353 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45354 +3 2 2 4 4 4
45355 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45356 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45357 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45358 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45359 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45360 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45361 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45362 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45363 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45364 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45365 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45366 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45367 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45368 +4 4 4 4 4 4
45369 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45370 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45371 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45372 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45373 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45374 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45375 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45376 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45377 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45378 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45379 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45380 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45381 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45382 +4 4 4 4 4 4
45383 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45384 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45385 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45386 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45387 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45388 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45389 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45390 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45391 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45392 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45393 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45394 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45395 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45396 +5 5 5 5 5 5
45397 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45398 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45399 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45400 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45401 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45402 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45403 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45404 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45405 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45406 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45407 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45408 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45409 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45410 +5 5 5 4 4 4
45411 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45412 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45413 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45414 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45415 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45416 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45417 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45418 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45419 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45420 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45421 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45422 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45424 +4 4 4 4 4 4
45425 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45426 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45427 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45428 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45429 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45430 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45431 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45432 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45433 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45434 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45435 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45436 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45438 +4 4 4 4 4 4
45439 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45440 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45441 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45442 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45443 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45444 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45445 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45446 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45447 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45448 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45449 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452 +4 4 4 4 4 4
45453 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45454 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45455 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45456 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45457 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45458 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45459 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45460 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45461 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45462 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45463 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466 +4 4 4 4 4 4
45467 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45468 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45469 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45470 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45471 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45472 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45473 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45474 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45475 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45476 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45477 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480 +4 4 4 4 4 4
45481 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45482 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45483 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45484 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45485 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45486 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45487 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45488 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45489 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45490 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45491 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494 +4 4 4 4 4 4
45495 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45496 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45497 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45498 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45499 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45500 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45501 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45502 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45503 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45504 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45505 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508 +4 4 4 4 4 4
45509 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45510 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45511 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45512 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45513 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45514 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45515 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45516 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45517 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45518 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45519 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522 +4 4 4 4 4 4
45523 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45524 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45525 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45526 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45527 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45528 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45529 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45530 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45531 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45532 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45533 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536 +4 4 4 4 4 4
45537 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45538 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45539 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45540 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45541 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45542 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45543 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45544 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45545 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45546 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45547 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550 +4 4 4 4 4 4
45551 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45552 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45553 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45554 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45555 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45556 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45557 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45558 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45559 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45560 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45561 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564 +4 4 4 4 4 4
45565 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45566 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45567 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45568 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45569 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45570 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45571 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45572 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45573 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45574 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45575 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578 +4 4 4 4 4 4
45579 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45580 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45581 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45582 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45583 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45584 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45585 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45586 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45587 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45588 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592 +4 4 4 4 4 4
45593 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45594 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45595 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45596 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45597 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45598 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45599 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45600 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45601 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45602 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606 +4 4 4 4 4 4
45607 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45608 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45609 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45610 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45611 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45612 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45613 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45614 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45615 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45616 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620 +4 4 4 4 4 4
45621 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45622 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45623 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45624 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45625 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45626 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45627 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45628 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45629 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45630 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634 +4 4 4 4 4 4
45635 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45636 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45637 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45638 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45639 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45640 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45641 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45642 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45643 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45644 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648 +4 4 4 4 4 4
45649 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45650 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45651 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45652 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45653 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45654 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45655 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45656 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45657 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45658 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45659 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662 +4 4 4 4 4 4
45663 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45664 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45665 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45666 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45667 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45668 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45669 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45670 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45671 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45672 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45673 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +4 4 4 4 4 4
45677 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45678 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45679 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45680 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45681 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45682 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45683 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45684 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45685 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45686 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45687 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690 +4 4 4 4 4 4
45691 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45692 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45693 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45694 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45695 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45696 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45697 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45698 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45699 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45700 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45701 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704 +4 4 4 4 4 4
45705 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45706 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45707 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45708 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45709 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45710 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45711 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45712 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45713 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45714 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45715 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718 +4 4 4 4 4 4
45719 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45720 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45721 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45722 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45723 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45724 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45725 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45726 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45727 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45728 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45729 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732 +4 4 4 4 4 4
45733 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45734 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45735 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45736 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45737 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45738 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45739 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45740 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45741 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45742 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45743 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746 +4 4 4 4 4 4
45747 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45748 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45749 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45750 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45751 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45752 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45753 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45754 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45755 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45756 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45757 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760 +4 4 4 4 4 4
45761 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45762 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45763 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45764 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45765 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45766 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45767 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45768 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45769 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45770 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45771 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774 +4 4 4 4 4 4
45775 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45776 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45777 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45778 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45779 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45780 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45781 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45782 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45783 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45784 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45785 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788 +4 4 4 4 4 4
45789 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45790 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45791 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45792 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45793 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45794 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45795 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45796 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45797 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45798 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45799 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802 +4 4 4 4 4 4
45803 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45804 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45805 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45806 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45807 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45808 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45809 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45810 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45811 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45812 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45813 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816 +4 4 4 4 4 4
45817 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45818 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45819 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45820 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45821 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45822 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45823 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45824 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45825 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45826 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830 +4 4 4 4 4 4
45831 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45832 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45833 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45834 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45835 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45836 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45837 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45838 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45839 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45840 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844 +4 4 4 4 4 4
45845 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45846 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45847 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45848 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45849 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45850 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45851 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45852 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45853 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45854 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858 +4 4 4 4 4 4
45859 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45860 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45861 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45862 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45863 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45864 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45865 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45866 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45867 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45868 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872 +4 4 4 4 4 4
45873 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45874 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45875 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45876 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45877 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45878 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45879 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45880 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45881 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886 +4 4 4 4 4 4
45887 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45888 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45889 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45890 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45891 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45892 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45893 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45894 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45895 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900 +4 4 4 4 4 4
45901 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45902 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45903 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45904 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45905 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45906 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45907 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45908 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45909 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914 +4 4 4 4 4 4
45915 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45916 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45917 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45918 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45919 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45920 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45921 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45922 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928 +4 4 4 4 4 4
45929 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45930 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45931 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45932 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45933 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45934 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45935 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45936 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4
45943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45944 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45945 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45946 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45947 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45948 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45949 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45950 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45959 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45960 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45961 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45962 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45963 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45964 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45973 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45974 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45975 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45976 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45977 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45978 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45988 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45989 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45990 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45991 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45992 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46002 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46003 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46004 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46005 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012 +4 4 4 4 4 4
46013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46017 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46018 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46019 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46026 +4 4 4 4 4 4
46027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46031 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46032 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46033 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040 +4 4 4 4 4 4
46041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46045 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46046 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46047 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054 +4 4 4 4 4 4
46055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46059 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46060 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46061 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068 +4 4 4 4 4 4
46069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46073 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46074 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46075 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082 +4 4 4 4 4 4
46083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46087 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46088 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46089 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46096 +4 4 4 4 4 4
46097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46101 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46102 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46110 +4 4 4 4 4 4
46111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46115 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46116 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46124 +4 4 4 4 4 4
46125 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46126 index 443e3c8..c443d6a 100644
46127 --- a/drivers/video/nvidia/nv_backlight.c
46128 +++ b/drivers/video/nvidia/nv_backlight.c
46129 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46130 return bd->props.brightness;
46131 }
46132
46133 -static struct backlight_ops nvidia_bl_ops = {
46134 +static const struct backlight_ops nvidia_bl_ops = {
46135 .get_brightness = nvidia_bl_get_brightness,
46136 .update_status = nvidia_bl_update_status,
46137 };
46138 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46139 index d94c57f..912984c 100644
46140 --- a/drivers/video/riva/fbdev.c
46141 +++ b/drivers/video/riva/fbdev.c
46142 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46143 return bd->props.brightness;
46144 }
46145
46146 -static struct backlight_ops riva_bl_ops = {
46147 +static const struct backlight_ops riva_bl_ops = {
46148 .get_brightness = riva_bl_get_brightness,
46149 .update_status = riva_bl_update_status,
46150 };
46151 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46152 index 54fbb29..2c108fc 100644
46153 --- a/drivers/video/uvesafb.c
46154 +++ b/drivers/video/uvesafb.c
46155 @@ -18,6 +18,7 @@
46156 #include <linux/fb.h>
46157 #include <linux/io.h>
46158 #include <linux/mutex.h>
46159 +#include <linux/moduleloader.h>
46160 #include <video/edid.h>
46161 #include <video/uvesafb.h>
46162 #ifdef CONFIG_X86
46163 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46164 NULL,
46165 };
46166
46167 - return call_usermodehelper(v86d_path, argv, envp, 1);
46168 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46169 }
46170
46171 /*
46172 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46173 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46174 par->pmi_setpal = par->ypan = 0;
46175 } else {
46176 +
46177 +#ifdef CONFIG_PAX_KERNEXEC
46178 +#ifdef CONFIG_MODULES
46179 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46180 +#endif
46181 + if (!par->pmi_code) {
46182 + par->pmi_setpal = par->ypan = 0;
46183 + return 0;
46184 + }
46185 +#endif
46186 +
46187 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46188 + task->t.regs.edi);
46189 +
46190 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46191 + pax_open_kernel();
46192 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46193 + pax_close_kernel();
46194 +
46195 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46196 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46197 +#else
46198 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46199 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46200 +#endif
46201 +
46202 printk(KERN_INFO "uvesafb: protected mode interface info at "
46203 "%04x:%04x\n",
46204 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46205 @@ -1799,6 +1822,11 @@ out:
46206 if (par->vbe_modes)
46207 kfree(par->vbe_modes);
46208
46209 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46210 + if (par->pmi_code)
46211 + module_free_exec(NULL, par->pmi_code);
46212 +#endif
46213 +
46214 framebuffer_release(info);
46215 return err;
46216 }
46217 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46218 kfree(par->vbe_state_orig);
46219 if (par->vbe_state_saved)
46220 kfree(par->vbe_state_saved);
46221 +
46222 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46223 + if (par->pmi_code)
46224 + module_free_exec(NULL, par->pmi_code);
46225 +#endif
46226 +
46227 }
46228
46229 framebuffer_release(info);
46230 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46231 index bd37ee1..cb827e8 100644
46232 --- a/drivers/video/vesafb.c
46233 +++ b/drivers/video/vesafb.c
46234 @@ -9,6 +9,7 @@
46235 */
46236
46237 #include <linux/module.h>
46238 +#include <linux/moduleloader.h>
46239 #include <linux/kernel.h>
46240 #include <linux/errno.h>
46241 #include <linux/string.h>
46242 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46243 static int vram_total __initdata; /* Set total amount of memory */
46244 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46245 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46246 -static void (*pmi_start)(void) __read_mostly;
46247 -static void (*pmi_pal) (void) __read_mostly;
46248 +static void (*pmi_start)(void) __read_only;
46249 +static void (*pmi_pal) (void) __read_only;
46250 static int depth __read_mostly;
46251 static int vga_compat __read_mostly;
46252 /* --------------------------------------------------------------------- */
46253 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46254 unsigned int size_vmode;
46255 unsigned int size_remap;
46256 unsigned int size_total;
46257 + void *pmi_code = NULL;
46258
46259 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46260 return -ENODEV;
46261 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46262 size_remap = size_total;
46263 vesafb_fix.smem_len = size_remap;
46264
46265 -#ifndef __i386__
46266 - screen_info.vesapm_seg = 0;
46267 -#endif
46268 -
46269 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46270 printk(KERN_WARNING
46271 "vesafb: cannot reserve video memory at 0x%lx\n",
46272 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46273 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46274 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46275
46276 +#ifdef __i386__
46277 +
46278 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46279 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46280 + if (!pmi_code)
46281 +#elif !defined(CONFIG_PAX_KERNEXEC)
46282 + if (0)
46283 +#endif
46284 +
46285 +#endif
46286 + screen_info.vesapm_seg = 0;
46287 +
46288 if (screen_info.vesapm_seg) {
46289 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46290 - screen_info.vesapm_seg,screen_info.vesapm_off);
46291 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46292 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46293 }
46294
46295 if (screen_info.vesapm_seg < 0xc000)
46296 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46297
46298 if (ypan || pmi_setpal) {
46299 unsigned short *pmi_base;
46300 +
46301 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46302 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46303 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46304 +
46305 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46306 + pax_open_kernel();
46307 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46308 +#else
46309 + pmi_code = pmi_base;
46310 +#endif
46311 +
46312 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46313 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46314 +
46315 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46316 + pmi_start = ktva_ktla(pmi_start);
46317 + pmi_pal = ktva_ktla(pmi_pal);
46318 + pax_close_kernel();
46319 +#endif
46320 +
46321 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46322 if (pmi_base[3]) {
46323 printk(KERN_INFO "vesafb: pmi: ports = ");
46324 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46325 info->node, info->fix.id);
46326 return 0;
46327 err:
46328 +
46329 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46330 + module_free_exec(NULL, pmi_code);
46331 +#endif
46332 +
46333 if (info->screen_base)
46334 iounmap(info->screen_base);
46335 framebuffer_release(info);
46336 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46337 index 88a60e0..6783cc2 100644
46338 --- a/drivers/xen/sys-hypervisor.c
46339 +++ b/drivers/xen/sys-hypervisor.c
46340 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46341 return 0;
46342 }
46343
46344 -static struct sysfs_ops hyp_sysfs_ops = {
46345 +static const struct sysfs_ops hyp_sysfs_ops = {
46346 .show = hyp_sysfs_show,
46347 .store = hyp_sysfs_store,
46348 };
46349 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46350 index 18f74ec..3227009 100644
46351 --- a/fs/9p/vfs_inode.c
46352 +++ b/fs/9p/vfs_inode.c
46353 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46354 static void
46355 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46356 {
46357 - char *s = nd_get_link(nd);
46358 + const char *s = nd_get_link(nd);
46359
46360 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46361 IS_ERR(s) ? "<error>" : s);
46362 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46363 index bb4cc5b..df5eaa0 100644
46364 --- a/fs/Kconfig.binfmt
46365 +++ b/fs/Kconfig.binfmt
46366 @@ -86,7 +86,7 @@ config HAVE_AOUT
46367
46368 config BINFMT_AOUT
46369 tristate "Kernel support for a.out and ECOFF binaries"
46370 - depends on HAVE_AOUT
46371 + depends on HAVE_AOUT && BROKEN
46372 ---help---
46373 A.out (Assembler.OUTput) is a set of formats for libraries and
46374 executables used in the earliest versions of UNIX. Linux used
46375 diff --git a/fs/aio.c b/fs/aio.c
46376 index 22a19ad..d484e5b 100644
46377 --- a/fs/aio.c
46378 +++ b/fs/aio.c
46379 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46380 size += sizeof(struct io_event) * nr_events;
46381 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46382
46383 - if (nr_pages < 0)
46384 + if (nr_pages <= 0)
46385 return -EINVAL;
46386
46387 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46388 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46389 struct aio_timeout to;
46390 int retry = 0;
46391
46392 + pax_track_stack();
46393 +
46394 /* needed to zero any padding within an entry (there shouldn't be
46395 * any, but C is fun!
46396 */
46397 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46398 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46399 {
46400 ssize_t ret;
46401 + struct iovec iovstack;
46402
46403 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46404 kiocb->ki_nbytes, 1,
46405 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46406 + &iovstack, &kiocb->ki_iovec);
46407 if (ret < 0)
46408 goto out;
46409
46410 + if (kiocb->ki_iovec == &iovstack) {
46411 + kiocb->ki_inline_vec = iovstack;
46412 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46413 + }
46414 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46415 kiocb->ki_cur_seg = 0;
46416 /* ki_nbytes/left now reflect bytes instead of segs */
46417 diff --git a/fs/attr.c b/fs/attr.c
46418 index 96d394b..33cf5b4 100644
46419 --- a/fs/attr.c
46420 +++ b/fs/attr.c
46421 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46422 unsigned long limit;
46423
46424 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46425 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46426 if (limit != RLIM_INFINITY && offset > limit)
46427 goto out_sig;
46428 if (offset > inode->i_sb->s_maxbytes)
46429 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46430 index 4a1401c..05eb5ca 100644
46431 --- a/fs/autofs/root.c
46432 +++ b/fs/autofs/root.c
46433 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46434 set_bit(n,sbi->symlink_bitmap);
46435 sl = &sbi->symlink[n];
46436 sl->len = strlen(symname);
46437 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46438 + slsize = sl->len+1;
46439 + sl->data = kmalloc(slsize, GFP_KERNEL);
46440 if (!sl->data) {
46441 clear_bit(n,sbi->symlink_bitmap);
46442 unlock_kernel();
46443 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46444 index b4ea829..e63ef18 100644
46445 --- a/fs/autofs4/symlink.c
46446 +++ b/fs/autofs4/symlink.c
46447 @@ -15,7 +15,7 @@
46448 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46449 {
46450 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46451 - nd_set_link(nd, (char *)ino->u.symlink);
46452 + nd_set_link(nd, ino->u.symlink);
46453 return NULL;
46454 }
46455
46456 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46457 index 136a0d6..a287331 100644
46458 --- a/fs/autofs4/waitq.c
46459 +++ b/fs/autofs4/waitq.c
46460 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46461 {
46462 unsigned long sigpipe, flags;
46463 mm_segment_t fs;
46464 - const char *data = (const char *)addr;
46465 + const char __user *data = (const char __force_user *)addr;
46466 ssize_t wr = 0;
46467
46468 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46469 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46470 index 9158c07..3f06659 100644
46471 --- a/fs/befs/linuxvfs.c
46472 +++ b/fs/befs/linuxvfs.c
46473 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46474 {
46475 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46476 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46477 - char *link = nd_get_link(nd);
46478 + const char *link = nd_get_link(nd);
46479 if (!IS_ERR(link))
46480 kfree(link);
46481 }
46482 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46483 index 0133b5a..b3baa9f 100644
46484 --- a/fs/binfmt_aout.c
46485 +++ b/fs/binfmt_aout.c
46486 @@ -16,6 +16,7 @@
46487 #include <linux/string.h>
46488 #include <linux/fs.h>
46489 #include <linux/file.h>
46490 +#include <linux/security.h>
46491 #include <linux/stat.h>
46492 #include <linux/fcntl.h>
46493 #include <linux/ptrace.h>
46494 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46495 #endif
46496 # define START_STACK(u) (u.start_stack)
46497
46498 + memset(&dump, 0, sizeof(dump));
46499 +
46500 fs = get_fs();
46501 set_fs(KERNEL_DS);
46502 has_dumped = 1;
46503 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46504
46505 /* If the size of the dump file exceeds the rlimit, then see what would happen
46506 if we wrote the stack, but not the data area. */
46507 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46508 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46509 dump.u_dsize = 0;
46510
46511 /* Make sure we have enough room to write the stack and data areas. */
46512 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46513 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46514 dump.u_ssize = 0;
46515
46516 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46517 dump_size = dump.u_ssize << PAGE_SHIFT;
46518 DUMP_WRITE(dump_start,dump_size);
46519 }
46520 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46521 - set_fs(KERNEL_DS);
46522 - DUMP_WRITE(current,sizeof(*current));
46523 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46524 end_coredump:
46525 set_fs(fs);
46526 return has_dumped;
46527 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46528 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46529 if (rlim >= RLIM_INFINITY)
46530 rlim = ~0;
46531 +
46532 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46533 if (ex.a_data + ex.a_bss > rlim)
46534 return -ENOMEM;
46535
46536 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46537 install_exec_creds(bprm);
46538 current->flags &= ~PF_FORKNOEXEC;
46539
46540 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46541 + current->mm->pax_flags = 0UL;
46542 +#endif
46543 +
46544 +#ifdef CONFIG_PAX_PAGEEXEC
46545 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46546 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46547 +
46548 +#ifdef CONFIG_PAX_EMUTRAMP
46549 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46550 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46551 +#endif
46552 +
46553 +#ifdef CONFIG_PAX_MPROTECT
46554 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46555 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46556 +#endif
46557 +
46558 + }
46559 +#endif
46560 +
46561 if (N_MAGIC(ex) == OMAGIC) {
46562 unsigned long text_addr, map_size;
46563 loff_t pos;
46564 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46565
46566 down_write(&current->mm->mmap_sem);
46567 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46568 - PROT_READ | PROT_WRITE | PROT_EXEC,
46569 + PROT_READ | PROT_WRITE,
46570 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46571 fd_offset + ex.a_text);
46572 up_write(&current->mm->mmap_sem);
46573 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46574 index 1ed37ba..66794b9 100644
46575 --- a/fs/binfmt_elf.c
46576 +++ b/fs/binfmt_elf.c
46577 @@ -31,6 +31,7 @@
46578 #include <linux/random.h>
46579 #include <linux/elf.h>
46580 #include <linux/utsname.h>
46581 +#include <linux/xattr.h>
46582 #include <asm/uaccess.h>
46583 #include <asm/param.h>
46584 #include <asm/page.h>
46585 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46586 #define elf_core_dump NULL
46587 #endif
46588
46589 +#ifdef CONFIG_PAX_MPROTECT
46590 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46591 +#endif
46592 +
46593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46595 #else
46596 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46597 .load_binary = load_elf_binary,
46598 .load_shlib = load_elf_library,
46599 .core_dump = elf_core_dump,
46600 +
46601 +#ifdef CONFIG_PAX_MPROTECT
46602 + .handle_mprotect= elf_handle_mprotect,
46603 +#endif
46604 +
46605 .min_coredump = ELF_EXEC_PAGESIZE,
46606 .hasvdso = 1
46607 };
46608 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46609
46610 static int set_brk(unsigned long start, unsigned long end)
46611 {
46612 + unsigned long e = end;
46613 +
46614 start = ELF_PAGEALIGN(start);
46615 end = ELF_PAGEALIGN(end);
46616 if (end > start) {
46617 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46618 if (BAD_ADDR(addr))
46619 return addr;
46620 }
46621 - current->mm->start_brk = current->mm->brk = end;
46622 + current->mm->start_brk = current->mm->brk = e;
46623 return 0;
46624 }
46625
46626 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46627 elf_addr_t __user *u_rand_bytes;
46628 const char *k_platform = ELF_PLATFORM;
46629 const char *k_base_platform = ELF_BASE_PLATFORM;
46630 - unsigned char k_rand_bytes[16];
46631 + u32 k_rand_bytes[4];
46632 int items;
46633 elf_addr_t *elf_info;
46634 int ei_index = 0;
46635 const struct cred *cred = current_cred();
46636 struct vm_area_struct *vma;
46637 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46638 +
46639 + pax_track_stack();
46640
46641 /*
46642 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46643 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46644 * Generate 16 random bytes for userspace PRNG seeding.
46645 */
46646 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46647 - u_rand_bytes = (elf_addr_t __user *)
46648 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46649 + srandom32(k_rand_bytes[0] ^ random32());
46650 + srandom32(k_rand_bytes[1] ^ random32());
46651 + srandom32(k_rand_bytes[2] ^ random32());
46652 + srandom32(k_rand_bytes[3] ^ random32());
46653 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46654 + u_rand_bytes = (elf_addr_t __user *) p;
46655 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46656 return -EFAULT;
46657
46658 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46659 return -EFAULT;
46660 current->mm->env_end = p;
46661
46662 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46663 +
46664 /* Put the elf_info on the stack in the right place. */
46665 sp = (elf_addr_t __user *)envp + 1;
46666 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46667 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46668 return -EFAULT;
46669 return 0;
46670 }
46671 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46672 {
46673 struct elf_phdr *elf_phdata;
46674 struct elf_phdr *eppnt;
46675 - unsigned long load_addr = 0;
46676 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46677 int load_addr_set = 0;
46678 unsigned long last_bss = 0, elf_bss = 0;
46679 - unsigned long error = ~0UL;
46680 + unsigned long error = -EINVAL;
46681 unsigned long total_size;
46682 int retval, i, size;
46683
46684 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46685 goto out_close;
46686 }
46687
46688 +#ifdef CONFIG_PAX_SEGMEXEC
46689 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46690 + pax_task_size = SEGMEXEC_TASK_SIZE;
46691 +#endif
46692 +
46693 eppnt = elf_phdata;
46694 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46695 if (eppnt->p_type == PT_LOAD) {
46696 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46697 k = load_addr + eppnt->p_vaddr;
46698 if (BAD_ADDR(k) ||
46699 eppnt->p_filesz > eppnt->p_memsz ||
46700 - eppnt->p_memsz > TASK_SIZE ||
46701 - TASK_SIZE - eppnt->p_memsz < k) {
46702 + eppnt->p_memsz > pax_task_size ||
46703 + pax_task_size - eppnt->p_memsz < k) {
46704 error = -ENOMEM;
46705 goto out_close;
46706 }
46707 @@ -532,6 +558,351 @@ out:
46708 return error;
46709 }
46710
46711 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46712 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46713 +{
46714 + unsigned long pax_flags = 0UL;
46715 +
46716 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46717 +
46718 +#ifdef CONFIG_PAX_PAGEEXEC
46719 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46720 + pax_flags |= MF_PAX_PAGEEXEC;
46721 +#endif
46722 +
46723 +#ifdef CONFIG_PAX_SEGMEXEC
46724 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46725 + pax_flags |= MF_PAX_SEGMEXEC;
46726 +#endif
46727 +
46728 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46729 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46730 + if (nx_enabled)
46731 + pax_flags &= ~MF_PAX_SEGMEXEC;
46732 + else
46733 + pax_flags &= ~MF_PAX_PAGEEXEC;
46734 + }
46735 +#endif
46736 +
46737 +#ifdef CONFIG_PAX_EMUTRAMP
46738 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46739 + pax_flags |= MF_PAX_EMUTRAMP;
46740 +#endif
46741 +
46742 +#ifdef CONFIG_PAX_MPROTECT
46743 + if (elf_phdata->p_flags & PF_MPROTECT)
46744 + pax_flags |= MF_PAX_MPROTECT;
46745 +#endif
46746 +
46747 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46748 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46749 + pax_flags |= MF_PAX_RANDMMAP;
46750 +#endif
46751 +
46752 +#endif
46753 +
46754 + return pax_flags;
46755 +}
46756 +
46757 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46758 +{
46759 + unsigned long pax_flags = 0UL;
46760 +
46761 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46762 +
46763 +#ifdef CONFIG_PAX_PAGEEXEC
46764 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46765 + pax_flags |= MF_PAX_PAGEEXEC;
46766 +#endif
46767 +
46768 +#ifdef CONFIG_PAX_SEGMEXEC
46769 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46770 + pax_flags |= MF_PAX_SEGMEXEC;
46771 +#endif
46772 +
46773 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46774 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46775 + if (nx_enabled)
46776 + pax_flags &= ~MF_PAX_SEGMEXEC;
46777 + else
46778 + pax_flags &= ~MF_PAX_PAGEEXEC;
46779 + }
46780 +#endif
46781 +
46782 +#ifdef CONFIG_PAX_EMUTRAMP
46783 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46784 + pax_flags |= MF_PAX_EMUTRAMP;
46785 +#endif
46786 +
46787 +#ifdef CONFIG_PAX_MPROTECT
46788 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46789 + pax_flags |= MF_PAX_MPROTECT;
46790 +#endif
46791 +
46792 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46793 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46794 + pax_flags |= MF_PAX_RANDMMAP;
46795 +#endif
46796 +
46797 +#endif
46798 +
46799 + return pax_flags;
46800 +}
46801 +
46802 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46803 +{
46804 + unsigned long pax_flags = 0UL;
46805 +
46806 +#ifdef CONFIG_PAX_EI_PAX
46807 +
46808 +#ifdef CONFIG_PAX_PAGEEXEC
46809 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46810 + pax_flags |= MF_PAX_PAGEEXEC;
46811 +#endif
46812 +
46813 +#ifdef CONFIG_PAX_SEGMEXEC
46814 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46815 + pax_flags |= MF_PAX_SEGMEXEC;
46816 +#endif
46817 +
46818 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46819 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46820 + if (nx_enabled)
46821 + pax_flags &= ~MF_PAX_SEGMEXEC;
46822 + else
46823 + pax_flags &= ~MF_PAX_PAGEEXEC;
46824 + }
46825 +#endif
46826 +
46827 +#ifdef CONFIG_PAX_EMUTRAMP
46828 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46829 + pax_flags |= MF_PAX_EMUTRAMP;
46830 +#endif
46831 +
46832 +#ifdef CONFIG_PAX_MPROTECT
46833 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46834 + pax_flags |= MF_PAX_MPROTECT;
46835 +#endif
46836 +
46837 +#ifdef CONFIG_PAX_ASLR
46838 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46839 + pax_flags |= MF_PAX_RANDMMAP;
46840 +#endif
46841 +
46842 +#else
46843 +
46844 +#ifdef CONFIG_PAX_PAGEEXEC
46845 + pax_flags |= MF_PAX_PAGEEXEC;
46846 +#endif
46847 +
46848 +#ifdef CONFIG_PAX_MPROTECT
46849 + pax_flags |= MF_PAX_MPROTECT;
46850 +#endif
46851 +
46852 +#ifdef CONFIG_PAX_RANDMMAP
46853 + pax_flags |= MF_PAX_RANDMMAP;
46854 +#endif
46855 +
46856 +#ifdef CONFIG_PAX_SEGMEXEC
46857 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
46858 + pax_flags &= ~MF_PAX_PAGEEXEC;
46859 + pax_flags |= MF_PAX_SEGMEXEC;
46860 + }
46861 +#endif
46862 +
46863 +#endif
46864 +
46865 + return pax_flags;
46866 +}
46867 +
46868 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46869 +{
46870 +
46871 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46872 + unsigned long i;
46873 +
46874 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46875 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46876 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46877 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46878 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46879 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46880 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46881 + return ~0UL;
46882 +
46883 +#ifdef CONFIG_PAX_SOFTMODE
46884 + if (pax_softmode)
46885 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46886 + else
46887 +#endif
46888 +
46889 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46890 + break;
46891 + }
46892 +#endif
46893 +
46894 + return ~0UL;
46895 +}
46896 +
46897 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46898 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46899 +{
46900 + unsigned long pax_flags = 0UL;
46901 +
46902 +#ifdef CONFIG_PAX_PAGEEXEC
46903 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46904 + pax_flags |= MF_PAX_PAGEEXEC;
46905 +#endif
46906 +
46907 +#ifdef CONFIG_PAX_SEGMEXEC
46908 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46909 + pax_flags |= MF_PAX_SEGMEXEC;
46910 +#endif
46911 +
46912 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46913 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46914 + if ((__supported_pte_mask & _PAGE_NX))
46915 + pax_flags &= ~MF_PAX_SEGMEXEC;
46916 + else
46917 + pax_flags &= ~MF_PAX_PAGEEXEC;
46918 + }
46919 +#endif
46920 +
46921 +#ifdef CONFIG_PAX_EMUTRAMP
46922 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46923 + pax_flags |= MF_PAX_EMUTRAMP;
46924 +#endif
46925 +
46926 +#ifdef CONFIG_PAX_MPROTECT
46927 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46928 + pax_flags |= MF_PAX_MPROTECT;
46929 +#endif
46930 +
46931 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46932 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46933 + pax_flags |= MF_PAX_RANDMMAP;
46934 +#endif
46935 +
46936 + return pax_flags;
46937 +}
46938 +
46939 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46940 +{
46941 + unsigned long pax_flags = 0UL;
46942 +
46943 +#ifdef CONFIG_PAX_PAGEEXEC
46944 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46945 + pax_flags |= MF_PAX_PAGEEXEC;
46946 +#endif
46947 +
46948 +#ifdef CONFIG_PAX_SEGMEXEC
46949 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46950 + pax_flags |= MF_PAX_SEGMEXEC;
46951 +#endif
46952 +
46953 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46954 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46955 + if ((__supported_pte_mask & _PAGE_NX))
46956 + pax_flags &= ~MF_PAX_SEGMEXEC;
46957 + else
46958 + pax_flags &= ~MF_PAX_PAGEEXEC;
46959 + }
46960 +#endif
46961 +
46962 +#ifdef CONFIG_PAX_EMUTRAMP
46963 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46964 + pax_flags |= MF_PAX_EMUTRAMP;
46965 +#endif
46966 +
46967 +#ifdef CONFIG_PAX_MPROTECT
46968 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46969 + pax_flags |= MF_PAX_MPROTECT;
46970 +#endif
46971 +
46972 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46973 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46974 + pax_flags |= MF_PAX_RANDMMAP;
46975 +#endif
46976 +
46977 + return pax_flags;
46978 +}
46979 +#endif
46980 +
46981 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46982 +{
46983 +
46984 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46985 + ssize_t xattr_size, i;
46986 + unsigned char xattr_value[5];
46987 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46988 +
46989 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46990 + if (xattr_size <= 0)
46991 + return ~0UL;
46992 +
46993 + for (i = 0; i < xattr_size; i++)
46994 + switch (xattr_value[i]) {
46995 + default:
46996 + return ~0UL;
46997 +
46998 +#define parse_flag(option1, option2, flag) \
46999 + case option1: \
47000 + pax_flags_hardmode |= MF_PAX_##flag; \
47001 + break; \
47002 + case option2: \
47003 + pax_flags_softmode |= MF_PAX_##flag; \
47004 + break;
47005 +
47006 + parse_flag('p', 'P', PAGEEXEC);
47007 + parse_flag('e', 'E', EMUTRAMP);
47008 + parse_flag('m', 'M', MPROTECT);
47009 + parse_flag('r', 'R', RANDMMAP);
47010 + parse_flag('s', 'S', SEGMEXEC);
47011 +
47012 +#undef parse_flag
47013 + }
47014 +
47015 + if (pax_flags_hardmode & pax_flags_softmode)
47016 + return ~0UL;
47017 +
47018 +#ifdef CONFIG_PAX_SOFTMODE
47019 + if (pax_softmode)
47020 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47021 + else
47022 +#endif
47023 +
47024 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47025 +#else
47026 + return ~0UL;
47027 +#endif
47028 +
47029 +}
47030 +
47031 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47032 +{
47033 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47034 +
47035 + pax_flags = pax_parse_ei_pax(elf_ex);
47036 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47037 + xattr_pax_flags = pax_parse_xattr_pax(file);
47038 +
47039 + if (pt_pax_flags == ~0UL)
47040 + pt_pax_flags = xattr_pax_flags;
47041 + else if (xattr_pax_flags == ~0UL)
47042 + xattr_pax_flags = pt_pax_flags;
47043 + if (pt_pax_flags != xattr_pax_flags)
47044 + return -EINVAL;
47045 + if (pt_pax_flags != ~0UL)
47046 + pax_flags = pt_pax_flags;
47047 +
47048 + if (0 > pax_check_flags(&pax_flags))
47049 + return -EINVAL;
47050 +
47051 + current->mm->pax_flags = pax_flags;
47052 + return 0;
47053 +}
47054 +#endif
47055 +
47056 /*
47057 * These are the functions used to load ELF style executables and shared
47058 * libraries. There is no binary dependent code anywhere else.
47059 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47060 {
47061 unsigned int random_variable = 0;
47062
47063 +#ifdef CONFIG_PAX_RANDUSTACK
47064 + if (randomize_va_space)
47065 + return stack_top - current->mm->delta_stack;
47066 +#endif
47067 +
47068 if ((current->flags & PF_RANDOMIZE) &&
47069 !(current->personality & ADDR_NO_RANDOMIZE)) {
47070 random_variable = get_random_int() & STACK_RND_MASK;
47071 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47072 unsigned long load_addr = 0, load_bias = 0;
47073 int load_addr_set = 0;
47074 char * elf_interpreter = NULL;
47075 - unsigned long error;
47076 + unsigned long error = 0;
47077 struct elf_phdr *elf_ppnt, *elf_phdata;
47078 unsigned long elf_bss, elf_brk;
47079 int retval, i;
47080 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47081 unsigned long start_code, end_code, start_data, end_data;
47082 unsigned long reloc_func_desc = 0;
47083 int executable_stack = EXSTACK_DEFAULT;
47084 - unsigned long def_flags = 0;
47085 struct {
47086 struct elfhdr elf_ex;
47087 struct elfhdr interp_elf_ex;
47088 } *loc;
47089 + unsigned long pax_task_size = TASK_SIZE;
47090
47091 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47092 if (!loc) {
47093 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47094
47095 /* OK, This is the point of no return */
47096 current->flags &= ~PF_FORKNOEXEC;
47097 - current->mm->def_flags = def_flags;
47098 +
47099 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47100 + current->mm->pax_flags = 0UL;
47101 +#endif
47102 +
47103 +#ifdef CONFIG_PAX_DLRESOLVE
47104 + current->mm->call_dl_resolve = 0UL;
47105 +#endif
47106 +
47107 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47108 + current->mm->call_syscall = 0UL;
47109 +#endif
47110 +
47111 +#ifdef CONFIG_PAX_ASLR
47112 + current->mm->delta_mmap = 0UL;
47113 + current->mm->delta_stack = 0UL;
47114 +#endif
47115 +
47116 + current->mm->def_flags = 0;
47117 +
47118 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47119 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47120 + send_sig(SIGKILL, current, 0);
47121 + goto out_free_dentry;
47122 + }
47123 +#endif
47124 +
47125 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47126 + pax_set_initial_flags(bprm);
47127 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47128 + if (pax_set_initial_flags_func)
47129 + (pax_set_initial_flags_func)(bprm);
47130 +#endif
47131 +
47132 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47133 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47134 + current->mm->context.user_cs_limit = PAGE_SIZE;
47135 + current->mm->def_flags |= VM_PAGEEXEC;
47136 + }
47137 +#endif
47138 +
47139 +#ifdef CONFIG_PAX_SEGMEXEC
47140 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47141 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47142 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47143 + pax_task_size = SEGMEXEC_TASK_SIZE;
47144 + }
47145 +#endif
47146 +
47147 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47148 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47149 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47150 + put_cpu();
47151 + }
47152 +#endif
47153
47154 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47155 may depend on the personality. */
47156 SET_PERSONALITY(loc->elf_ex);
47157 +
47158 +#ifdef CONFIG_PAX_ASLR
47159 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47160 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47161 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47162 + }
47163 +#endif
47164 +
47165 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47166 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47167 + executable_stack = EXSTACK_DISABLE_X;
47168 + current->personality &= ~READ_IMPLIES_EXEC;
47169 + } else
47170 +#endif
47171 +
47172 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47173 current->personality |= READ_IMPLIES_EXEC;
47174
47175 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47176 * might try to exec. This is because the brk will
47177 * follow the loader, and is not movable. */
47178 #ifdef CONFIG_X86
47179 - load_bias = 0;
47180 + if (current->flags & PF_RANDOMIZE)
47181 + load_bias = 0;
47182 + else
47183 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47184 #else
47185 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47186 #endif
47187 +
47188 +#ifdef CONFIG_PAX_RANDMMAP
47189 + /* PaX: randomize base address at the default exe base if requested */
47190 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47191 +#ifdef CONFIG_SPARC64
47192 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47193 +#else
47194 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47195 +#endif
47196 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47197 + elf_flags |= MAP_FIXED;
47198 + }
47199 +#endif
47200 +
47201 }
47202
47203 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47204 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47205 * allowed task size. Note that p_filesz must always be
47206 * <= p_memsz so it is only necessary to check p_memsz.
47207 */
47208 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47209 - elf_ppnt->p_memsz > TASK_SIZE ||
47210 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47211 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47212 + elf_ppnt->p_memsz > pax_task_size ||
47213 + pax_task_size - elf_ppnt->p_memsz < k) {
47214 /* set_brk can never work. Avoid overflows. */
47215 send_sig(SIGKILL, current, 0);
47216 retval = -EINVAL;
47217 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47218 start_data += load_bias;
47219 end_data += load_bias;
47220
47221 +#ifdef CONFIG_PAX_RANDMMAP
47222 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47223 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47224 +#endif
47225 +
47226 /* Calling set_brk effectively mmaps the pages that we need
47227 * for the bss and break sections. We must do this before
47228 * mapping in the interpreter, to make sure it doesn't wind
47229 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47230 goto out_free_dentry;
47231 }
47232 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47233 - send_sig(SIGSEGV, current, 0);
47234 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47235 - goto out_free_dentry;
47236 + /*
47237 + * This bss-zeroing can fail if the ELF
47238 + * file specifies odd protections. So
47239 + * we don't check the return value
47240 + */
47241 }
47242
47243 if (elf_interpreter) {
47244 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47245 unsigned long n = off;
47246 if (n > PAGE_SIZE)
47247 n = PAGE_SIZE;
47248 - if (!dump_write(file, buf, n))
47249 + if (!dump_write(file, buf, n)) {
47250 + free_page((unsigned long)buf);
47251 return 0;
47252 + }
47253 off -= n;
47254 }
47255 free_page((unsigned long)buf);
47256 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47257 * Decide what to dump of a segment, part, all or none.
47258 */
47259 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47260 - unsigned long mm_flags)
47261 + unsigned long mm_flags, long signr)
47262 {
47263 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47264
47265 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47266 if (vma->vm_file == NULL)
47267 return 0;
47268
47269 - if (FILTER(MAPPED_PRIVATE))
47270 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47271 goto whole;
47272
47273 /*
47274 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47275 #undef DUMP_WRITE
47276
47277 #define DUMP_WRITE(addr, nr) \
47278 + do { \
47279 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47280 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47281 - goto end_coredump;
47282 + goto end_coredump; \
47283 + } while (0);
47284
47285 static void fill_elf_header(struct elfhdr *elf, int segs,
47286 u16 machine, u32 flags, u8 osabi)
47287 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47288 {
47289 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47290 int i = 0;
47291 - do
47292 + do {
47293 i += 2;
47294 - while (auxv[i - 2] != AT_NULL);
47295 + } while (auxv[i - 2] != AT_NULL);
47296 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47297 }
47298
47299 @@ -1452,7 +1926,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
47300 for (i = 1; i < view->n; ++i) {
47301 const struct user_regset *regset = &view->regsets[i];
47302 do_thread_regset_writeback(t->task, regset);
47303 - if (regset->core_note_type &&
47304 + if (regset->core_note_type && regset->get &&
47305 (!regset->active || regset->active(t->task, regset))) {
47306 int ret;
47307 size_t size = regset->n * regset->size;
47308 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47309 phdr.p_offset = offset;
47310 phdr.p_vaddr = vma->vm_start;
47311 phdr.p_paddr = 0;
47312 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47313 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47314 phdr.p_memsz = vma->vm_end - vma->vm_start;
47315 offset += phdr.p_filesz;
47316 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47317 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47318 unsigned long addr;
47319 unsigned long end;
47320
47321 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47322 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47323
47324 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47325 struct page *page;
47326 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47327 page = get_dump_page(addr);
47328 if (page) {
47329 void *kaddr = kmap(page);
47330 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47331 stop = ((size += PAGE_SIZE) > limit) ||
47332 !dump_write(file, kaddr, PAGE_SIZE);
47333 kunmap(page);
47334 @@ -2042,6 +2517,97 @@ out:
47335
47336 #endif /* USE_ELF_CORE_DUMP */
47337
47338 +#ifdef CONFIG_PAX_MPROTECT
47339 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47340 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47341 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47342 + *
47343 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47344 + * basis because we want to allow the common case and not the special ones.
47345 + */
47346 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47347 +{
47348 + struct elfhdr elf_h;
47349 + struct elf_phdr elf_p;
47350 + unsigned long i;
47351 + unsigned long oldflags;
47352 + bool is_textrel_rw, is_textrel_rx, is_relro;
47353 +
47354 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47355 + return;
47356 +
47357 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47358 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47359 +
47360 +#ifdef CONFIG_PAX_ELFRELOCS
47361 + /* possible TEXTREL */
47362 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47363 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47364 +#else
47365 + is_textrel_rw = false;
47366 + is_textrel_rx = false;
47367 +#endif
47368 +
47369 + /* possible RELRO */
47370 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47371 +
47372 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47373 + return;
47374 +
47375 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47376 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47377 +
47378 +#ifdef CONFIG_PAX_ETEXECRELOCS
47379 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47380 +#else
47381 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47382 +#endif
47383 +
47384 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47385 + !elf_check_arch(&elf_h) ||
47386 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47387 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47388 + return;
47389 +
47390 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47391 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47392 + return;
47393 + switch (elf_p.p_type) {
47394 + case PT_DYNAMIC:
47395 + if (!is_textrel_rw && !is_textrel_rx)
47396 + continue;
47397 + i = 0UL;
47398 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47399 + elf_dyn dyn;
47400 +
47401 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47402 + return;
47403 + if (dyn.d_tag == DT_NULL)
47404 + return;
47405 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47406 + gr_log_textrel(vma);
47407 + if (is_textrel_rw)
47408 + vma->vm_flags |= VM_MAYWRITE;
47409 + else
47410 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47411 + vma->vm_flags &= ~VM_MAYWRITE;
47412 + return;
47413 + }
47414 + i++;
47415 + }
47416 + return;
47417 +
47418 + case PT_GNU_RELRO:
47419 + if (!is_relro)
47420 + continue;
47421 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47422 + vma->vm_flags &= ~VM_MAYWRITE;
47423 + return;
47424 + }
47425 + }
47426 +}
47427 +#endif
47428 +
47429 static int __init init_elf_binfmt(void)
47430 {
47431 return register_binfmt(&elf_format);
47432 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47433 index ca88c46..f155a60 100644
47434 --- a/fs/binfmt_flat.c
47435 +++ b/fs/binfmt_flat.c
47436 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47437 realdatastart = (unsigned long) -ENOMEM;
47438 printk("Unable to allocate RAM for process data, errno %d\n",
47439 (int)-realdatastart);
47440 + down_write(&current->mm->mmap_sem);
47441 do_munmap(current->mm, textpos, text_len);
47442 + up_write(&current->mm->mmap_sem);
47443 ret = realdatastart;
47444 goto err;
47445 }
47446 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47447 }
47448 if (IS_ERR_VALUE(result)) {
47449 printk("Unable to read data+bss, errno %d\n", (int)-result);
47450 + down_write(&current->mm->mmap_sem);
47451 do_munmap(current->mm, textpos, text_len);
47452 do_munmap(current->mm, realdatastart, data_len + extra);
47453 + up_write(&current->mm->mmap_sem);
47454 ret = result;
47455 goto err;
47456 }
47457 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47458 }
47459 if (IS_ERR_VALUE(result)) {
47460 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47461 + down_write(&current->mm->mmap_sem);
47462 do_munmap(current->mm, textpos, text_len + data_len + extra +
47463 MAX_SHARED_LIBS * sizeof(unsigned long));
47464 + up_write(&current->mm->mmap_sem);
47465 ret = result;
47466 goto err;
47467 }
47468 diff --git a/fs/bio.c b/fs/bio.c
47469 index e696713..83de133 100644
47470 --- a/fs/bio.c
47471 +++ b/fs/bio.c
47472 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47473
47474 i = 0;
47475 while (i < bio_slab_nr) {
47476 - struct bio_slab *bslab = &bio_slabs[i];
47477 + bslab = &bio_slabs[i];
47478
47479 if (!bslab->slab && entry == -1)
47480 entry = i;
47481 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47482 const int read = bio_data_dir(bio) == READ;
47483 struct bio_map_data *bmd = bio->bi_private;
47484 int i;
47485 - char *p = bmd->sgvecs[0].iov_base;
47486 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47487
47488 __bio_for_each_segment(bvec, bio, i, 0) {
47489 char *addr = page_address(bvec->bv_page);
47490 diff --git a/fs/block_dev.c b/fs/block_dev.c
47491 index e65efa2..04fae57 100644
47492 --- a/fs/block_dev.c
47493 +++ b/fs/block_dev.c
47494 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47495 else if (bdev->bd_contains == bdev)
47496 res = 0; /* is a whole device which isn't held */
47497
47498 - else if (bdev->bd_contains->bd_holder == bd_claim)
47499 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47500 res = 0; /* is a partition of a device that is being partitioned */
47501 else if (bdev->bd_contains->bd_holder != NULL)
47502 res = -EBUSY; /* is a partition of a held device */
47503 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47504 index c4bc570..42acd8d 100644
47505 --- a/fs/btrfs/ctree.c
47506 +++ b/fs/btrfs/ctree.c
47507 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47508 free_extent_buffer(buf);
47509 add_root_to_dirty_list(root);
47510 } else {
47511 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47512 - parent_start = parent->start;
47513 - else
47514 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47515 + if (parent)
47516 + parent_start = parent->start;
47517 + else
47518 + parent_start = 0;
47519 + } else
47520 parent_start = 0;
47521
47522 WARN_ON(trans->transid != btrfs_header_generation(parent));
47523 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47524
47525 ret = 0;
47526 if (slot == 0) {
47527 - struct btrfs_disk_key disk_key;
47528 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47529 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47530 }
47531 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47532 index f447188..59c17c5 100644
47533 --- a/fs/btrfs/disk-io.c
47534 +++ b/fs/btrfs/disk-io.c
47535 @@ -39,7 +39,7 @@
47536 #include "tree-log.h"
47537 #include "free-space-cache.h"
47538
47539 -static struct extent_io_ops btree_extent_io_ops;
47540 +static const struct extent_io_ops btree_extent_io_ops;
47541 static void end_workqueue_fn(struct btrfs_work *work);
47542 static void free_fs_root(struct btrfs_root *root);
47543
47544 @@ -2607,7 +2607,7 @@ out:
47545 return 0;
47546 }
47547
47548 -static struct extent_io_ops btree_extent_io_ops = {
47549 +static const struct extent_io_ops btree_extent_io_ops = {
47550 .write_cache_pages_lock_hook = btree_lock_page_hook,
47551 .readpage_end_io_hook = btree_readpage_end_io_hook,
47552 .submit_bio_hook = btree_submit_bio_hook,
47553 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47554 index 559f724..a026171 100644
47555 --- a/fs/btrfs/extent-tree.c
47556 +++ b/fs/btrfs/extent-tree.c
47557 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47558 u64 group_start = group->key.objectid;
47559 new_extents = kmalloc(sizeof(*new_extents),
47560 GFP_NOFS);
47561 + if (!new_extents) {
47562 + ret = -ENOMEM;
47563 + goto out;
47564 + }
47565 nr_extents = 1;
47566 ret = get_new_locations(reloc_inode,
47567 extent_key,
47568 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47569 index 36de250..7ec75c7 100644
47570 --- a/fs/btrfs/extent_io.h
47571 +++ b/fs/btrfs/extent_io.h
47572 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47573 struct bio *bio, int mirror_num,
47574 unsigned long bio_flags);
47575 struct extent_io_ops {
47576 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47577 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47578 u64 start, u64 end, int *page_started,
47579 unsigned long *nr_written);
47580 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47581 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47582 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47583 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47584 extent_submit_bio_hook_t *submit_bio_hook;
47585 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47586 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47587 size_t size, struct bio *bio,
47588 unsigned long bio_flags);
47589 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47590 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47591 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47592 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47593 u64 start, u64 end,
47594 struct extent_state *state);
47595 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47596 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47597 u64 start, u64 end,
47598 struct extent_state *state);
47599 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47600 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47601 struct extent_state *state);
47602 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47603 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47604 struct extent_state *state, int uptodate);
47605 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47606 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47607 unsigned long old, unsigned long bits);
47608 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47609 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47610 unsigned long bits);
47611 - int (*merge_extent_hook)(struct inode *inode,
47612 + int (* const merge_extent_hook)(struct inode *inode,
47613 struct extent_state *new,
47614 struct extent_state *other);
47615 - int (*split_extent_hook)(struct inode *inode,
47616 + int (* const split_extent_hook)(struct inode *inode,
47617 struct extent_state *orig, u64 split);
47618 - int (*write_cache_pages_lock_hook)(struct page *page);
47619 + int (* const write_cache_pages_lock_hook)(struct page *page);
47620 };
47621
47622 struct extent_io_tree {
47623 @@ -88,7 +88,7 @@ struct extent_io_tree {
47624 u64 dirty_bytes;
47625 spinlock_t lock;
47626 spinlock_t buffer_lock;
47627 - struct extent_io_ops *ops;
47628 + const struct extent_io_ops *ops;
47629 };
47630
47631 struct extent_state {
47632 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47633 index cb2849f..3718fb4 100644
47634 --- a/fs/btrfs/free-space-cache.c
47635 +++ b/fs/btrfs/free-space-cache.c
47636 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47637
47638 while(1) {
47639 if (entry->bytes < bytes || entry->offset < min_start) {
47640 - struct rb_node *node;
47641 -
47642 node = rb_next(&entry->offset_index);
47643 if (!node)
47644 break;
47645 @@ -1226,7 +1224,7 @@ again:
47646 */
47647 while (entry->bitmap || found_bitmap ||
47648 (!entry->bitmap && entry->bytes < min_bytes)) {
47649 - struct rb_node *node = rb_next(&entry->offset_index);
47650 + node = rb_next(&entry->offset_index);
47651
47652 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47653 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47654 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47655 index e03a836..323837e 100644
47656 --- a/fs/btrfs/inode.c
47657 +++ b/fs/btrfs/inode.c
47658 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47659 static const struct address_space_operations btrfs_aops;
47660 static const struct address_space_operations btrfs_symlink_aops;
47661 static const struct file_operations btrfs_dir_file_operations;
47662 -static struct extent_io_ops btrfs_extent_io_ops;
47663 +static const struct extent_io_ops btrfs_extent_io_ops;
47664
47665 static struct kmem_cache *btrfs_inode_cachep;
47666 struct kmem_cache *btrfs_trans_handle_cachep;
47667 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47668 1, 0, NULL, GFP_NOFS);
47669 while (start < end) {
47670 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47671 + BUG_ON(!async_cow);
47672 async_cow->inode = inode;
47673 async_cow->root = root;
47674 async_cow->locked_page = locked_page;
47675 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47676 inline_size = btrfs_file_extent_inline_item_len(leaf,
47677 btrfs_item_nr(leaf, path->slots[0]));
47678 tmp = kmalloc(inline_size, GFP_NOFS);
47679 + if (!tmp)
47680 + return -ENOMEM;
47681 ptr = btrfs_file_extent_inline_start(item);
47682
47683 read_extent_buffer(leaf, tmp, ptr, inline_size);
47684 @@ -5410,7 +5413,7 @@ fail:
47685 return -ENOMEM;
47686 }
47687
47688 -static int btrfs_getattr(struct vfsmount *mnt,
47689 +int btrfs_getattr(struct vfsmount *mnt,
47690 struct dentry *dentry, struct kstat *stat)
47691 {
47692 struct inode *inode = dentry->d_inode;
47693 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47694 return 0;
47695 }
47696
47697 +EXPORT_SYMBOL(btrfs_getattr);
47698 +
47699 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47700 +{
47701 + return BTRFS_I(inode)->root->anon_super.s_dev;
47702 +}
47703 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47704 +
47705 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47706 struct inode *new_dir, struct dentry *new_dentry)
47707 {
47708 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47709 .fsync = btrfs_sync_file,
47710 };
47711
47712 -static struct extent_io_ops btrfs_extent_io_ops = {
47713 +static const struct extent_io_ops btrfs_extent_io_ops = {
47714 .fill_delalloc = run_delalloc_range,
47715 .submit_bio_hook = btrfs_submit_bio_hook,
47716 .merge_bio_hook = btrfs_merge_bio_hook,
47717 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47718 index ab7ab53..94e0781 100644
47719 --- a/fs/btrfs/relocation.c
47720 +++ b/fs/btrfs/relocation.c
47721 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47722 }
47723 spin_unlock(&rc->reloc_root_tree.lock);
47724
47725 - BUG_ON((struct btrfs_root *)node->data != root);
47726 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47727
47728 if (!del) {
47729 spin_lock(&rc->reloc_root_tree.lock);
47730 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47731 index a240b6f..4ce16ef 100644
47732 --- a/fs/btrfs/sysfs.c
47733 +++ b/fs/btrfs/sysfs.c
47734 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47735 complete(&root->kobj_unregister);
47736 }
47737
47738 -static struct sysfs_ops btrfs_super_attr_ops = {
47739 +static const struct sysfs_ops btrfs_super_attr_ops = {
47740 .show = btrfs_super_attr_show,
47741 .store = btrfs_super_attr_store,
47742 };
47743
47744 -static struct sysfs_ops btrfs_root_attr_ops = {
47745 +static const struct sysfs_ops btrfs_root_attr_ops = {
47746 .show = btrfs_root_attr_show,
47747 .store = btrfs_root_attr_store,
47748 };
47749 diff --git a/fs/buffer.c b/fs/buffer.c
47750 index 6fa5302..395d9f6 100644
47751 --- a/fs/buffer.c
47752 +++ b/fs/buffer.c
47753 @@ -25,6 +25,7 @@
47754 #include <linux/percpu.h>
47755 #include <linux/slab.h>
47756 #include <linux/capability.h>
47757 +#include <linux/security.h>
47758 #include <linux/blkdev.h>
47759 #include <linux/file.h>
47760 #include <linux/quotaops.h>
47761 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47762 index 3797e00..ce776f6 100644
47763 --- a/fs/cachefiles/bind.c
47764 +++ b/fs/cachefiles/bind.c
47765 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47766 args);
47767
47768 /* start by checking things over */
47769 - ASSERT(cache->fstop_percent >= 0 &&
47770 - cache->fstop_percent < cache->fcull_percent &&
47771 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47772 cache->fcull_percent < cache->frun_percent &&
47773 cache->frun_percent < 100);
47774
47775 - ASSERT(cache->bstop_percent >= 0 &&
47776 - cache->bstop_percent < cache->bcull_percent &&
47777 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47778 cache->bcull_percent < cache->brun_percent &&
47779 cache->brun_percent < 100);
47780
47781 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47782 index 4618516..bb30d01 100644
47783 --- a/fs/cachefiles/daemon.c
47784 +++ b/fs/cachefiles/daemon.c
47785 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47786 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47787 return -EIO;
47788
47789 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47790 + if (datalen > PAGE_SIZE - 1)
47791 return -EOPNOTSUPP;
47792
47793 /* drag the command string into the kernel so we can parse it */
47794 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47795 if (args[0] != '%' || args[1] != '\0')
47796 return -EINVAL;
47797
47798 - if (fstop < 0 || fstop >= cache->fcull_percent)
47799 + if (fstop >= cache->fcull_percent)
47800 return cachefiles_daemon_range_error(cache, args);
47801
47802 cache->fstop_percent = fstop;
47803 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47804 if (args[0] != '%' || args[1] != '\0')
47805 return -EINVAL;
47806
47807 - if (bstop < 0 || bstop >= cache->bcull_percent)
47808 + if (bstop >= cache->bcull_percent)
47809 return cachefiles_daemon_range_error(cache, args);
47810
47811 cache->bstop_percent = bstop;
47812 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47813 index f7c255f..fcd61de 100644
47814 --- a/fs/cachefiles/internal.h
47815 +++ b/fs/cachefiles/internal.h
47816 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47817 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47818 struct rb_root active_nodes; /* active nodes (can't be culled) */
47819 rwlock_t active_lock; /* lock for active_nodes */
47820 - atomic_t gravecounter; /* graveyard uniquifier */
47821 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47822 unsigned frun_percent; /* when to stop culling (% files) */
47823 unsigned fcull_percent; /* when to start culling (% files) */
47824 unsigned fstop_percent; /* when to stop allocating (% files) */
47825 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47826 * proc.c
47827 */
47828 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47829 -extern atomic_t cachefiles_lookup_histogram[HZ];
47830 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47831 -extern atomic_t cachefiles_create_histogram[HZ];
47832 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47833 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47834 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47835
47836 extern int __init cachefiles_proc_init(void);
47837 extern void cachefiles_proc_cleanup(void);
47838 static inline
47839 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47840 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47841 {
47842 unsigned long jif = jiffies - start_jif;
47843 if (jif >= HZ)
47844 jif = HZ - 1;
47845 - atomic_inc(&histogram[jif]);
47846 + atomic_inc_unchecked(&histogram[jif]);
47847 }
47848
47849 #else
47850 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47851 index 14ac480..a62766c 100644
47852 --- a/fs/cachefiles/namei.c
47853 +++ b/fs/cachefiles/namei.c
47854 @@ -250,7 +250,7 @@ try_again:
47855 /* first step is to make up a grave dentry in the graveyard */
47856 sprintf(nbuffer, "%08x%08x",
47857 (uint32_t) get_seconds(),
47858 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47859 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47860
47861 /* do the multiway lock magic */
47862 trap = lock_rename(cache->graveyard, dir);
47863 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47864 index eccd339..4c1d995 100644
47865 --- a/fs/cachefiles/proc.c
47866 +++ b/fs/cachefiles/proc.c
47867 @@ -14,9 +14,9 @@
47868 #include <linux/seq_file.h>
47869 #include "internal.h"
47870
47871 -atomic_t cachefiles_lookup_histogram[HZ];
47872 -atomic_t cachefiles_mkdir_histogram[HZ];
47873 -atomic_t cachefiles_create_histogram[HZ];
47874 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47875 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47876 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47877
47878 /*
47879 * display the latency histogram
47880 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47881 return 0;
47882 default:
47883 index = (unsigned long) v - 3;
47884 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47885 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47886 - z = atomic_read(&cachefiles_create_histogram[index]);
47887 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47888 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47889 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47890 if (x == 0 && y == 0 && z == 0)
47891 return 0;
47892
47893 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47894 index a6c8c6f..5cf8517 100644
47895 --- a/fs/cachefiles/rdwr.c
47896 +++ b/fs/cachefiles/rdwr.c
47897 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47898 old_fs = get_fs();
47899 set_fs(KERNEL_DS);
47900 ret = file->f_op->write(
47901 - file, (const void __user *) data, len, &pos);
47902 + file, (const void __force_user *) data, len, &pos);
47903 set_fs(old_fs);
47904 kunmap(page);
47905 if (ret != len)
47906 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47907 index 42cec2a..2aba466 100644
47908 --- a/fs/cifs/cifs_debug.c
47909 +++ b/fs/cifs/cifs_debug.c
47910 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47911 tcon = list_entry(tmp3,
47912 struct cifsTconInfo,
47913 tcon_list);
47914 - atomic_set(&tcon->num_smbs_sent, 0);
47915 - atomic_set(&tcon->num_writes, 0);
47916 - atomic_set(&tcon->num_reads, 0);
47917 - atomic_set(&tcon->num_oplock_brks, 0);
47918 - atomic_set(&tcon->num_opens, 0);
47919 - atomic_set(&tcon->num_posixopens, 0);
47920 - atomic_set(&tcon->num_posixmkdirs, 0);
47921 - atomic_set(&tcon->num_closes, 0);
47922 - atomic_set(&tcon->num_deletes, 0);
47923 - atomic_set(&tcon->num_mkdirs, 0);
47924 - atomic_set(&tcon->num_rmdirs, 0);
47925 - atomic_set(&tcon->num_renames, 0);
47926 - atomic_set(&tcon->num_t2renames, 0);
47927 - atomic_set(&tcon->num_ffirst, 0);
47928 - atomic_set(&tcon->num_fnext, 0);
47929 - atomic_set(&tcon->num_fclose, 0);
47930 - atomic_set(&tcon->num_hardlinks, 0);
47931 - atomic_set(&tcon->num_symlinks, 0);
47932 - atomic_set(&tcon->num_locks, 0);
47933 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47934 + atomic_set_unchecked(&tcon->num_writes, 0);
47935 + atomic_set_unchecked(&tcon->num_reads, 0);
47936 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47937 + atomic_set_unchecked(&tcon->num_opens, 0);
47938 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47939 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47940 + atomic_set_unchecked(&tcon->num_closes, 0);
47941 + atomic_set_unchecked(&tcon->num_deletes, 0);
47942 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47943 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47944 + atomic_set_unchecked(&tcon->num_renames, 0);
47945 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47946 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47947 + atomic_set_unchecked(&tcon->num_fnext, 0);
47948 + atomic_set_unchecked(&tcon->num_fclose, 0);
47949 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47950 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47951 + atomic_set_unchecked(&tcon->num_locks, 0);
47952 }
47953 }
47954 }
47955 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47956 if (tcon->need_reconnect)
47957 seq_puts(m, "\tDISCONNECTED ");
47958 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47959 - atomic_read(&tcon->num_smbs_sent),
47960 - atomic_read(&tcon->num_oplock_brks));
47961 + atomic_read_unchecked(&tcon->num_smbs_sent),
47962 + atomic_read_unchecked(&tcon->num_oplock_brks));
47963 seq_printf(m, "\nReads: %d Bytes: %lld",
47964 - atomic_read(&tcon->num_reads),
47965 + atomic_read_unchecked(&tcon->num_reads),
47966 (long long)(tcon->bytes_read));
47967 seq_printf(m, "\nWrites: %d Bytes: %lld",
47968 - atomic_read(&tcon->num_writes),
47969 + atomic_read_unchecked(&tcon->num_writes),
47970 (long long)(tcon->bytes_written));
47971 seq_printf(m, "\nFlushes: %d",
47972 - atomic_read(&tcon->num_flushes));
47973 + atomic_read_unchecked(&tcon->num_flushes));
47974 seq_printf(m, "\nLocks: %d HardLinks: %d "
47975 "Symlinks: %d",
47976 - atomic_read(&tcon->num_locks),
47977 - atomic_read(&tcon->num_hardlinks),
47978 - atomic_read(&tcon->num_symlinks));
47979 + atomic_read_unchecked(&tcon->num_locks),
47980 + atomic_read_unchecked(&tcon->num_hardlinks),
47981 + atomic_read_unchecked(&tcon->num_symlinks));
47982 seq_printf(m, "\nOpens: %d Closes: %d "
47983 "Deletes: %d",
47984 - atomic_read(&tcon->num_opens),
47985 - atomic_read(&tcon->num_closes),
47986 - atomic_read(&tcon->num_deletes));
47987 + atomic_read_unchecked(&tcon->num_opens),
47988 + atomic_read_unchecked(&tcon->num_closes),
47989 + atomic_read_unchecked(&tcon->num_deletes));
47990 seq_printf(m, "\nPosix Opens: %d "
47991 "Posix Mkdirs: %d",
47992 - atomic_read(&tcon->num_posixopens),
47993 - atomic_read(&tcon->num_posixmkdirs));
47994 + atomic_read_unchecked(&tcon->num_posixopens),
47995 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47996 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47997 - atomic_read(&tcon->num_mkdirs),
47998 - atomic_read(&tcon->num_rmdirs));
47999 + atomic_read_unchecked(&tcon->num_mkdirs),
48000 + atomic_read_unchecked(&tcon->num_rmdirs));
48001 seq_printf(m, "\nRenames: %d T2 Renames %d",
48002 - atomic_read(&tcon->num_renames),
48003 - atomic_read(&tcon->num_t2renames));
48004 + atomic_read_unchecked(&tcon->num_renames),
48005 + atomic_read_unchecked(&tcon->num_t2renames));
48006 seq_printf(m, "\nFindFirst: %d FNext %d "
48007 "FClose %d",
48008 - atomic_read(&tcon->num_ffirst),
48009 - atomic_read(&tcon->num_fnext),
48010 - atomic_read(&tcon->num_fclose));
48011 + atomic_read_unchecked(&tcon->num_ffirst),
48012 + atomic_read_unchecked(&tcon->num_fnext),
48013 + atomic_read_unchecked(&tcon->num_fclose));
48014 }
48015 }
48016 }
48017 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48018 index 1445407..68cb0dc 100644
48019 --- a/fs/cifs/cifsfs.c
48020 +++ b/fs/cifs/cifsfs.c
48021 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
48022 cifs_req_cachep = kmem_cache_create("cifs_request",
48023 CIFSMaxBufSize +
48024 MAX_CIFS_HDR_SIZE, 0,
48025 - SLAB_HWCACHE_ALIGN, NULL);
48026 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48027 if (cifs_req_cachep == NULL)
48028 return -ENOMEM;
48029
48030 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
48031 efficient to alloc 1 per page off the slab compared to 17K (5page)
48032 alloc of large cifs buffers even when page debugging is on */
48033 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48034 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48035 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48036 NULL);
48037 if (cifs_sm_req_cachep == NULL) {
48038 mempool_destroy(cifs_req_poolp);
48039 @@ -991,8 +991,8 @@ init_cifs(void)
48040 atomic_set(&bufAllocCount, 0);
48041 atomic_set(&smBufAllocCount, 0);
48042 #ifdef CONFIG_CIFS_STATS2
48043 - atomic_set(&totBufAllocCount, 0);
48044 - atomic_set(&totSmBufAllocCount, 0);
48045 + atomic_set_unchecked(&totBufAllocCount, 0);
48046 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48047 #endif /* CONFIG_CIFS_STATS2 */
48048
48049 atomic_set(&midCount, 0);
48050 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48051 index e29581e..1c22bab 100644
48052 --- a/fs/cifs/cifsglob.h
48053 +++ b/fs/cifs/cifsglob.h
48054 @@ -252,28 +252,28 @@ struct cifsTconInfo {
48055 __u16 Flags; /* optional support bits */
48056 enum statusEnum tidStatus;
48057 #ifdef CONFIG_CIFS_STATS
48058 - atomic_t num_smbs_sent;
48059 - atomic_t num_writes;
48060 - atomic_t num_reads;
48061 - atomic_t num_flushes;
48062 - atomic_t num_oplock_brks;
48063 - atomic_t num_opens;
48064 - atomic_t num_closes;
48065 - atomic_t num_deletes;
48066 - atomic_t num_mkdirs;
48067 - atomic_t num_posixopens;
48068 - atomic_t num_posixmkdirs;
48069 - atomic_t num_rmdirs;
48070 - atomic_t num_renames;
48071 - atomic_t num_t2renames;
48072 - atomic_t num_ffirst;
48073 - atomic_t num_fnext;
48074 - atomic_t num_fclose;
48075 - atomic_t num_hardlinks;
48076 - atomic_t num_symlinks;
48077 - atomic_t num_locks;
48078 - atomic_t num_acl_get;
48079 - atomic_t num_acl_set;
48080 + atomic_unchecked_t num_smbs_sent;
48081 + atomic_unchecked_t num_writes;
48082 + atomic_unchecked_t num_reads;
48083 + atomic_unchecked_t num_flushes;
48084 + atomic_unchecked_t num_oplock_brks;
48085 + atomic_unchecked_t num_opens;
48086 + atomic_unchecked_t num_closes;
48087 + atomic_unchecked_t num_deletes;
48088 + atomic_unchecked_t num_mkdirs;
48089 + atomic_unchecked_t num_posixopens;
48090 + atomic_unchecked_t num_posixmkdirs;
48091 + atomic_unchecked_t num_rmdirs;
48092 + atomic_unchecked_t num_renames;
48093 + atomic_unchecked_t num_t2renames;
48094 + atomic_unchecked_t num_ffirst;
48095 + atomic_unchecked_t num_fnext;
48096 + atomic_unchecked_t num_fclose;
48097 + atomic_unchecked_t num_hardlinks;
48098 + atomic_unchecked_t num_symlinks;
48099 + atomic_unchecked_t num_locks;
48100 + atomic_unchecked_t num_acl_get;
48101 + atomic_unchecked_t num_acl_set;
48102 #ifdef CONFIG_CIFS_STATS2
48103 unsigned long long time_writes;
48104 unsigned long long time_reads;
48105 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48106 }
48107
48108 #ifdef CONFIG_CIFS_STATS
48109 -#define cifs_stats_inc atomic_inc
48110 +#define cifs_stats_inc atomic_inc_unchecked
48111
48112 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48113 unsigned int bytes)
48114 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48115 /* Various Debug counters */
48116 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48117 #ifdef CONFIG_CIFS_STATS2
48118 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48119 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48120 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48121 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48122 #endif
48123 GLOBAL_EXTERN atomic_t smBufAllocCount;
48124 GLOBAL_EXTERN atomic_t midCount;
48125 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48126 index fc1e048..28b3441 100644
48127 --- a/fs/cifs/link.c
48128 +++ b/fs/cifs/link.c
48129 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48130
48131 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48132 {
48133 - char *p = nd_get_link(nd);
48134 + const char *p = nd_get_link(nd);
48135 if (!IS_ERR(p))
48136 kfree(p);
48137 }
48138 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48139 index 95b82e8..12a538d 100644
48140 --- a/fs/cifs/misc.c
48141 +++ b/fs/cifs/misc.c
48142 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48143 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48144 atomic_inc(&bufAllocCount);
48145 #ifdef CONFIG_CIFS_STATS2
48146 - atomic_inc(&totBufAllocCount);
48147 + atomic_inc_unchecked(&totBufAllocCount);
48148 #endif /* CONFIG_CIFS_STATS2 */
48149 }
48150
48151 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48152 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48153 atomic_inc(&smBufAllocCount);
48154 #ifdef CONFIG_CIFS_STATS2
48155 - atomic_inc(&totSmBufAllocCount);
48156 + atomic_inc_unchecked(&totSmBufAllocCount);
48157 #endif /* CONFIG_CIFS_STATS2 */
48158
48159 }
48160 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48161 index a5bf577..6d19845 100644
48162 --- a/fs/coda/cache.c
48163 +++ b/fs/coda/cache.c
48164 @@ -24,14 +24,14 @@
48165 #include <linux/coda_fs_i.h>
48166 #include <linux/coda_cache.h>
48167
48168 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48169 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48170
48171 /* replace or extend an acl cache hit */
48172 void coda_cache_enter(struct inode *inode, int mask)
48173 {
48174 struct coda_inode_info *cii = ITOC(inode);
48175
48176 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48177 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48178 if (cii->c_uid != current_fsuid()) {
48179 cii->c_uid = current_fsuid();
48180 cii->c_cached_perm = mask;
48181 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48182 void coda_cache_clear_inode(struct inode *inode)
48183 {
48184 struct coda_inode_info *cii = ITOC(inode);
48185 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48186 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48187 }
48188
48189 /* remove all acl caches */
48190 void coda_cache_clear_all(struct super_block *sb)
48191 {
48192 - atomic_inc(&permission_epoch);
48193 + atomic_inc_unchecked(&permission_epoch);
48194 }
48195
48196
48197 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48198
48199 hit = (mask & cii->c_cached_perm) == mask &&
48200 cii->c_uid == current_fsuid() &&
48201 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48202 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48203
48204 return hit;
48205 }
48206 diff --git a/fs/compat.c b/fs/compat.c
48207 index d1e2411..c2ef8ed 100644
48208 --- a/fs/compat.c
48209 +++ b/fs/compat.c
48210 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48211 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48212 {
48213 compat_ino_t ino = stat->ino;
48214 - typeof(ubuf->st_uid) uid = 0;
48215 - typeof(ubuf->st_gid) gid = 0;
48216 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48217 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48218 int err;
48219
48220 SET_UID(uid, stat->uid);
48221 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48222
48223 set_fs(KERNEL_DS);
48224 /* The __user pointer cast is valid because of the set_fs() */
48225 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48226 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48227 set_fs(oldfs);
48228 /* truncating is ok because it's a user address */
48229 if (!ret)
48230 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48231
48232 struct compat_readdir_callback {
48233 struct compat_old_linux_dirent __user *dirent;
48234 + struct file * file;
48235 int result;
48236 };
48237
48238 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48239 buf->result = -EOVERFLOW;
48240 return -EOVERFLOW;
48241 }
48242 +
48243 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48244 + return 0;
48245 +
48246 buf->result++;
48247 dirent = buf->dirent;
48248 if (!access_ok(VERIFY_WRITE, dirent,
48249 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48250
48251 buf.result = 0;
48252 buf.dirent = dirent;
48253 + buf.file = file;
48254
48255 error = vfs_readdir(file, compat_fillonedir, &buf);
48256 if (buf.result)
48257 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48258 struct compat_getdents_callback {
48259 struct compat_linux_dirent __user *current_dir;
48260 struct compat_linux_dirent __user *previous;
48261 + struct file * file;
48262 int count;
48263 int error;
48264 };
48265 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48266 buf->error = -EOVERFLOW;
48267 return -EOVERFLOW;
48268 }
48269 +
48270 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48271 + return 0;
48272 +
48273 dirent = buf->previous;
48274 if (dirent) {
48275 if (__put_user(offset, &dirent->d_off))
48276 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48277 buf.previous = NULL;
48278 buf.count = count;
48279 buf.error = 0;
48280 + buf.file = file;
48281
48282 error = vfs_readdir(file, compat_filldir, &buf);
48283 if (error >= 0)
48284 @@ -987,6 +999,7 @@ out:
48285 struct compat_getdents_callback64 {
48286 struct linux_dirent64 __user *current_dir;
48287 struct linux_dirent64 __user *previous;
48288 + struct file * file;
48289 int count;
48290 int error;
48291 };
48292 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48293 buf->error = -EINVAL; /* only used if we fail.. */
48294 if (reclen > buf->count)
48295 return -EINVAL;
48296 +
48297 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48298 + return 0;
48299 +
48300 dirent = buf->previous;
48301
48302 if (dirent) {
48303 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48304 buf.previous = NULL;
48305 buf.count = count;
48306 buf.error = 0;
48307 + buf.file = file;
48308
48309 error = vfs_readdir(file, compat_filldir64, &buf);
48310 if (error >= 0)
48311 error = buf.error;
48312 lastdirent = buf.previous;
48313 if (lastdirent) {
48314 - typeof(lastdirent->d_off) d_off = file->f_pos;
48315 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48316 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48317 error = -EFAULT;
48318 else
48319 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48320 * verify all the pointers
48321 */
48322 ret = -EINVAL;
48323 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48324 + if (nr_segs > UIO_MAXIOV)
48325 goto out;
48326 if (!file->f_op)
48327 goto out;
48328 @@ -1454,6 +1472,10 @@ out:
48329 return ret;
48330 }
48331
48332 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48333 +extern atomic64_unchecked_t global_exec_counter;
48334 +#endif
48335 +
48336 /*
48337 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48338 * that it processes 32 bit argv and envp pointers.
48339 @@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48340 compat_uptr_t __user *envp,
48341 struct pt_regs * regs)
48342 {
48343 +#ifdef CONFIG_GRKERNSEC
48344 + struct file *old_exec_file;
48345 + struct acl_subject_label *old_acl;
48346 + struct rlimit old_rlim[RLIM_NLIMITS];
48347 +#endif
48348 struct linux_binprm *bprm;
48349 struct file *file;
48350 struct files_struct *displaced;
48351 bool clear_in_exec;
48352 int retval;
48353 + const struct cred *cred = current_cred();
48354 +
48355 + /*
48356 + * We move the actual failure in case of RLIMIT_NPROC excess from
48357 + * set*uid() to execve() because too many poorly written programs
48358 + * don't check setuid() return code. Here we additionally recheck
48359 + * whether NPROC limit is still exceeded.
48360 + */
48361 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48362 +
48363 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48364 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48365 + retval = -EAGAIN;
48366 + goto out_ret;
48367 + }
48368 +
48369 + /* We're below the limit (still or again), so we don't want to make
48370 + * further execve() calls fail. */
48371 + current->flags &= ~PF_NPROC_EXCEEDED;
48372
48373 retval = unshare_files(&displaced);
48374 if (retval)
48375 @@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48376 if (IS_ERR(file))
48377 goto out_unmark;
48378
48379 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48380 + retval = -EPERM;
48381 + goto out_file;
48382 + }
48383 +
48384 sched_exec();
48385
48386 bprm->file = file;
48387 bprm->filename = filename;
48388 bprm->interp = filename;
48389
48390 + if (gr_process_user_ban()) {
48391 + retval = -EPERM;
48392 + goto out_file;
48393 + }
48394 +
48395 + retval = -EACCES;
48396 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48397 + goto out_file;
48398 +
48399 retval = bprm_mm_init(bprm);
48400 if (retval)
48401 goto out_file;
48402 @@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48403 if (retval < 0)
48404 goto out;
48405
48406 + if (!gr_tpe_allow(file)) {
48407 + retval = -EACCES;
48408 + goto out;
48409 + }
48410 +
48411 + if (gr_check_crash_exec(file)) {
48412 + retval = -EACCES;
48413 + goto out;
48414 + }
48415 +
48416 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48417 +
48418 + gr_handle_exec_args_compat(bprm, argv);
48419 +
48420 +#ifdef CONFIG_GRKERNSEC
48421 + old_acl = current->acl;
48422 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48423 + old_exec_file = current->exec_file;
48424 + get_file(file);
48425 + current->exec_file = file;
48426 +#endif
48427 +
48428 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48429 + bprm->unsafe);
48430 + if (retval < 0)
48431 + goto out_fail;
48432 +
48433 retval = search_binary_handler(bprm, regs);
48434 if (retval < 0)
48435 - goto out;
48436 + goto out_fail;
48437 +#ifdef CONFIG_GRKERNSEC
48438 + if (old_exec_file)
48439 + fput(old_exec_file);
48440 +#endif
48441
48442 /* execve succeeded */
48443 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48444 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48445 +#endif
48446 current->fs->in_exec = 0;
48447 current->in_execve = 0;
48448 acct_update_integrals(current);
48449 @@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48450 put_files_struct(displaced);
48451 return retval;
48452
48453 +out_fail:
48454 +#ifdef CONFIG_GRKERNSEC
48455 + current->acl = old_acl;
48456 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48457 + fput(current->exec_file);
48458 + current->exec_file = old_exec_file;
48459 +#endif
48460 +
48461 out:
48462 if (bprm->mm) {
48463 acct_arg_size(bprm, 0);
48464 @@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48465 struct fdtable *fdt;
48466 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48467
48468 + pax_track_stack();
48469 +
48470 if (n < 0)
48471 goto out_nofds;
48472
48473 @@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48474 oldfs = get_fs();
48475 set_fs(KERNEL_DS);
48476 /* The __user pointer casts are valid because of the set_fs() */
48477 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48478 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48479 set_fs(oldfs);
48480
48481 if (err)
48482 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48483 index 0adced2..bbb1b0d 100644
48484 --- a/fs/compat_binfmt_elf.c
48485 +++ b/fs/compat_binfmt_elf.c
48486 @@ -29,10 +29,12 @@
48487 #undef elfhdr
48488 #undef elf_phdr
48489 #undef elf_note
48490 +#undef elf_dyn
48491 #undef elf_addr_t
48492 #define elfhdr elf32_hdr
48493 #define elf_phdr elf32_phdr
48494 #define elf_note elf32_note
48495 +#define elf_dyn Elf32_Dyn
48496 #define elf_addr_t Elf32_Addr
48497
48498 /*
48499 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48500 index d84e705..d8c364c 100644
48501 --- a/fs/compat_ioctl.c
48502 +++ b/fs/compat_ioctl.c
48503 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48504 up = (struct compat_video_spu_palette __user *) arg;
48505 err = get_user(palp, &up->palette);
48506 err |= get_user(length, &up->length);
48507 + if (err)
48508 + return -EFAULT;
48509
48510 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48511 err = put_user(compat_ptr(palp), &up_native->palette);
48512 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48513 return -EFAULT;
48514 if (__get_user(udata, &ss32->iomem_base))
48515 return -EFAULT;
48516 - ss.iomem_base = compat_ptr(udata);
48517 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48518 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48519 __get_user(ss.port_high, &ss32->port_high))
48520 return -EFAULT;
48521 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48522 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48523 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48524 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48525 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48526 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48527 return -EFAULT;
48528
48529 return ioctl_preallocate(file, p);
48530 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48531 index 8e48b52..f01ed91 100644
48532 --- a/fs/configfs/dir.c
48533 +++ b/fs/configfs/dir.c
48534 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48535 }
48536 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48537 struct configfs_dirent *next;
48538 - const char * name;
48539 + const unsigned char * name;
48540 + char d_name[sizeof(next->s_dentry->d_iname)];
48541 int len;
48542
48543 next = list_entry(p, struct configfs_dirent,
48544 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48545 continue;
48546
48547 name = configfs_get_name(next);
48548 - len = strlen(name);
48549 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48550 + len = next->s_dentry->d_name.len;
48551 + memcpy(d_name, name, len);
48552 + name = d_name;
48553 + } else
48554 + len = strlen(name);
48555 if (next->s_dentry)
48556 ino = next->s_dentry->d_inode->i_ino;
48557 else
48558 diff --git a/fs/dcache.c b/fs/dcache.c
48559 index 44c0aea..2529092 100644
48560 --- a/fs/dcache.c
48561 +++ b/fs/dcache.c
48562 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48563
48564 static struct kmem_cache *dentry_cache __read_mostly;
48565
48566 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48567 -
48568 /*
48569 * This is the single most critical data structure when it comes
48570 * to the dcache: the hashtable for lookups. Somebody should try
48571 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48572 mempages -= reserve;
48573
48574 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48575 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48576 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48577
48578 dcache_init();
48579 inode_init();
48580 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48581 index 39c6ee8..dcee0f1 100644
48582 --- a/fs/debugfs/inode.c
48583 +++ b/fs/debugfs/inode.c
48584 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48585 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48586 {
48587 return debugfs_create_file(name,
48588 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48589 + S_IFDIR | S_IRWXU,
48590 +#else
48591 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48592 +#endif
48593 parent, NULL, NULL);
48594 }
48595 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48596 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48597 index c010ecf..a8d8c59 100644
48598 --- a/fs/dlm/lockspace.c
48599 +++ b/fs/dlm/lockspace.c
48600 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48601 kfree(ls);
48602 }
48603
48604 -static struct sysfs_ops dlm_attr_ops = {
48605 +static const struct sysfs_ops dlm_attr_ops = {
48606 .show = dlm_attr_show,
48607 .store = dlm_attr_store,
48608 };
48609 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48610 index 7a5f1ac..62fa913 100644
48611 --- a/fs/ecryptfs/crypto.c
48612 +++ b/fs/ecryptfs/crypto.c
48613 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48614 rc);
48615 goto out;
48616 }
48617 - if (unlikely(ecryptfs_verbosity > 0)) {
48618 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48619 - "with iv:\n");
48620 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48621 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48622 - "encryption:\n");
48623 - ecryptfs_dump_hex((char *)
48624 - (page_address(page)
48625 - + (extent_offset * crypt_stat->extent_size)),
48626 - 8);
48627 - }
48628 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48629 page, (extent_offset
48630 * crypt_stat->extent_size),
48631 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48632 goto out;
48633 }
48634 rc = 0;
48635 - if (unlikely(ecryptfs_verbosity > 0)) {
48636 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48637 - "rc = [%d]\n", (extent_base + extent_offset),
48638 - rc);
48639 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48640 - "encryption:\n");
48641 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48642 - }
48643 out:
48644 return rc;
48645 }
48646 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48647 rc);
48648 goto out;
48649 }
48650 - if (unlikely(ecryptfs_verbosity > 0)) {
48651 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48652 - "with iv:\n");
48653 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48654 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48655 - "decryption:\n");
48656 - ecryptfs_dump_hex((char *)
48657 - (page_address(enc_extent_page)
48658 - + (extent_offset * crypt_stat->extent_size)),
48659 - 8);
48660 - }
48661 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48662 (extent_offset
48663 * crypt_stat->extent_size),
48664 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48665 goto out;
48666 }
48667 rc = 0;
48668 - if (unlikely(ecryptfs_verbosity > 0)) {
48669 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48670 - "rc = [%d]\n", (extent_base + extent_offset),
48671 - rc);
48672 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48673 - "decryption:\n");
48674 - ecryptfs_dump_hex((char *)(page_address(page)
48675 - + (extent_offset
48676 - * crypt_stat->extent_size)), 8);
48677 - }
48678 out:
48679 return rc;
48680 }
48681 @@ -1455,6 +1415,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
48682 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
48683 }
48684
48685 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
48686 +{
48687 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
48688 + struct ecryptfs_crypt_stat *crypt_stat;
48689 + u64 file_size;
48690 +
48691 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
48692 + mount_crypt_stat =
48693 + &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
48694 + if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
48695 + file_size = i_size_read(ecryptfs_inode_to_lower(inode));
48696 + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
48697 + file_size += crypt_stat->num_header_bytes_at_front;
48698 + } else
48699 + file_size = get_unaligned_be64(page_virt);
48700 + i_size_write(inode, (loff_t)file_size);
48701 + crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
48702 +}
48703 +
48704 /**
48705 * ecryptfs_read_headers_virt
48706 * @page_virt: The virtual address into which to read the headers
48707 @@ -1485,6 +1464,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
48708 rc = -EINVAL;
48709 goto out;
48710 }
48711 + if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
48712 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
48713 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
48714 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
48715 &bytes_read);
48716 diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
48717 index 542f625..9685315 100644
48718 --- a/fs/ecryptfs/ecryptfs_kernel.h
48719 +++ b/fs/ecryptfs/ecryptfs_kernel.h
48720 @@ -270,6 +270,7 @@ struct ecryptfs_crypt_stat {
48721 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
48722 #define ECRYPTFS_ENCFN_USE_FEK 0x00002000
48723 #define ECRYPTFS_UNLINK_SIGS 0x00004000
48724 +#define ECRYPTFS_I_SIZE_INITIALIZED 0x00008000
48725 u32 flags;
48726 unsigned int file_version;
48727 size_t iv_bytes;
48728 @@ -619,6 +620,7 @@ struct ecryptfs_open_req {
48729 int ecryptfs_interpose(struct dentry *hidden_dentry,
48730 struct dentry *this_dentry, struct super_block *sb,
48731 u32 flags);
48732 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
48733 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48734 struct dentry *lower_dentry,
48735 struct inode *ecryptfs_dir_inode,
48736 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
48737 index 3015389..502b09f 100644
48738 --- a/fs/ecryptfs/file.c
48739 +++ b/fs/ecryptfs/file.c
48740 @@ -237,7 +237,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
48741 goto out_free;
48742 }
48743 rc = 0;
48744 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
48745 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
48746 + | ECRYPTFS_ENCRYPTED);
48747 mutex_unlock(&crypt_stat->cs_mutex);
48748 goto out;
48749 }
48750 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48751 index 4434e8f..fa05803 100644
48752 --- a/fs/ecryptfs/inode.c
48753 +++ b/fs/ecryptfs/inode.c
48754 @@ -256,10 +256,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48755 struct dentry *lower_dir_dentry;
48756 struct vfsmount *lower_mnt;
48757 struct inode *lower_inode;
48758 - struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
48759 struct ecryptfs_crypt_stat *crypt_stat;
48760 char *page_virt = NULL;
48761 - u64 file_size;
48762 int rc = 0;
48763
48764 lower_dir_dentry = lower_dentry->d_parent;
48765 @@ -334,18 +332,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
48766 }
48767 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
48768 }
48769 - mount_crypt_stat = &ecryptfs_superblock_to_private(
48770 - ecryptfs_dentry->d_sb)->mount_crypt_stat;
48771 - if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
48772 - if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
48773 - file_size = (crypt_stat->num_header_bytes_at_front
48774 - + i_size_read(lower_dentry->d_inode));
48775 - else
48776 - file_size = i_size_read(lower_dentry->d_inode);
48777 - } else {
48778 - file_size = get_unaligned_be64(page_virt);
48779 - }
48780 - i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
48781 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
48782 out_free_kmem:
48783 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
48784 goto out;
48785 @@ -660,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48786 old_fs = get_fs();
48787 set_fs(get_ds());
48788 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48789 - (char __user *)lower_buf,
48790 + (char __force_user *)lower_buf,
48791 lower_bufsiz);
48792 set_fs(old_fs);
48793 if (rc < 0)
48794 @@ -706,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48795 }
48796 old_fs = get_fs();
48797 set_fs(get_ds());
48798 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48799 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48800 set_fs(old_fs);
48801 if (rc < 0)
48802 goto out_free;
48803 @@ -964,7 +951,8 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
48804 goto out;
48805 }
48806 rc = 0;
48807 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
48808 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
48809 + | ECRYPTFS_ENCRYPTED);
48810 }
48811 }
48812 mutex_unlock(&crypt_stat->cs_mutex);
48813 diff --git a/fs/exec.c b/fs/exec.c
48814 index 86fafc6..6272c0e 100644
48815 --- a/fs/exec.c
48816 +++ b/fs/exec.c
48817 @@ -56,12 +56,28 @@
48818 #include <linux/fsnotify.h>
48819 #include <linux/fs_struct.h>
48820 #include <linux/pipe_fs_i.h>
48821 +#include <linux/random.h>
48822 +#include <linux/seq_file.h>
48823 +
48824 +#ifdef CONFIG_PAX_REFCOUNT
48825 +#include <linux/kallsyms.h>
48826 +#include <linux/kdebug.h>
48827 +#endif
48828
48829 #include <asm/uaccess.h>
48830 #include <asm/mmu_context.h>
48831 #include <asm/tlb.h>
48832 #include "internal.h"
48833
48834 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48835 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48836 +#endif
48837 +
48838 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48839 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48840 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48841 +#endif
48842 +
48843 int core_uses_pid;
48844 char core_pattern[CORENAME_MAX_SIZE] = "core";
48845 unsigned int core_pipe_limit;
48846 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48847 int write)
48848 {
48849 struct page *page;
48850 - int ret;
48851
48852 -#ifdef CONFIG_STACK_GROWSUP
48853 - if (write) {
48854 - ret = expand_stack_downwards(bprm->vma, pos);
48855 - if (ret < 0)
48856 - return NULL;
48857 - }
48858 -#endif
48859 - ret = get_user_pages(current, bprm->mm, pos,
48860 - 1, write, 1, &page, NULL);
48861 - if (ret <= 0)
48862 + if (0 > expand_stack_downwards(bprm->vma, pos))
48863 + return NULL;
48864 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48865 return NULL;
48866
48867 if (write) {
48868 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48869 if (size <= ARG_MAX)
48870 return page;
48871
48872 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48873 + // only allow 1MB for argv+env on suid/sgid binaries
48874 + // to prevent easy ASLR exhaustion
48875 + if (((bprm->cred->euid != current_euid()) ||
48876 + (bprm->cred->egid != current_egid())) &&
48877 + (size > (1024 * 1024))) {
48878 + put_page(page);
48879 + return NULL;
48880 + }
48881 +#endif
48882 +
48883 /*
48884 * Limit to 1/4-th the stack size for the argv+env strings.
48885 * This ensures that:
48886 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48887 vma->vm_end = STACK_TOP_MAX;
48888 vma->vm_start = vma->vm_end - PAGE_SIZE;
48889 vma->vm_flags = VM_STACK_FLAGS;
48890 +
48891 +#ifdef CONFIG_PAX_SEGMEXEC
48892 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48893 +#endif
48894 +
48895 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48896
48897 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48898 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48899 mm->stack_vm = mm->total_vm = 1;
48900 up_write(&mm->mmap_sem);
48901 bprm->p = vma->vm_end - sizeof(void *);
48902 +
48903 +#ifdef CONFIG_PAX_RANDUSTACK
48904 + if (randomize_va_space)
48905 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48906 +#endif
48907 +
48908 return 0;
48909 err:
48910 up_write(&mm->mmap_sem);
48911 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48912 int r;
48913 mm_segment_t oldfs = get_fs();
48914 set_fs(KERNEL_DS);
48915 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48916 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48917 set_fs(oldfs);
48918 return r;
48919 }
48920 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48921 unsigned long new_end = old_end - shift;
48922 struct mmu_gather *tlb;
48923
48924 - BUG_ON(new_start > new_end);
48925 + if (new_start >= new_end || new_start < mmap_min_addr)
48926 + return -ENOMEM;
48927
48928 /*
48929 * ensure there are no vmas between where we want to go
48930 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48931 if (vma != find_vma(mm, new_start))
48932 return -EFAULT;
48933
48934 +#ifdef CONFIG_PAX_SEGMEXEC
48935 + BUG_ON(pax_find_mirror_vma(vma));
48936 +#endif
48937 +
48938 /*
48939 * cover the whole range: [new_start, old_end)
48940 */
48941 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48942 stack_top = arch_align_stack(stack_top);
48943 stack_top = PAGE_ALIGN(stack_top);
48944
48945 - if (unlikely(stack_top < mmap_min_addr) ||
48946 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48947 - return -ENOMEM;
48948 -
48949 stack_shift = vma->vm_end - stack_top;
48950
48951 bprm->p -= stack_shift;
48952 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48953 bprm->exec -= stack_shift;
48954
48955 down_write(&mm->mmap_sem);
48956 +
48957 + /* Move stack pages down in memory. */
48958 + if (stack_shift) {
48959 + ret = shift_arg_pages(vma, stack_shift);
48960 + if (ret)
48961 + goto out_unlock;
48962 + }
48963 +
48964 vm_flags = VM_STACK_FLAGS;
48965
48966 /*
48967 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48968 vm_flags &= ~VM_EXEC;
48969 vm_flags |= mm->def_flags;
48970
48971 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48972 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48973 + vm_flags &= ~VM_EXEC;
48974 +
48975 +#ifdef CONFIG_PAX_MPROTECT
48976 + if (mm->pax_flags & MF_PAX_MPROTECT)
48977 + vm_flags &= ~VM_MAYEXEC;
48978 +#endif
48979 +
48980 + }
48981 +#endif
48982 +
48983 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48984 vm_flags);
48985 if (ret)
48986 goto out_unlock;
48987 BUG_ON(prev != vma);
48988
48989 - /* Move stack pages down in memory. */
48990 - if (stack_shift) {
48991 - ret = shift_arg_pages(vma, stack_shift);
48992 - if (ret)
48993 - goto out_unlock;
48994 - }
48995 -
48996 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48997 stack_size = vma->vm_end - vma->vm_start;
48998 /*
48999 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
49000 old_fs = get_fs();
49001 set_fs(get_ds());
49002 /* The cast to a user pointer is valid due to the set_fs() */
49003 - result = vfs_read(file, (void __user *)addr, count, &pos);
49004 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
49005 set_fs(old_fs);
49006 return result;
49007 }
49008 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
49009 perf_event_comm(tsk);
49010 }
49011
49012 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
49013 +{
49014 + int i, ch;
49015 +
49016 + /* Copies the binary name from after last slash */
49017 + for (i = 0; (ch = *(fn++)) != '\0';) {
49018 + if (ch == '/')
49019 + i = 0; /* overwrite what we wrote */
49020 + else
49021 + if (i < len - 1)
49022 + tcomm[i++] = ch;
49023 + }
49024 + tcomm[i] = '\0';
49025 +}
49026 +
49027 int flush_old_exec(struct linux_binprm * bprm)
49028 {
49029 int retval;
49030 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
49031
49032 set_mm_exe_file(bprm->mm, bprm->file);
49033
49034 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
49035 /*
49036 * Release all of the old mmap stuff
49037 */
49038 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
49039
49040 void setup_new_exec(struct linux_binprm * bprm)
49041 {
49042 - int i, ch;
49043 - char * name;
49044 - char tcomm[sizeof(current->comm)];
49045 -
49046 arch_pick_mmap_layout(current->mm);
49047
49048 /* This is the point of no return */
49049 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
49050 else
49051 set_dumpable(current->mm, suid_dumpable);
49052
49053 - name = bprm->filename;
49054 -
49055 - /* Copies the binary name from after last slash */
49056 - for (i=0; (ch = *(name++)) != '\0';) {
49057 - if (ch == '/')
49058 - i = 0; /* overwrite what we wrote */
49059 - else
49060 - if (i < (sizeof(tcomm) - 1))
49061 - tcomm[i++] = ch;
49062 - }
49063 - tcomm[i] = '\0';
49064 - set_task_comm(current, tcomm);
49065 + set_task_comm(current, bprm->tcomm);
49066
49067 /* Set the new mm task size. We have to do that late because it may
49068 * depend on TIF_32BIT which is only updated in flush_thread() on
49069 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
49070 }
49071 rcu_read_unlock();
49072
49073 - if (p->fs->users > n_fs) {
49074 + if (atomic_read(&p->fs->users) > n_fs) {
49075 bprm->unsafe |= LSM_UNSAFE_SHARE;
49076 } else {
49077 res = -EAGAIN;
49078 @@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
49079
49080 EXPORT_SYMBOL(search_binary_handler);
49081
49082 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49083 +atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
49084 +#endif
49085 +
49086 /*
49087 * sys_execve() executes a new program.
49088 */
49089 @@ -1347,11 +1396,35 @@ int do_execve(char * filename,
49090 char __user *__user *envp,
49091 struct pt_regs * regs)
49092 {
49093 +#ifdef CONFIG_GRKERNSEC
49094 + struct file *old_exec_file;
49095 + struct acl_subject_label *old_acl;
49096 + struct rlimit old_rlim[RLIM_NLIMITS];
49097 +#endif
49098 struct linux_binprm *bprm;
49099 struct file *file;
49100 struct files_struct *displaced;
49101 bool clear_in_exec;
49102 int retval;
49103 + const struct cred *cred = current_cred();
49104 +
49105 + /*
49106 + * We move the actual failure in case of RLIMIT_NPROC excess from
49107 + * set*uid() to execve() because too many poorly written programs
49108 + * don't check setuid() return code. Here we additionally recheck
49109 + * whether NPROC limit is still exceeded.
49110 + */
49111 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49112 +
49113 + if ((current->flags & PF_NPROC_EXCEEDED) &&
49114 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
49115 + retval = -EAGAIN;
49116 + goto out_ret;
49117 + }
49118 +
49119 + /* We're below the limit (still or again), so we don't want to make
49120 + * further execve() calls fail. */
49121 + current->flags &= ~PF_NPROC_EXCEEDED;
49122
49123 retval = unshare_files(&displaced);
49124 if (retval)
49125 @@ -1377,12 +1450,27 @@ int do_execve(char * filename,
49126 if (IS_ERR(file))
49127 goto out_unmark;
49128
49129 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
49130 + retval = -EPERM;
49131 + goto out_file;
49132 + }
49133 +
49134 sched_exec();
49135
49136 bprm->file = file;
49137 bprm->filename = filename;
49138 bprm->interp = filename;
49139
49140 + if (gr_process_user_ban()) {
49141 + retval = -EPERM;
49142 + goto out_file;
49143 + }
49144 +
49145 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49146 + retval = -EACCES;
49147 + goto out_file;
49148 + }
49149 +
49150 retval = bprm_mm_init(bprm);
49151 if (retval)
49152 goto out_file;
49153 @@ -1412,12 +1500,47 @@ int do_execve(char * filename,
49154 if (retval < 0)
49155 goto out;
49156
49157 + if (!gr_tpe_allow(file)) {
49158 + retval = -EACCES;
49159 + goto out;
49160 + }
49161 +
49162 + if (gr_check_crash_exec(file)) {
49163 + retval = -EACCES;
49164 + goto out;
49165 + }
49166 +
49167 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49168 +
49169 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49170 +
49171 +#ifdef CONFIG_GRKERNSEC
49172 + old_acl = current->acl;
49173 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49174 + old_exec_file = current->exec_file;
49175 + get_file(file);
49176 + current->exec_file = file;
49177 +#endif
49178 +
49179 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49180 + bprm->unsafe);
49181 + if (retval < 0)
49182 + goto out_fail;
49183 +
49184 current->flags &= ~PF_KTHREAD;
49185 retval = search_binary_handler(bprm,regs);
49186 if (retval < 0)
49187 - goto out;
49188 + goto out_fail;
49189 +#ifdef CONFIG_GRKERNSEC
49190 + if (old_exec_file)
49191 + fput(old_exec_file);
49192 +#endif
49193
49194 /* execve succeeded */
49195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49196 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49197 +#endif
49198 +
49199 current->fs->in_exec = 0;
49200 current->in_execve = 0;
49201 acct_update_integrals(current);
49202 @@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49203 put_files_struct(displaced);
49204 return retval;
49205
49206 +out_fail:
49207 +#ifdef CONFIG_GRKERNSEC
49208 + current->acl = old_acl;
49209 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49210 + fput(current->exec_file);
49211 + current->exec_file = old_exec_file;
49212 +#endif
49213 +
49214 out:
49215 if (bprm->mm) {
49216 acct_arg_size(bprm, 0);
49217 @@ -1591,6 +1722,220 @@ out:
49218 return ispipe;
49219 }
49220
49221 +int pax_check_flags(unsigned long *flags)
49222 +{
49223 + int retval = 0;
49224 +
49225 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49226 + if (*flags & MF_PAX_SEGMEXEC)
49227 + {
49228 + *flags &= ~MF_PAX_SEGMEXEC;
49229 + retval = -EINVAL;
49230 + }
49231 +#endif
49232 +
49233 + if ((*flags & MF_PAX_PAGEEXEC)
49234 +
49235 +#ifdef CONFIG_PAX_PAGEEXEC
49236 + && (*flags & MF_PAX_SEGMEXEC)
49237 +#endif
49238 +
49239 + )
49240 + {
49241 + *flags &= ~MF_PAX_PAGEEXEC;
49242 + retval = -EINVAL;
49243 + }
49244 +
49245 + if ((*flags & MF_PAX_MPROTECT)
49246 +
49247 +#ifdef CONFIG_PAX_MPROTECT
49248 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49249 +#endif
49250 +
49251 + )
49252 + {
49253 + *flags &= ~MF_PAX_MPROTECT;
49254 + retval = -EINVAL;
49255 + }
49256 +
49257 + if ((*flags & MF_PAX_EMUTRAMP)
49258 +
49259 +#ifdef CONFIG_PAX_EMUTRAMP
49260 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49261 +#endif
49262 +
49263 + )
49264 + {
49265 + *flags &= ~MF_PAX_EMUTRAMP;
49266 + retval = -EINVAL;
49267 + }
49268 +
49269 + return retval;
49270 +}
49271 +
49272 +EXPORT_SYMBOL(pax_check_flags);
49273 +
49274 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49275 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49276 +{
49277 + struct task_struct *tsk = current;
49278 + struct mm_struct *mm = current->mm;
49279 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49280 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49281 + char *path_exec = NULL;
49282 + char *path_fault = NULL;
49283 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49284 +
49285 + if (buffer_exec && buffer_fault) {
49286 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49287 +
49288 + down_read(&mm->mmap_sem);
49289 + vma = mm->mmap;
49290 + while (vma && (!vma_exec || !vma_fault)) {
49291 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49292 + vma_exec = vma;
49293 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49294 + vma_fault = vma;
49295 + vma = vma->vm_next;
49296 + }
49297 + if (vma_exec) {
49298 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49299 + if (IS_ERR(path_exec))
49300 + path_exec = "<path too long>";
49301 + else {
49302 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49303 + if (path_exec) {
49304 + *path_exec = 0;
49305 + path_exec = buffer_exec;
49306 + } else
49307 + path_exec = "<path too long>";
49308 + }
49309 + }
49310 + if (vma_fault) {
49311 + start = vma_fault->vm_start;
49312 + end = vma_fault->vm_end;
49313 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49314 + if (vma_fault->vm_file) {
49315 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49316 + if (IS_ERR(path_fault))
49317 + path_fault = "<path too long>";
49318 + else {
49319 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49320 + if (path_fault) {
49321 + *path_fault = 0;
49322 + path_fault = buffer_fault;
49323 + } else
49324 + path_fault = "<path too long>";
49325 + }
49326 + } else
49327 + path_fault = "<anonymous mapping>";
49328 + }
49329 + up_read(&mm->mmap_sem);
49330 + }
49331 + if (tsk->signal->curr_ip)
49332 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49333 + else
49334 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49335 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49336 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49337 + task_uid(tsk), task_euid(tsk), pc, sp);
49338 + free_page((unsigned long)buffer_exec);
49339 + free_page((unsigned long)buffer_fault);
49340 + pax_report_insns(regs, pc, sp);
49341 + do_coredump(SIGKILL, SIGKILL, regs);
49342 +}
49343 +#endif
49344 +
49345 +#ifdef CONFIG_PAX_REFCOUNT
49346 +void pax_report_refcount_overflow(struct pt_regs *regs)
49347 +{
49348 + if (current->signal->curr_ip)
49349 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49350 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49351 + else
49352 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49353 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49354 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49355 + show_regs(regs);
49356 + force_sig_specific(SIGKILL, current);
49357 +}
49358 +#endif
49359 +
49360 +#ifdef CONFIG_PAX_USERCOPY
49361 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49362 +int object_is_on_stack(const void *obj, unsigned long len)
49363 +{
49364 + const void * const stack = task_stack_page(current);
49365 + const void * const stackend = stack + THREAD_SIZE;
49366 +
49367 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49368 + const void *frame = NULL;
49369 + const void *oldframe;
49370 +#endif
49371 +
49372 + if (obj + len < obj)
49373 + return -1;
49374 +
49375 + if (obj + len <= stack || stackend <= obj)
49376 + return 0;
49377 +
49378 + if (obj < stack || stackend < obj + len)
49379 + return -1;
49380 +
49381 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49382 + oldframe = __builtin_frame_address(1);
49383 + if (oldframe)
49384 + frame = __builtin_frame_address(2);
49385 + /*
49386 + low ----------------------------------------------> high
49387 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49388 + ^----------------^
49389 + allow copies only within here
49390 + */
49391 + while (stack <= frame && frame < stackend) {
49392 + /* if obj + len extends past the last frame, this
49393 + check won't pass and the next frame will be 0,
49394 + causing us to bail out and correctly report
49395 + the copy as invalid
49396 + */
49397 + if (obj + len <= frame)
49398 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49399 + oldframe = frame;
49400 + frame = *(const void * const *)frame;
49401 + }
49402 + return -1;
49403 +#else
49404 + return 1;
49405 +#endif
49406 +}
49407 +
49408 +
49409 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49410 +{
49411 + if (current->signal->curr_ip)
49412 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49413 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49414 + else
49415 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49416 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49417 +
49418 + dump_stack();
49419 + gr_handle_kernel_exploit();
49420 + do_group_exit(SIGKILL);
49421 +}
49422 +#endif
49423 +
49424 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49425 +void pax_track_stack(void)
49426 +{
49427 + unsigned long sp = (unsigned long)&sp;
49428 + if (sp < current_thread_info()->lowest_stack &&
49429 + sp > (unsigned long)task_stack_page(current))
49430 + current_thread_info()->lowest_stack = sp;
49431 +}
49432 +EXPORT_SYMBOL(pax_track_stack);
49433 +#endif
49434 +
49435 static int zap_process(struct task_struct *start)
49436 {
49437 struct task_struct *t;
49438 @@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49439 pipe = file->f_path.dentry->d_inode->i_pipe;
49440
49441 pipe_lock(pipe);
49442 - pipe->readers++;
49443 - pipe->writers--;
49444 + atomic_inc(&pipe->readers);
49445 + atomic_dec(&pipe->writers);
49446
49447 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49448 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49449 wake_up_interruptible_sync(&pipe->wait);
49450 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49451 pipe_wait(pipe);
49452 }
49453
49454 - pipe->readers--;
49455 - pipe->writers++;
49456 + atomic_dec(&pipe->readers);
49457 + atomic_inc(&pipe->writers);
49458 pipe_unlock(pipe);
49459
49460 }
49461 @@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49462 char **helper_argv = NULL;
49463 int helper_argc = 0;
49464 int dump_count = 0;
49465 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49466 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49467
49468 audit_core_dumps(signr);
49469
49470 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49471 + gr_handle_brute_attach(current, mm->flags);
49472 +
49473 binfmt = mm->binfmt;
49474 if (!binfmt || !binfmt->core_dump)
49475 goto fail;
49476 @@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49477 */
49478 clear_thread_flag(TIF_SIGPENDING);
49479
49480 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49481 +
49482 /*
49483 * lock_kernel() because format_corename() is controlled by sysctl, which
49484 * uses lock_kernel()
49485 @@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49486 goto fail_unlock;
49487 }
49488
49489 - dump_count = atomic_inc_return(&core_dump_count);
49490 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49491 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49492 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49493 task_tgid_vnr(current), current->comm);
49494 @@ -1972,7 +2322,7 @@ close_fail:
49495 filp_close(file, NULL);
49496 fail_dropcount:
49497 if (dump_count)
49498 - atomic_dec(&core_dump_count);
49499 + atomic_dec_unchecked(&core_dump_count);
49500 fail_unlock:
49501 if (helper_argv)
49502 argv_free(helper_argv);
49503 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49504 index 7f8d2e5..a1abdbb 100644
49505 --- a/fs/ext2/balloc.c
49506 +++ b/fs/ext2/balloc.c
49507 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49508
49509 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49510 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49511 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49512 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49513 sbi->s_resuid != current_fsuid() &&
49514 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49515 return 0;
49516 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49517 index 27967f9..9f2a5fb 100644
49518 --- a/fs/ext3/balloc.c
49519 +++ b/fs/ext3/balloc.c
49520 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49521
49522 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49523 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49524 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49525 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49526 sbi->s_resuid != current_fsuid() &&
49527 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49528 return 0;
49529 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49530 index e85b63c..80398e6 100644
49531 --- a/fs/ext4/balloc.c
49532 +++ b/fs/ext4/balloc.c
49533 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49534 /* Hm, nope. Are (enough) root reserved blocks available? */
49535 if (sbi->s_resuid == current_fsuid() ||
49536 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49537 - capable(CAP_SYS_RESOURCE)) {
49538 + capable_nolog(CAP_SYS_RESOURCE)) {
49539 if (free_blocks >= (nblocks + dirty_blocks))
49540 return 1;
49541 }
49542 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49543 index 67c46ed..1f237e5 100644
49544 --- a/fs/ext4/ext4.h
49545 +++ b/fs/ext4/ext4.h
49546 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49547
49548 /* stats for buddy allocator */
49549 spinlock_t s_mb_pa_lock;
49550 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49551 - atomic_t s_bal_success; /* we found long enough chunks */
49552 - atomic_t s_bal_allocated; /* in blocks */
49553 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49554 - atomic_t s_bal_goals; /* goal hits */
49555 - atomic_t s_bal_breaks; /* too long searches */
49556 - atomic_t s_bal_2orders; /* 2^order hits */
49557 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49558 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49559 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49560 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49561 + atomic_unchecked_t s_bal_goals; /* goal hits */
49562 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49563 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49564 spinlock_t s_bal_lock;
49565 unsigned long s_mb_buddies_generated;
49566 unsigned long long s_mb_generation_time;
49567 - atomic_t s_mb_lost_chunks;
49568 - atomic_t s_mb_preallocated;
49569 - atomic_t s_mb_discarded;
49570 + atomic_unchecked_t s_mb_lost_chunks;
49571 + atomic_unchecked_t s_mb_preallocated;
49572 + atomic_unchecked_t s_mb_discarded;
49573 atomic_t s_lock_busy;
49574
49575 /* locality groups */
49576 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49577 index 2a60541..7439d61 100644
49578 --- a/fs/ext4/file.c
49579 +++ b/fs/ext4/file.c
49580 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49581 cp = d_path(&path, buf, sizeof(buf));
49582 path_put(&path);
49583 if (!IS_ERR(cp)) {
49584 - memcpy(sbi->s_es->s_last_mounted, cp,
49585 - sizeof(sbi->s_es->s_last_mounted));
49586 + strlcpy(sbi->s_es->s_last_mounted, cp,
49587 + sizeof(sbi->s_es->s_last_mounted));
49588 sb->s_dirt = 1;
49589 }
49590 }
49591 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49592 index 42bac1b..0aab9d8 100644
49593 --- a/fs/ext4/mballoc.c
49594 +++ b/fs/ext4/mballoc.c
49595 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49596 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49597
49598 if (EXT4_SB(sb)->s_mb_stats)
49599 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49600 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49601
49602 break;
49603 }
49604 @@ -2131,7 +2131,7 @@ repeat:
49605 ac->ac_status = AC_STATUS_CONTINUE;
49606 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49607 cr = 3;
49608 - atomic_inc(&sbi->s_mb_lost_chunks);
49609 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49610 goto repeat;
49611 }
49612 }
49613 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49614 ext4_grpblk_t counters[16];
49615 } sg;
49616
49617 + pax_track_stack();
49618 +
49619 group--;
49620 if (group == 0)
49621 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49622 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49623 if (sbi->s_mb_stats) {
49624 printk(KERN_INFO
49625 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49626 - atomic_read(&sbi->s_bal_allocated),
49627 - atomic_read(&sbi->s_bal_reqs),
49628 - atomic_read(&sbi->s_bal_success));
49629 + atomic_read_unchecked(&sbi->s_bal_allocated),
49630 + atomic_read_unchecked(&sbi->s_bal_reqs),
49631 + atomic_read_unchecked(&sbi->s_bal_success));
49632 printk(KERN_INFO
49633 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49634 "%u 2^N hits, %u breaks, %u lost\n",
49635 - atomic_read(&sbi->s_bal_ex_scanned),
49636 - atomic_read(&sbi->s_bal_goals),
49637 - atomic_read(&sbi->s_bal_2orders),
49638 - atomic_read(&sbi->s_bal_breaks),
49639 - atomic_read(&sbi->s_mb_lost_chunks));
49640 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49641 + atomic_read_unchecked(&sbi->s_bal_goals),
49642 + atomic_read_unchecked(&sbi->s_bal_2orders),
49643 + atomic_read_unchecked(&sbi->s_bal_breaks),
49644 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49645 printk(KERN_INFO
49646 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49647 sbi->s_mb_buddies_generated++,
49648 sbi->s_mb_generation_time);
49649 printk(KERN_INFO
49650 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49651 - atomic_read(&sbi->s_mb_preallocated),
49652 - atomic_read(&sbi->s_mb_discarded));
49653 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49654 + atomic_read_unchecked(&sbi->s_mb_discarded));
49655 }
49656
49657 free_percpu(sbi->s_locality_groups);
49658 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49659 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49660
49661 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49662 - atomic_inc(&sbi->s_bal_reqs);
49663 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49664 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49665 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49666 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49667 - atomic_inc(&sbi->s_bal_success);
49668 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49669 + atomic_inc_unchecked(&sbi->s_bal_success);
49670 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49671 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49672 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49673 - atomic_inc(&sbi->s_bal_goals);
49674 + atomic_inc_unchecked(&sbi->s_bal_goals);
49675 if (ac->ac_found > sbi->s_mb_max_to_scan)
49676 - atomic_inc(&sbi->s_bal_breaks);
49677 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49678 }
49679
49680 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49681 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49682 trace_ext4_mb_new_inode_pa(ac, pa);
49683
49684 ext4_mb_use_inode_pa(ac, pa);
49685 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49686 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49687
49688 ei = EXT4_I(ac->ac_inode);
49689 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49690 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49691 trace_ext4_mb_new_group_pa(ac, pa);
49692
49693 ext4_mb_use_group_pa(ac, pa);
49694 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49695 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49696
49697 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49698 lg = ac->ac_lg;
49699 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49700 * from the bitmap and continue.
49701 */
49702 }
49703 - atomic_add(free, &sbi->s_mb_discarded);
49704 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49705
49706 return err;
49707 }
49708 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49709 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49710 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49711 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49712 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49713 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49714
49715 if (ac) {
49716 ac->ac_sb = sb;
49717 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49718 index f1e7077..edd86b2 100644
49719 --- a/fs/ext4/super.c
49720 +++ b/fs/ext4/super.c
49721 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49722 }
49723
49724
49725 -static struct sysfs_ops ext4_attr_ops = {
49726 +static const struct sysfs_ops ext4_attr_ops = {
49727 .show = ext4_attr_show,
49728 .store = ext4_attr_store,
49729 };
49730 diff --git a/fs/fcntl.c b/fs/fcntl.c
49731 index 97e01dc..e9aab2d 100644
49732 --- a/fs/fcntl.c
49733 +++ b/fs/fcntl.c
49734 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49735 if (err)
49736 return err;
49737
49738 + if (gr_handle_chroot_fowner(pid, type))
49739 + return -ENOENT;
49740 + if (gr_check_protected_task_fowner(pid, type))
49741 + return -EACCES;
49742 +
49743 f_modown(filp, pid, type, force);
49744 return 0;
49745 }
49746 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49747
49748 static int f_setown_ex(struct file *filp, unsigned long arg)
49749 {
49750 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49751 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49752 struct f_owner_ex owner;
49753 struct pid *pid;
49754 int type;
49755 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49756
49757 static int f_getown_ex(struct file *filp, unsigned long arg)
49758 {
49759 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49760 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49761 struct f_owner_ex owner;
49762 int ret = 0;
49763
49764 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49765 switch (cmd) {
49766 case F_DUPFD:
49767 case F_DUPFD_CLOEXEC:
49768 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49769 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49770 break;
49771 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49772 diff --git a/fs/fifo.c b/fs/fifo.c
49773 index f8f97b8..b1f2259 100644
49774 --- a/fs/fifo.c
49775 +++ b/fs/fifo.c
49776 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49777 */
49778 filp->f_op = &read_pipefifo_fops;
49779 pipe->r_counter++;
49780 - if (pipe->readers++ == 0)
49781 + if (atomic_inc_return(&pipe->readers) == 1)
49782 wake_up_partner(inode);
49783
49784 - if (!pipe->writers) {
49785 + if (!atomic_read(&pipe->writers)) {
49786 if ((filp->f_flags & O_NONBLOCK)) {
49787 /* suppress POLLHUP until we have
49788 * seen a writer */
49789 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49790 * errno=ENXIO when there is no process reading the FIFO.
49791 */
49792 ret = -ENXIO;
49793 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49794 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49795 goto err;
49796
49797 filp->f_op = &write_pipefifo_fops;
49798 pipe->w_counter++;
49799 - if (!pipe->writers++)
49800 + if (atomic_inc_return(&pipe->writers) == 1)
49801 wake_up_partner(inode);
49802
49803 - if (!pipe->readers) {
49804 + if (!atomic_read(&pipe->readers)) {
49805 wait_for_partner(inode, &pipe->r_counter);
49806 if (signal_pending(current))
49807 goto err_wr;
49808 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49809 */
49810 filp->f_op = &rdwr_pipefifo_fops;
49811
49812 - pipe->readers++;
49813 - pipe->writers++;
49814 + atomic_inc(&pipe->readers);
49815 + atomic_inc(&pipe->writers);
49816 pipe->r_counter++;
49817 pipe->w_counter++;
49818 - if (pipe->readers == 1 || pipe->writers == 1)
49819 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49820 wake_up_partner(inode);
49821 break;
49822
49823 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49824 return 0;
49825
49826 err_rd:
49827 - if (!--pipe->readers)
49828 + if (atomic_dec_and_test(&pipe->readers))
49829 wake_up_interruptible(&pipe->wait);
49830 ret = -ERESTARTSYS;
49831 goto err;
49832
49833 err_wr:
49834 - if (!--pipe->writers)
49835 + if (atomic_dec_and_test(&pipe->writers))
49836 wake_up_interruptible(&pipe->wait);
49837 ret = -ERESTARTSYS;
49838 goto err;
49839
49840 err:
49841 - if (!pipe->readers && !pipe->writers)
49842 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49843 free_pipe_info(inode);
49844
49845 err_nocleanup:
49846 diff --git a/fs/file.c b/fs/file.c
49847 index 87e1290..a930cc4 100644
49848 --- a/fs/file.c
49849 +++ b/fs/file.c
49850 @@ -14,6 +14,7 @@
49851 #include <linux/slab.h>
49852 #include <linux/vmalloc.h>
49853 #include <linux/file.h>
49854 +#include <linux/security.h>
49855 #include <linux/fdtable.h>
49856 #include <linux/bitops.h>
49857 #include <linux/interrupt.h>
49858 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49859 * N.B. For clone tasks sharing a files structure, this test
49860 * will limit the total number of files that can be opened.
49861 */
49862 +
49863 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49864 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49865 return -EMFILE;
49866
49867 diff --git a/fs/filesystems.c b/fs/filesystems.c
49868 index a24c58e..53f91ee 100644
49869 --- a/fs/filesystems.c
49870 +++ b/fs/filesystems.c
49871 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49872 int len = dot ? dot - name : strlen(name);
49873
49874 fs = __get_fs_type(name, len);
49875 +
49876 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49877 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49878 +#else
49879 if (!fs && (request_module("%.*s", len, name) == 0))
49880 +#endif
49881 fs = __get_fs_type(name, len);
49882
49883 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49884 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49885 index eee0590..1181166 100644
49886 --- a/fs/fs_struct.c
49887 +++ b/fs/fs_struct.c
49888 @@ -4,6 +4,7 @@
49889 #include <linux/path.h>
49890 #include <linux/slab.h>
49891 #include <linux/fs_struct.h>
49892 +#include <linux/grsecurity.h>
49893
49894 /*
49895 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49896 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49897 old_root = fs->root;
49898 fs->root = *path;
49899 path_get(path);
49900 + gr_set_chroot_entries(current, path);
49901 write_unlock(&fs->lock);
49902 if (old_root.dentry)
49903 path_put(&old_root);
49904 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49905 && fs->root.mnt == old_root->mnt) {
49906 path_get(new_root);
49907 fs->root = *new_root;
49908 + gr_set_chroot_entries(p, new_root);
49909 count++;
49910 }
49911 if (fs->pwd.dentry == old_root->dentry
49912 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49913 task_lock(tsk);
49914 write_lock(&fs->lock);
49915 tsk->fs = NULL;
49916 - kill = !--fs->users;
49917 + gr_clear_chroot_entries(tsk);
49918 + kill = !atomic_dec_return(&fs->users);
49919 write_unlock(&fs->lock);
49920 task_unlock(tsk);
49921 if (kill)
49922 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49923 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49924 /* We don't need to lock fs - think why ;-) */
49925 if (fs) {
49926 - fs->users = 1;
49927 + atomic_set(&fs->users, 1);
49928 fs->in_exec = 0;
49929 rwlock_init(&fs->lock);
49930 fs->umask = old->umask;
49931 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49932
49933 task_lock(current);
49934 write_lock(&fs->lock);
49935 - kill = !--fs->users;
49936 + kill = !atomic_dec_return(&fs->users);
49937 current->fs = new_fs;
49938 + gr_set_chroot_entries(current, &new_fs->root);
49939 write_unlock(&fs->lock);
49940 task_unlock(current);
49941
49942 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
49943
49944 int current_umask(void)
49945 {
49946 - return current->fs->umask;
49947 + return current->fs->umask | gr_acl_umask();
49948 }
49949 EXPORT_SYMBOL(current_umask);
49950
49951 /* to be mentioned only in INIT_TASK */
49952 struct fs_struct init_fs = {
49953 - .users = 1,
49954 + .users = ATOMIC_INIT(1),
49955 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49956 .umask = 0022,
49957 };
49958 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49959 task_lock(current);
49960
49961 write_lock(&init_fs.lock);
49962 - init_fs.users++;
49963 + atomic_inc(&init_fs.users);
49964 write_unlock(&init_fs.lock);
49965
49966 write_lock(&fs->lock);
49967 current->fs = &init_fs;
49968 - kill = !--fs->users;
49969 + gr_set_chroot_entries(current, &current->fs->root);
49970 + kill = !atomic_dec_return(&fs->users);
49971 write_unlock(&fs->lock);
49972
49973 task_unlock(current);
49974 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49975 index 9905350..02eaec4 100644
49976 --- a/fs/fscache/cookie.c
49977 +++ b/fs/fscache/cookie.c
49978 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49979 parent ? (char *) parent->def->name : "<no-parent>",
49980 def->name, netfs_data);
49981
49982 - fscache_stat(&fscache_n_acquires);
49983 + fscache_stat_unchecked(&fscache_n_acquires);
49984
49985 /* if there's no parent cookie, then we don't create one here either */
49986 if (!parent) {
49987 - fscache_stat(&fscache_n_acquires_null);
49988 + fscache_stat_unchecked(&fscache_n_acquires_null);
49989 _leave(" [no parent]");
49990 return NULL;
49991 }
49992 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49993 /* allocate and initialise a cookie */
49994 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49995 if (!cookie) {
49996 - fscache_stat(&fscache_n_acquires_oom);
49997 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49998 _leave(" [ENOMEM]");
49999 return NULL;
50000 }
50001 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50002
50003 switch (cookie->def->type) {
50004 case FSCACHE_COOKIE_TYPE_INDEX:
50005 - fscache_stat(&fscache_n_cookie_index);
50006 + fscache_stat_unchecked(&fscache_n_cookie_index);
50007 break;
50008 case FSCACHE_COOKIE_TYPE_DATAFILE:
50009 - fscache_stat(&fscache_n_cookie_data);
50010 + fscache_stat_unchecked(&fscache_n_cookie_data);
50011 break;
50012 default:
50013 - fscache_stat(&fscache_n_cookie_special);
50014 + fscache_stat_unchecked(&fscache_n_cookie_special);
50015 break;
50016 }
50017
50018 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50019 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50020 atomic_dec(&parent->n_children);
50021 __fscache_cookie_put(cookie);
50022 - fscache_stat(&fscache_n_acquires_nobufs);
50023 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50024 _leave(" = NULL");
50025 return NULL;
50026 }
50027 }
50028
50029 - fscache_stat(&fscache_n_acquires_ok);
50030 + fscache_stat_unchecked(&fscache_n_acquires_ok);
50031 _leave(" = %p", cookie);
50032 return cookie;
50033 }
50034 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50035 cache = fscache_select_cache_for_object(cookie->parent);
50036 if (!cache) {
50037 up_read(&fscache_addremove_sem);
50038 - fscache_stat(&fscache_n_acquires_no_cache);
50039 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50040 _leave(" = -ENOMEDIUM [no cache]");
50041 return -ENOMEDIUM;
50042 }
50043 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50044 object = cache->ops->alloc_object(cache, cookie);
50045 fscache_stat_d(&fscache_n_cop_alloc_object);
50046 if (IS_ERR(object)) {
50047 - fscache_stat(&fscache_n_object_no_alloc);
50048 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
50049 ret = PTR_ERR(object);
50050 goto error;
50051 }
50052
50053 - fscache_stat(&fscache_n_object_alloc);
50054 + fscache_stat_unchecked(&fscache_n_object_alloc);
50055
50056 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50057
50058 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50059 struct fscache_object *object;
50060 struct hlist_node *_p;
50061
50062 - fscache_stat(&fscache_n_updates);
50063 + fscache_stat_unchecked(&fscache_n_updates);
50064
50065 if (!cookie) {
50066 - fscache_stat(&fscache_n_updates_null);
50067 + fscache_stat_unchecked(&fscache_n_updates_null);
50068 _leave(" [no cookie]");
50069 return;
50070 }
50071 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50072 struct fscache_object *object;
50073 unsigned long event;
50074
50075 - fscache_stat(&fscache_n_relinquishes);
50076 + fscache_stat_unchecked(&fscache_n_relinquishes);
50077 if (retire)
50078 - fscache_stat(&fscache_n_relinquishes_retire);
50079 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50080
50081 if (!cookie) {
50082 - fscache_stat(&fscache_n_relinquishes_null);
50083 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
50084 _leave(" [no cookie]");
50085 return;
50086 }
50087 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50088
50089 /* wait for the cookie to finish being instantiated (or to fail) */
50090 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50091 - fscache_stat(&fscache_n_relinquishes_waitcrt);
50092 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50093 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50094 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50095 }
50096 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50097 index edd7434..0725e66 100644
50098 --- a/fs/fscache/internal.h
50099 +++ b/fs/fscache/internal.h
50100 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
50101 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50102 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50103
50104 -extern atomic_t fscache_n_op_pend;
50105 -extern atomic_t fscache_n_op_run;
50106 -extern atomic_t fscache_n_op_enqueue;
50107 -extern atomic_t fscache_n_op_deferred_release;
50108 -extern atomic_t fscache_n_op_release;
50109 -extern atomic_t fscache_n_op_gc;
50110 -extern atomic_t fscache_n_op_cancelled;
50111 -extern atomic_t fscache_n_op_rejected;
50112 +extern atomic_unchecked_t fscache_n_op_pend;
50113 +extern atomic_unchecked_t fscache_n_op_run;
50114 +extern atomic_unchecked_t fscache_n_op_enqueue;
50115 +extern atomic_unchecked_t fscache_n_op_deferred_release;
50116 +extern atomic_unchecked_t fscache_n_op_release;
50117 +extern atomic_unchecked_t fscache_n_op_gc;
50118 +extern atomic_unchecked_t fscache_n_op_cancelled;
50119 +extern atomic_unchecked_t fscache_n_op_rejected;
50120
50121 -extern atomic_t fscache_n_attr_changed;
50122 -extern atomic_t fscache_n_attr_changed_ok;
50123 -extern atomic_t fscache_n_attr_changed_nobufs;
50124 -extern atomic_t fscache_n_attr_changed_nomem;
50125 -extern atomic_t fscache_n_attr_changed_calls;
50126 +extern atomic_unchecked_t fscache_n_attr_changed;
50127 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
50128 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50129 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50130 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
50131
50132 -extern atomic_t fscache_n_allocs;
50133 -extern atomic_t fscache_n_allocs_ok;
50134 -extern atomic_t fscache_n_allocs_wait;
50135 -extern atomic_t fscache_n_allocs_nobufs;
50136 -extern atomic_t fscache_n_allocs_intr;
50137 -extern atomic_t fscache_n_allocs_object_dead;
50138 -extern atomic_t fscache_n_alloc_ops;
50139 -extern atomic_t fscache_n_alloc_op_waits;
50140 +extern atomic_unchecked_t fscache_n_allocs;
50141 +extern atomic_unchecked_t fscache_n_allocs_ok;
50142 +extern atomic_unchecked_t fscache_n_allocs_wait;
50143 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
50144 +extern atomic_unchecked_t fscache_n_allocs_intr;
50145 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
50146 +extern atomic_unchecked_t fscache_n_alloc_ops;
50147 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
50148
50149 -extern atomic_t fscache_n_retrievals;
50150 -extern atomic_t fscache_n_retrievals_ok;
50151 -extern atomic_t fscache_n_retrievals_wait;
50152 -extern atomic_t fscache_n_retrievals_nodata;
50153 -extern atomic_t fscache_n_retrievals_nobufs;
50154 -extern atomic_t fscache_n_retrievals_intr;
50155 -extern atomic_t fscache_n_retrievals_nomem;
50156 -extern atomic_t fscache_n_retrievals_object_dead;
50157 -extern atomic_t fscache_n_retrieval_ops;
50158 -extern atomic_t fscache_n_retrieval_op_waits;
50159 +extern atomic_unchecked_t fscache_n_retrievals;
50160 +extern atomic_unchecked_t fscache_n_retrievals_ok;
50161 +extern atomic_unchecked_t fscache_n_retrievals_wait;
50162 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
50163 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50164 +extern atomic_unchecked_t fscache_n_retrievals_intr;
50165 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
50166 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50167 +extern atomic_unchecked_t fscache_n_retrieval_ops;
50168 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50169
50170 -extern atomic_t fscache_n_stores;
50171 -extern atomic_t fscache_n_stores_ok;
50172 -extern atomic_t fscache_n_stores_again;
50173 -extern atomic_t fscache_n_stores_nobufs;
50174 -extern atomic_t fscache_n_stores_oom;
50175 -extern atomic_t fscache_n_store_ops;
50176 -extern atomic_t fscache_n_store_calls;
50177 -extern atomic_t fscache_n_store_pages;
50178 -extern atomic_t fscache_n_store_radix_deletes;
50179 -extern atomic_t fscache_n_store_pages_over_limit;
50180 +extern atomic_unchecked_t fscache_n_stores;
50181 +extern atomic_unchecked_t fscache_n_stores_ok;
50182 +extern atomic_unchecked_t fscache_n_stores_again;
50183 +extern atomic_unchecked_t fscache_n_stores_nobufs;
50184 +extern atomic_unchecked_t fscache_n_stores_oom;
50185 +extern atomic_unchecked_t fscache_n_store_ops;
50186 +extern atomic_unchecked_t fscache_n_store_calls;
50187 +extern atomic_unchecked_t fscache_n_store_pages;
50188 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
50189 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50190
50191 -extern atomic_t fscache_n_store_vmscan_not_storing;
50192 -extern atomic_t fscache_n_store_vmscan_gone;
50193 -extern atomic_t fscache_n_store_vmscan_busy;
50194 -extern atomic_t fscache_n_store_vmscan_cancelled;
50195 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50196 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50197 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50198 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50199
50200 -extern atomic_t fscache_n_marks;
50201 -extern atomic_t fscache_n_uncaches;
50202 +extern atomic_unchecked_t fscache_n_marks;
50203 +extern atomic_unchecked_t fscache_n_uncaches;
50204
50205 -extern atomic_t fscache_n_acquires;
50206 -extern atomic_t fscache_n_acquires_null;
50207 -extern atomic_t fscache_n_acquires_no_cache;
50208 -extern atomic_t fscache_n_acquires_ok;
50209 -extern atomic_t fscache_n_acquires_nobufs;
50210 -extern atomic_t fscache_n_acquires_oom;
50211 +extern atomic_unchecked_t fscache_n_acquires;
50212 +extern atomic_unchecked_t fscache_n_acquires_null;
50213 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
50214 +extern atomic_unchecked_t fscache_n_acquires_ok;
50215 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
50216 +extern atomic_unchecked_t fscache_n_acquires_oom;
50217
50218 -extern atomic_t fscache_n_updates;
50219 -extern atomic_t fscache_n_updates_null;
50220 -extern atomic_t fscache_n_updates_run;
50221 +extern atomic_unchecked_t fscache_n_updates;
50222 +extern atomic_unchecked_t fscache_n_updates_null;
50223 +extern atomic_unchecked_t fscache_n_updates_run;
50224
50225 -extern atomic_t fscache_n_relinquishes;
50226 -extern atomic_t fscache_n_relinquishes_null;
50227 -extern atomic_t fscache_n_relinquishes_waitcrt;
50228 -extern atomic_t fscache_n_relinquishes_retire;
50229 +extern atomic_unchecked_t fscache_n_relinquishes;
50230 +extern atomic_unchecked_t fscache_n_relinquishes_null;
50231 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50232 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
50233
50234 -extern atomic_t fscache_n_cookie_index;
50235 -extern atomic_t fscache_n_cookie_data;
50236 -extern atomic_t fscache_n_cookie_special;
50237 +extern atomic_unchecked_t fscache_n_cookie_index;
50238 +extern atomic_unchecked_t fscache_n_cookie_data;
50239 +extern atomic_unchecked_t fscache_n_cookie_special;
50240
50241 -extern atomic_t fscache_n_object_alloc;
50242 -extern atomic_t fscache_n_object_no_alloc;
50243 -extern atomic_t fscache_n_object_lookups;
50244 -extern atomic_t fscache_n_object_lookups_negative;
50245 -extern atomic_t fscache_n_object_lookups_positive;
50246 -extern atomic_t fscache_n_object_lookups_timed_out;
50247 -extern atomic_t fscache_n_object_created;
50248 -extern atomic_t fscache_n_object_avail;
50249 -extern atomic_t fscache_n_object_dead;
50250 +extern atomic_unchecked_t fscache_n_object_alloc;
50251 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50252 +extern atomic_unchecked_t fscache_n_object_lookups;
50253 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50254 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50255 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50256 +extern atomic_unchecked_t fscache_n_object_created;
50257 +extern atomic_unchecked_t fscache_n_object_avail;
50258 +extern atomic_unchecked_t fscache_n_object_dead;
50259
50260 -extern atomic_t fscache_n_checkaux_none;
50261 -extern atomic_t fscache_n_checkaux_okay;
50262 -extern atomic_t fscache_n_checkaux_update;
50263 -extern atomic_t fscache_n_checkaux_obsolete;
50264 +extern atomic_unchecked_t fscache_n_checkaux_none;
50265 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50266 +extern atomic_unchecked_t fscache_n_checkaux_update;
50267 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50268
50269 extern atomic_t fscache_n_cop_alloc_object;
50270 extern atomic_t fscache_n_cop_lookup_object;
50271 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50272 atomic_inc(stat);
50273 }
50274
50275 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50276 +{
50277 + atomic_inc_unchecked(stat);
50278 +}
50279 +
50280 static inline void fscache_stat_d(atomic_t *stat)
50281 {
50282 atomic_dec(stat);
50283 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50284
50285 #define __fscache_stat(stat) (NULL)
50286 #define fscache_stat(stat) do {} while (0)
50287 +#define fscache_stat_unchecked(stat) do {} while (0)
50288 #define fscache_stat_d(stat) do {} while (0)
50289 #endif
50290
50291 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50292 index e513ac5..e888d34 100644
50293 --- a/fs/fscache/object.c
50294 +++ b/fs/fscache/object.c
50295 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50296 /* update the object metadata on disk */
50297 case FSCACHE_OBJECT_UPDATING:
50298 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50299 - fscache_stat(&fscache_n_updates_run);
50300 + fscache_stat_unchecked(&fscache_n_updates_run);
50301 fscache_stat(&fscache_n_cop_update_object);
50302 object->cache->ops->update_object(object);
50303 fscache_stat_d(&fscache_n_cop_update_object);
50304 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50305 spin_lock(&object->lock);
50306 object->state = FSCACHE_OBJECT_DEAD;
50307 spin_unlock(&object->lock);
50308 - fscache_stat(&fscache_n_object_dead);
50309 + fscache_stat_unchecked(&fscache_n_object_dead);
50310 goto terminal_transit;
50311
50312 /* handle the parent cache of this object being withdrawn from
50313 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50314 spin_lock(&object->lock);
50315 object->state = FSCACHE_OBJECT_DEAD;
50316 spin_unlock(&object->lock);
50317 - fscache_stat(&fscache_n_object_dead);
50318 + fscache_stat_unchecked(&fscache_n_object_dead);
50319 goto terminal_transit;
50320
50321 /* complain about the object being woken up once it is
50322 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50323 parent->cookie->def->name, cookie->def->name,
50324 object->cache->tag->name);
50325
50326 - fscache_stat(&fscache_n_object_lookups);
50327 + fscache_stat_unchecked(&fscache_n_object_lookups);
50328 fscache_stat(&fscache_n_cop_lookup_object);
50329 ret = object->cache->ops->lookup_object(object);
50330 fscache_stat_d(&fscache_n_cop_lookup_object);
50331 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50332 if (ret == -ETIMEDOUT) {
50333 /* probably stuck behind another object, so move this one to
50334 * the back of the queue */
50335 - fscache_stat(&fscache_n_object_lookups_timed_out);
50336 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50337 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50338 }
50339
50340 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50341
50342 spin_lock(&object->lock);
50343 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50344 - fscache_stat(&fscache_n_object_lookups_negative);
50345 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50346
50347 /* transit here to allow write requests to begin stacking up
50348 * and read requests to begin returning ENODATA */
50349 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50350 * result, in which case there may be data available */
50351 spin_lock(&object->lock);
50352 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50353 - fscache_stat(&fscache_n_object_lookups_positive);
50354 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50355
50356 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50357
50358 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50359 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50360 } else {
50361 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50362 - fscache_stat(&fscache_n_object_created);
50363 + fscache_stat_unchecked(&fscache_n_object_created);
50364
50365 object->state = FSCACHE_OBJECT_AVAILABLE;
50366 spin_unlock(&object->lock);
50367 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50368 fscache_enqueue_dependents(object);
50369
50370 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50371 - fscache_stat(&fscache_n_object_avail);
50372 + fscache_stat_unchecked(&fscache_n_object_avail);
50373
50374 _leave("");
50375 }
50376 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50377 enum fscache_checkaux result;
50378
50379 if (!object->cookie->def->check_aux) {
50380 - fscache_stat(&fscache_n_checkaux_none);
50381 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50382 return FSCACHE_CHECKAUX_OKAY;
50383 }
50384
50385 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50386 switch (result) {
50387 /* entry okay as is */
50388 case FSCACHE_CHECKAUX_OKAY:
50389 - fscache_stat(&fscache_n_checkaux_okay);
50390 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50391 break;
50392
50393 /* entry requires update */
50394 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50395 - fscache_stat(&fscache_n_checkaux_update);
50396 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50397 break;
50398
50399 /* entry requires deletion */
50400 case FSCACHE_CHECKAUX_OBSOLETE:
50401 - fscache_stat(&fscache_n_checkaux_obsolete);
50402 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50403 break;
50404
50405 default:
50406 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50407 index 313e79a..775240f 100644
50408 --- a/fs/fscache/operation.c
50409 +++ b/fs/fscache/operation.c
50410 @@ -16,7 +16,7 @@
50411 #include <linux/seq_file.h>
50412 #include "internal.h"
50413
50414 -atomic_t fscache_op_debug_id;
50415 +atomic_unchecked_t fscache_op_debug_id;
50416 EXPORT_SYMBOL(fscache_op_debug_id);
50417
50418 /**
50419 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50420 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50421 ASSERTCMP(atomic_read(&op->usage), >, 0);
50422
50423 - fscache_stat(&fscache_n_op_enqueue);
50424 + fscache_stat_unchecked(&fscache_n_op_enqueue);
50425 switch (op->flags & FSCACHE_OP_TYPE) {
50426 case FSCACHE_OP_FAST:
50427 _debug("queue fast");
50428 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50429 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50430 if (op->processor)
50431 fscache_enqueue_operation(op);
50432 - fscache_stat(&fscache_n_op_run);
50433 + fscache_stat_unchecked(&fscache_n_op_run);
50434 }
50435
50436 /*
50437 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50438 if (object->n_ops > 0) {
50439 atomic_inc(&op->usage);
50440 list_add_tail(&op->pend_link, &object->pending_ops);
50441 - fscache_stat(&fscache_n_op_pend);
50442 + fscache_stat_unchecked(&fscache_n_op_pend);
50443 } else if (!list_empty(&object->pending_ops)) {
50444 atomic_inc(&op->usage);
50445 list_add_tail(&op->pend_link, &object->pending_ops);
50446 - fscache_stat(&fscache_n_op_pend);
50447 + fscache_stat_unchecked(&fscache_n_op_pend);
50448 fscache_start_operations(object);
50449 } else {
50450 ASSERTCMP(object->n_in_progress, ==, 0);
50451 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50452 object->n_exclusive++; /* reads and writes must wait */
50453 atomic_inc(&op->usage);
50454 list_add_tail(&op->pend_link, &object->pending_ops);
50455 - fscache_stat(&fscache_n_op_pend);
50456 + fscache_stat_unchecked(&fscache_n_op_pend);
50457 ret = 0;
50458 } else {
50459 /* not allowed to submit ops in any other state */
50460 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50461 if (object->n_exclusive > 0) {
50462 atomic_inc(&op->usage);
50463 list_add_tail(&op->pend_link, &object->pending_ops);
50464 - fscache_stat(&fscache_n_op_pend);
50465 + fscache_stat_unchecked(&fscache_n_op_pend);
50466 } else if (!list_empty(&object->pending_ops)) {
50467 atomic_inc(&op->usage);
50468 list_add_tail(&op->pend_link, &object->pending_ops);
50469 - fscache_stat(&fscache_n_op_pend);
50470 + fscache_stat_unchecked(&fscache_n_op_pend);
50471 fscache_start_operations(object);
50472 } else {
50473 ASSERTCMP(object->n_exclusive, ==, 0);
50474 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50475 object->n_ops++;
50476 atomic_inc(&op->usage);
50477 list_add_tail(&op->pend_link, &object->pending_ops);
50478 - fscache_stat(&fscache_n_op_pend);
50479 + fscache_stat_unchecked(&fscache_n_op_pend);
50480 ret = 0;
50481 } else if (object->state == FSCACHE_OBJECT_DYING ||
50482 object->state == FSCACHE_OBJECT_LC_DYING ||
50483 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50484 - fscache_stat(&fscache_n_op_rejected);
50485 + fscache_stat_unchecked(&fscache_n_op_rejected);
50486 ret = -ENOBUFS;
50487 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50488 fscache_report_unexpected_submission(object, op, ostate);
50489 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50490
50491 ret = -EBUSY;
50492 if (!list_empty(&op->pend_link)) {
50493 - fscache_stat(&fscache_n_op_cancelled);
50494 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50495 list_del_init(&op->pend_link);
50496 object->n_ops--;
50497 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50498 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50499 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50500 BUG();
50501
50502 - fscache_stat(&fscache_n_op_release);
50503 + fscache_stat_unchecked(&fscache_n_op_release);
50504
50505 if (op->release) {
50506 op->release(op);
50507 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50508 * lock, and defer it otherwise */
50509 if (!spin_trylock(&object->lock)) {
50510 _debug("defer put");
50511 - fscache_stat(&fscache_n_op_deferred_release);
50512 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50513
50514 cache = object->cache;
50515 spin_lock(&cache->op_gc_list_lock);
50516 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50517
50518 _debug("GC DEFERRED REL OBJ%x OP%x",
50519 object->debug_id, op->debug_id);
50520 - fscache_stat(&fscache_n_op_gc);
50521 + fscache_stat_unchecked(&fscache_n_op_gc);
50522
50523 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50524
50525 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50526 index c598ea4..6aac13e 100644
50527 --- a/fs/fscache/page.c
50528 +++ b/fs/fscache/page.c
50529 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50530 val = radix_tree_lookup(&cookie->stores, page->index);
50531 if (!val) {
50532 rcu_read_unlock();
50533 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50534 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50535 __fscache_uncache_page(cookie, page);
50536 return true;
50537 }
50538 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50539 spin_unlock(&cookie->stores_lock);
50540
50541 if (xpage) {
50542 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50543 - fscache_stat(&fscache_n_store_radix_deletes);
50544 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50545 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50546 ASSERTCMP(xpage, ==, page);
50547 } else {
50548 - fscache_stat(&fscache_n_store_vmscan_gone);
50549 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50550 }
50551
50552 wake_up_bit(&cookie->flags, 0);
50553 @@ -106,7 +106,7 @@ page_busy:
50554 /* we might want to wait here, but that could deadlock the allocator as
50555 * the slow-work threads writing to the cache may all end up sleeping
50556 * on memory allocation */
50557 - fscache_stat(&fscache_n_store_vmscan_busy);
50558 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50559 return false;
50560 }
50561 EXPORT_SYMBOL(__fscache_maybe_release_page);
50562 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50563 FSCACHE_COOKIE_STORING_TAG);
50564 if (!radix_tree_tag_get(&cookie->stores, page->index,
50565 FSCACHE_COOKIE_PENDING_TAG)) {
50566 - fscache_stat(&fscache_n_store_radix_deletes);
50567 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50568 xpage = radix_tree_delete(&cookie->stores, page->index);
50569 }
50570 spin_unlock(&cookie->stores_lock);
50571 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50572
50573 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50574
50575 - fscache_stat(&fscache_n_attr_changed_calls);
50576 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50577
50578 if (fscache_object_is_active(object)) {
50579 fscache_set_op_state(op, "CallFS");
50580 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50581
50582 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50583
50584 - fscache_stat(&fscache_n_attr_changed);
50585 + fscache_stat_unchecked(&fscache_n_attr_changed);
50586
50587 op = kzalloc(sizeof(*op), GFP_KERNEL);
50588 if (!op) {
50589 - fscache_stat(&fscache_n_attr_changed_nomem);
50590 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50591 _leave(" = -ENOMEM");
50592 return -ENOMEM;
50593 }
50594 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50595 if (fscache_submit_exclusive_op(object, op) < 0)
50596 goto nobufs;
50597 spin_unlock(&cookie->lock);
50598 - fscache_stat(&fscache_n_attr_changed_ok);
50599 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50600 fscache_put_operation(op);
50601 _leave(" = 0");
50602 return 0;
50603 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50604 nobufs:
50605 spin_unlock(&cookie->lock);
50606 kfree(op);
50607 - fscache_stat(&fscache_n_attr_changed_nobufs);
50608 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50609 _leave(" = %d", -ENOBUFS);
50610 return -ENOBUFS;
50611 }
50612 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50613 /* allocate a retrieval operation and attempt to submit it */
50614 op = kzalloc(sizeof(*op), GFP_NOIO);
50615 if (!op) {
50616 - fscache_stat(&fscache_n_retrievals_nomem);
50617 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50618 return NULL;
50619 }
50620
50621 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50622 return 0;
50623 }
50624
50625 - fscache_stat(&fscache_n_retrievals_wait);
50626 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50627
50628 jif = jiffies;
50629 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50630 fscache_wait_bit_interruptible,
50631 TASK_INTERRUPTIBLE) != 0) {
50632 - fscache_stat(&fscache_n_retrievals_intr);
50633 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50634 _leave(" = -ERESTARTSYS");
50635 return -ERESTARTSYS;
50636 }
50637 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50638 */
50639 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50640 struct fscache_retrieval *op,
50641 - atomic_t *stat_op_waits,
50642 - atomic_t *stat_object_dead)
50643 + atomic_unchecked_t *stat_op_waits,
50644 + atomic_unchecked_t *stat_object_dead)
50645 {
50646 int ret;
50647
50648 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50649 goto check_if_dead;
50650
50651 _debug(">>> WT");
50652 - fscache_stat(stat_op_waits);
50653 + fscache_stat_unchecked(stat_op_waits);
50654 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50655 fscache_wait_bit_interruptible,
50656 TASK_INTERRUPTIBLE) < 0) {
50657 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50658
50659 check_if_dead:
50660 if (unlikely(fscache_object_is_dead(object))) {
50661 - fscache_stat(stat_object_dead);
50662 + fscache_stat_unchecked(stat_object_dead);
50663 return -ENOBUFS;
50664 }
50665 return 0;
50666 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50667
50668 _enter("%p,%p,,,", cookie, page);
50669
50670 - fscache_stat(&fscache_n_retrievals);
50671 + fscache_stat_unchecked(&fscache_n_retrievals);
50672
50673 if (hlist_empty(&cookie->backing_objects))
50674 goto nobufs;
50675 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50676 goto nobufs_unlock;
50677 spin_unlock(&cookie->lock);
50678
50679 - fscache_stat(&fscache_n_retrieval_ops);
50680 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50681
50682 /* pin the netfs read context in case we need to do the actual netfs
50683 * read because we've encountered a cache read failure */
50684 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50685
50686 error:
50687 if (ret == -ENOMEM)
50688 - fscache_stat(&fscache_n_retrievals_nomem);
50689 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50690 else if (ret == -ERESTARTSYS)
50691 - fscache_stat(&fscache_n_retrievals_intr);
50692 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50693 else if (ret == -ENODATA)
50694 - fscache_stat(&fscache_n_retrievals_nodata);
50695 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50696 else if (ret < 0)
50697 - fscache_stat(&fscache_n_retrievals_nobufs);
50698 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50699 else
50700 - fscache_stat(&fscache_n_retrievals_ok);
50701 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50702
50703 fscache_put_retrieval(op);
50704 _leave(" = %d", ret);
50705 @@ -453,7 +453,7 @@ nobufs_unlock:
50706 spin_unlock(&cookie->lock);
50707 kfree(op);
50708 nobufs:
50709 - fscache_stat(&fscache_n_retrievals_nobufs);
50710 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50711 _leave(" = -ENOBUFS");
50712 return -ENOBUFS;
50713 }
50714 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50715
50716 _enter("%p,,%d,,,", cookie, *nr_pages);
50717
50718 - fscache_stat(&fscache_n_retrievals);
50719 + fscache_stat_unchecked(&fscache_n_retrievals);
50720
50721 if (hlist_empty(&cookie->backing_objects))
50722 goto nobufs;
50723 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50724 goto nobufs_unlock;
50725 spin_unlock(&cookie->lock);
50726
50727 - fscache_stat(&fscache_n_retrieval_ops);
50728 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50729
50730 /* pin the netfs read context in case we need to do the actual netfs
50731 * read because we've encountered a cache read failure */
50732 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50733
50734 error:
50735 if (ret == -ENOMEM)
50736 - fscache_stat(&fscache_n_retrievals_nomem);
50737 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50738 else if (ret == -ERESTARTSYS)
50739 - fscache_stat(&fscache_n_retrievals_intr);
50740 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50741 else if (ret == -ENODATA)
50742 - fscache_stat(&fscache_n_retrievals_nodata);
50743 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50744 else if (ret < 0)
50745 - fscache_stat(&fscache_n_retrievals_nobufs);
50746 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50747 else
50748 - fscache_stat(&fscache_n_retrievals_ok);
50749 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50750
50751 fscache_put_retrieval(op);
50752 _leave(" = %d", ret);
50753 @@ -570,7 +570,7 @@ nobufs_unlock:
50754 spin_unlock(&cookie->lock);
50755 kfree(op);
50756 nobufs:
50757 - fscache_stat(&fscache_n_retrievals_nobufs);
50758 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50759 _leave(" = -ENOBUFS");
50760 return -ENOBUFS;
50761 }
50762 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50763
50764 _enter("%p,%p,,,", cookie, page);
50765
50766 - fscache_stat(&fscache_n_allocs);
50767 + fscache_stat_unchecked(&fscache_n_allocs);
50768
50769 if (hlist_empty(&cookie->backing_objects))
50770 goto nobufs;
50771 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50772 goto nobufs_unlock;
50773 spin_unlock(&cookie->lock);
50774
50775 - fscache_stat(&fscache_n_alloc_ops);
50776 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50777
50778 ret = fscache_wait_for_retrieval_activation(
50779 object, op,
50780 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50781
50782 error:
50783 if (ret == -ERESTARTSYS)
50784 - fscache_stat(&fscache_n_allocs_intr);
50785 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50786 else if (ret < 0)
50787 - fscache_stat(&fscache_n_allocs_nobufs);
50788 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50789 else
50790 - fscache_stat(&fscache_n_allocs_ok);
50791 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50792
50793 fscache_put_retrieval(op);
50794 _leave(" = %d", ret);
50795 @@ -651,7 +651,7 @@ nobufs_unlock:
50796 spin_unlock(&cookie->lock);
50797 kfree(op);
50798 nobufs:
50799 - fscache_stat(&fscache_n_allocs_nobufs);
50800 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50801 _leave(" = -ENOBUFS");
50802 return -ENOBUFS;
50803 }
50804 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50805
50806 spin_lock(&cookie->stores_lock);
50807
50808 - fscache_stat(&fscache_n_store_calls);
50809 + fscache_stat_unchecked(&fscache_n_store_calls);
50810
50811 /* find a page to store */
50812 page = NULL;
50813 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50814 page = results[0];
50815 _debug("gang %d [%lx]", n, page->index);
50816 if (page->index > op->store_limit) {
50817 - fscache_stat(&fscache_n_store_pages_over_limit);
50818 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50819 goto superseded;
50820 }
50821
50822 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50823
50824 if (page) {
50825 fscache_set_op_state(&op->op, "Store");
50826 - fscache_stat(&fscache_n_store_pages);
50827 + fscache_stat_unchecked(&fscache_n_store_pages);
50828 fscache_stat(&fscache_n_cop_write_page);
50829 ret = object->cache->ops->write_page(op, page);
50830 fscache_stat_d(&fscache_n_cop_write_page);
50831 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50832 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50833 ASSERT(PageFsCache(page));
50834
50835 - fscache_stat(&fscache_n_stores);
50836 + fscache_stat_unchecked(&fscache_n_stores);
50837
50838 op = kzalloc(sizeof(*op), GFP_NOIO);
50839 if (!op)
50840 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50841 spin_unlock(&cookie->stores_lock);
50842 spin_unlock(&object->lock);
50843
50844 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50845 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50846 op->store_limit = object->store_limit;
50847
50848 if (fscache_submit_op(object, &op->op) < 0)
50849 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50850
50851 spin_unlock(&cookie->lock);
50852 radix_tree_preload_end();
50853 - fscache_stat(&fscache_n_store_ops);
50854 - fscache_stat(&fscache_n_stores_ok);
50855 + fscache_stat_unchecked(&fscache_n_store_ops);
50856 + fscache_stat_unchecked(&fscache_n_stores_ok);
50857
50858 /* the slow work queue now carries its own ref on the object */
50859 fscache_put_operation(&op->op);
50860 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50861 return 0;
50862
50863 already_queued:
50864 - fscache_stat(&fscache_n_stores_again);
50865 + fscache_stat_unchecked(&fscache_n_stores_again);
50866 already_pending:
50867 spin_unlock(&cookie->stores_lock);
50868 spin_unlock(&object->lock);
50869 spin_unlock(&cookie->lock);
50870 radix_tree_preload_end();
50871 kfree(op);
50872 - fscache_stat(&fscache_n_stores_ok);
50873 + fscache_stat_unchecked(&fscache_n_stores_ok);
50874 _leave(" = 0");
50875 return 0;
50876
50877 @@ -886,14 +886,14 @@ nobufs:
50878 spin_unlock(&cookie->lock);
50879 radix_tree_preload_end();
50880 kfree(op);
50881 - fscache_stat(&fscache_n_stores_nobufs);
50882 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50883 _leave(" = -ENOBUFS");
50884 return -ENOBUFS;
50885
50886 nomem_free:
50887 kfree(op);
50888 nomem:
50889 - fscache_stat(&fscache_n_stores_oom);
50890 + fscache_stat_unchecked(&fscache_n_stores_oom);
50891 _leave(" = -ENOMEM");
50892 return -ENOMEM;
50893 }
50894 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50895 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50896 ASSERTCMP(page, !=, NULL);
50897
50898 - fscache_stat(&fscache_n_uncaches);
50899 + fscache_stat_unchecked(&fscache_n_uncaches);
50900
50901 /* cache withdrawal may beat us to it */
50902 if (!PageFsCache(page))
50903 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50904 unsigned long loop;
50905
50906 #ifdef CONFIG_FSCACHE_STATS
50907 - atomic_add(pagevec->nr, &fscache_n_marks);
50908 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50909 #endif
50910
50911 for (loop = 0; loop < pagevec->nr; loop++) {
50912 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50913 index 46435f3..8cddf18 100644
50914 --- a/fs/fscache/stats.c
50915 +++ b/fs/fscache/stats.c
50916 @@ -18,95 +18,95 @@
50917 /*
50918 * operation counters
50919 */
50920 -atomic_t fscache_n_op_pend;
50921 -atomic_t fscache_n_op_run;
50922 -atomic_t fscache_n_op_enqueue;
50923 -atomic_t fscache_n_op_requeue;
50924 -atomic_t fscache_n_op_deferred_release;
50925 -atomic_t fscache_n_op_release;
50926 -atomic_t fscache_n_op_gc;
50927 -atomic_t fscache_n_op_cancelled;
50928 -atomic_t fscache_n_op_rejected;
50929 +atomic_unchecked_t fscache_n_op_pend;
50930 +atomic_unchecked_t fscache_n_op_run;
50931 +atomic_unchecked_t fscache_n_op_enqueue;
50932 +atomic_unchecked_t fscache_n_op_requeue;
50933 +atomic_unchecked_t fscache_n_op_deferred_release;
50934 +atomic_unchecked_t fscache_n_op_release;
50935 +atomic_unchecked_t fscache_n_op_gc;
50936 +atomic_unchecked_t fscache_n_op_cancelled;
50937 +atomic_unchecked_t fscache_n_op_rejected;
50938
50939 -atomic_t fscache_n_attr_changed;
50940 -atomic_t fscache_n_attr_changed_ok;
50941 -atomic_t fscache_n_attr_changed_nobufs;
50942 -atomic_t fscache_n_attr_changed_nomem;
50943 -atomic_t fscache_n_attr_changed_calls;
50944 +atomic_unchecked_t fscache_n_attr_changed;
50945 +atomic_unchecked_t fscache_n_attr_changed_ok;
50946 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50947 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50948 +atomic_unchecked_t fscache_n_attr_changed_calls;
50949
50950 -atomic_t fscache_n_allocs;
50951 -atomic_t fscache_n_allocs_ok;
50952 -atomic_t fscache_n_allocs_wait;
50953 -atomic_t fscache_n_allocs_nobufs;
50954 -atomic_t fscache_n_allocs_intr;
50955 -atomic_t fscache_n_allocs_object_dead;
50956 -atomic_t fscache_n_alloc_ops;
50957 -atomic_t fscache_n_alloc_op_waits;
50958 +atomic_unchecked_t fscache_n_allocs;
50959 +atomic_unchecked_t fscache_n_allocs_ok;
50960 +atomic_unchecked_t fscache_n_allocs_wait;
50961 +atomic_unchecked_t fscache_n_allocs_nobufs;
50962 +atomic_unchecked_t fscache_n_allocs_intr;
50963 +atomic_unchecked_t fscache_n_allocs_object_dead;
50964 +atomic_unchecked_t fscache_n_alloc_ops;
50965 +atomic_unchecked_t fscache_n_alloc_op_waits;
50966
50967 -atomic_t fscache_n_retrievals;
50968 -atomic_t fscache_n_retrievals_ok;
50969 -atomic_t fscache_n_retrievals_wait;
50970 -atomic_t fscache_n_retrievals_nodata;
50971 -atomic_t fscache_n_retrievals_nobufs;
50972 -atomic_t fscache_n_retrievals_intr;
50973 -atomic_t fscache_n_retrievals_nomem;
50974 -atomic_t fscache_n_retrievals_object_dead;
50975 -atomic_t fscache_n_retrieval_ops;
50976 -atomic_t fscache_n_retrieval_op_waits;
50977 +atomic_unchecked_t fscache_n_retrievals;
50978 +atomic_unchecked_t fscache_n_retrievals_ok;
50979 +atomic_unchecked_t fscache_n_retrievals_wait;
50980 +atomic_unchecked_t fscache_n_retrievals_nodata;
50981 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50982 +atomic_unchecked_t fscache_n_retrievals_intr;
50983 +atomic_unchecked_t fscache_n_retrievals_nomem;
50984 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50985 +atomic_unchecked_t fscache_n_retrieval_ops;
50986 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50987
50988 -atomic_t fscache_n_stores;
50989 -atomic_t fscache_n_stores_ok;
50990 -atomic_t fscache_n_stores_again;
50991 -atomic_t fscache_n_stores_nobufs;
50992 -atomic_t fscache_n_stores_oom;
50993 -atomic_t fscache_n_store_ops;
50994 -atomic_t fscache_n_store_calls;
50995 -atomic_t fscache_n_store_pages;
50996 -atomic_t fscache_n_store_radix_deletes;
50997 -atomic_t fscache_n_store_pages_over_limit;
50998 +atomic_unchecked_t fscache_n_stores;
50999 +atomic_unchecked_t fscache_n_stores_ok;
51000 +atomic_unchecked_t fscache_n_stores_again;
51001 +atomic_unchecked_t fscache_n_stores_nobufs;
51002 +atomic_unchecked_t fscache_n_stores_oom;
51003 +atomic_unchecked_t fscache_n_store_ops;
51004 +atomic_unchecked_t fscache_n_store_calls;
51005 +atomic_unchecked_t fscache_n_store_pages;
51006 +atomic_unchecked_t fscache_n_store_radix_deletes;
51007 +atomic_unchecked_t fscache_n_store_pages_over_limit;
51008
51009 -atomic_t fscache_n_store_vmscan_not_storing;
51010 -atomic_t fscache_n_store_vmscan_gone;
51011 -atomic_t fscache_n_store_vmscan_busy;
51012 -atomic_t fscache_n_store_vmscan_cancelled;
51013 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51014 +atomic_unchecked_t fscache_n_store_vmscan_gone;
51015 +atomic_unchecked_t fscache_n_store_vmscan_busy;
51016 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51017
51018 -atomic_t fscache_n_marks;
51019 -atomic_t fscache_n_uncaches;
51020 +atomic_unchecked_t fscache_n_marks;
51021 +atomic_unchecked_t fscache_n_uncaches;
51022
51023 -atomic_t fscache_n_acquires;
51024 -atomic_t fscache_n_acquires_null;
51025 -atomic_t fscache_n_acquires_no_cache;
51026 -atomic_t fscache_n_acquires_ok;
51027 -atomic_t fscache_n_acquires_nobufs;
51028 -atomic_t fscache_n_acquires_oom;
51029 +atomic_unchecked_t fscache_n_acquires;
51030 +atomic_unchecked_t fscache_n_acquires_null;
51031 +atomic_unchecked_t fscache_n_acquires_no_cache;
51032 +atomic_unchecked_t fscache_n_acquires_ok;
51033 +atomic_unchecked_t fscache_n_acquires_nobufs;
51034 +atomic_unchecked_t fscache_n_acquires_oom;
51035
51036 -atomic_t fscache_n_updates;
51037 -atomic_t fscache_n_updates_null;
51038 -atomic_t fscache_n_updates_run;
51039 +atomic_unchecked_t fscache_n_updates;
51040 +atomic_unchecked_t fscache_n_updates_null;
51041 +atomic_unchecked_t fscache_n_updates_run;
51042
51043 -atomic_t fscache_n_relinquishes;
51044 -atomic_t fscache_n_relinquishes_null;
51045 -atomic_t fscache_n_relinquishes_waitcrt;
51046 -atomic_t fscache_n_relinquishes_retire;
51047 +atomic_unchecked_t fscache_n_relinquishes;
51048 +atomic_unchecked_t fscache_n_relinquishes_null;
51049 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51050 +atomic_unchecked_t fscache_n_relinquishes_retire;
51051
51052 -atomic_t fscache_n_cookie_index;
51053 -atomic_t fscache_n_cookie_data;
51054 -atomic_t fscache_n_cookie_special;
51055 +atomic_unchecked_t fscache_n_cookie_index;
51056 +atomic_unchecked_t fscache_n_cookie_data;
51057 +atomic_unchecked_t fscache_n_cookie_special;
51058
51059 -atomic_t fscache_n_object_alloc;
51060 -atomic_t fscache_n_object_no_alloc;
51061 -atomic_t fscache_n_object_lookups;
51062 -atomic_t fscache_n_object_lookups_negative;
51063 -atomic_t fscache_n_object_lookups_positive;
51064 -atomic_t fscache_n_object_lookups_timed_out;
51065 -atomic_t fscache_n_object_created;
51066 -atomic_t fscache_n_object_avail;
51067 -atomic_t fscache_n_object_dead;
51068 +atomic_unchecked_t fscache_n_object_alloc;
51069 +atomic_unchecked_t fscache_n_object_no_alloc;
51070 +atomic_unchecked_t fscache_n_object_lookups;
51071 +atomic_unchecked_t fscache_n_object_lookups_negative;
51072 +atomic_unchecked_t fscache_n_object_lookups_positive;
51073 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
51074 +atomic_unchecked_t fscache_n_object_created;
51075 +atomic_unchecked_t fscache_n_object_avail;
51076 +atomic_unchecked_t fscache_n_object_dead;
51077
51078 -atomic_t fscache_n_checkaux_none;
51079 -atomic_t fscache_n_checkaux_okay;
51080 -atomic_t fscache_n_checkaux_update;
51081 -atomic_t fscache_n_checkaux_obsolete;
51082 +atomic_unchecked_t fscache_n_checkaux_none;
51083 +atomic_unchecked_t fscache_n_checkaux_okay;
51084 +atomic_unchecked_t fscache_n_checkaux_update;
51085 +atomic_unchecked_t fscache_n_checkaux_obsolete;
51086
51087 atomic_t fscache_n_cop_alloc_object;
51088 atomic_t fscache_n_cop_lookup_object;
51089 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51090 seq_puts(m, "FS-Cache statistics\n");
51091
51092 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51093 - atomic_read(&fscache_n_cookie_index),
51094 - atomic_read(&fscache_n_cookie_data),
51095 - atomic_read(&fscache_n_cookie_special));
51096 + atomic_read_unchecked(&fscache_n_cookie_index),
51097 + atomic_read_unchecked(&fscache_n_cookie_data),
51098 + atomic_read_unchecked(&fscache_n_cookie_special));
51099
51100 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51101 - atomic_read(&fscache_n_object_alloc),
51102 - atomic_read(&fscache_n_object_no_alloc),
51103 - atomic_read(&fscache_n_object_avail),
51104 - atomic_read(&fscache_n_object_dead));
51105 + atomic_read_unchecked(&fscache_n_object_alloc),
51106 + atomic_read_unchecked(&fscache_n_object_no_alloc),
51107 + atomic_read_unchecked(&fscache_n_object_avail),
51108 + atomic_read_unchecked(&fscache_n_object_dead));
51109 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51110 - atomic_read(&fscache_n_checkaux_none),
51111 - atomic_read(&fscache_n_checkaux_okay),
51112 - atomic_read(&fscache_n_checkaux_update),
51113 - atomic_read(&fscache_n_checkaux_obsolete));
51114 + atomic_read_unchecked(&fscache_n_checkaux_none),
51115 + atomic_read_unchecked(&fscache_n_checkaux_okay),
51116 + atomic_read_unchecked(&fscache_n_checkaux_update),
51117 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51118
51119 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51120 - atomic_read(&fscache_n_marks),
51121 - atomic_read(&fscache_n_uncaches));
51122 + atomic_read_unchecked(&fscache_n_marks),
51123 + atomic_read_unchecked(&fscache_n_uncaches));
51124
51125 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51126 " oom=%u\n",
51127 - atomic_read(&fscache_n_acquires),
51128 - atomic_read(&fscache_n_acquires_null),
51129 - atomic_read(&fscache_n_acquires_no_cache),
51130 - atomic_read(&fscache_n_acquires_ok),
51131 - atomic_read(&fscache_n_acquires_nobufs),
51132 - atomic_read(&fscache_n_acquires_oom));
51133 + atomic_read_unchecked(&fscache_n_acquires),
51134 + atomic_read_unchecked(&fscache_n_acquires_null),
51135 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
51136 + atomic_read_unchecked(&fscache_n_acquires_ok),
51137 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
51138 + atomic_read_unchecked(&fscache_n_acquires_oom));
51139
51140 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51141 - atomic_read(&fscache_n_object_lookups),
51142 - atomic_read(&fscache_n_object_lookups_negative),
51143 - atomic_read(&fscache_n_object_lookups_positive),
51144 - atomic_read(&fscache_n_object_lookups_timed_out),
51145 - atomic_read(&fscache_n_object_created));
51146 + atomic_read_unchecked(&fscache_n_object_lookups),
51147 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
51148 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
51149 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
51150 + atomic_read_unchecked(&fscache_n_object_created));
51151
51152 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51153 - atomic_read(&fscache_n_updates),
51154 - atomic_read(&fscache_n_updates_null),
51155 - atomic_read(&fscache_n_updates_run));
51156 + atomic_read_unchecked(&fscache_n_updates),
51157 + atomic_read_unchecked(&fscache_n_updates_null),
51158 + atomic_read_unchecked(&fscache_n_updates_run));
51159
51160 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51161 - atomic_read(&fscache_n_relinquishes),
51162 - atomic_read(&fscache_n_relinquishes_null),
51163 - atomic_read(&fscache_n_relinquishes_waitcrt),
51164 - atomic_read(&fscache_n_relinquishes_retire));
51165 + atomic_read_unchecked(&fscache_n_relinquishes),
51166 + atomic_read_unchecked(&fscache_n_relinquishes_null),
51167 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51168 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
51169
51170 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51171 - atomic_read(&fscache_n_attr_changed),
51172 - atomic_read(&fscache_n_attr_changed_ok),
51173 - atomic_read(&fscache_n_attr_changed_nobufs),
51174 - atomic_read(&fscache_n_attr_changed_nomem),
51175 - atomic_read(&fscache_n_attr_changed_calls));
51176 + atomic_read_unchecked(&fscache_n_attr_changed),
51177 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
51178 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51179 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51180 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
51181
51182 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51183 - atomic_read(&fscache_n_allocs),
51184 - atomic_read(&fscache_n_allocs_ok),
51185 - atomic_read(&fscache_n_allocs_wait),
51186 - atomic_read(&fscache_n_allocs_nobufs),
51187 - atomic_read(&fscache_n_allocs_intr));
51188 + atomic_read_unchecked(&fscache_n_allocs),
51189 + atomic_read_unchecked(&fscache_n_allocs_ok),
51190 + atomic_read_unchecked(&fscache_n_allocs_wait),
51191 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
51192 + atomic_read_unchecked(&fscache_n_allocs_intr));
51193 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51194 - atomic_read(&fscache_n_alloc_ops),
51195 - atomic_read(&fscache_n_alloc_op_waits),
51196 - atomic_read(&fscache_n_allocs_object_dead));
51197 + atomic_read_unchecked(&fscache_n_alloc_ops),
51198 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
51199 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
51200
51201 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51202 " int=%u oom=%u\n",
51203 - atomic_read(&fscache_n_retrievals),
51204 - atomic_read(&fscache_n_retrievals_ok),
51205 - atomic_read(&fscache_n_retrievals_wait),
51206 - atomic_read(&fscache_n_retrievals_nodata),
51207 - atomic_read(&fscache_n_retrievals_nobufs),
51208 - atomic_read(&fscache_n_retrievals_intr),
51209 - atomic_read(&fscache_n_retrievals_nomem));
51210 + atomic_read_unchecked(&fscache_n_retrievals),
51211 + atomic_read_unchecked(&fscache_n_retrievals_ok),
51212 + atomic_read_unchecked(&fscache_n_retrievals_wait),
51213 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
51214 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51215 + atomic_read_unchecked(&fscache_n_retrievals_intr),
51216 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
51217 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51218 - atomic_read(&fscache_n_retrieval_ops),
51219 - atomic_read(&fscache_n_retrieval_op_waits),
51220 - atomic_read(&fscache_n_retrievals_object_dead));
51221 + atomic_read_unchecked(&fscache_n_retrieval_ops),
51222 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51223 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51224
51225 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51226 - atomic_read(&fscache_n_stores),
51227 - atomic_read(&fscache_n_stores_ok),
51228 - atomic_read(&fscache_n_stores_again),
51229 - atomic_read(&fscache_n_stores_nobufs),
51230 - atomic_read(&fscache_n_stores_oom));
51231 + atomic_read_unchecked(&fscache_n_stores),
51232 + atomic_read_unchecked(&fscache_n_stores_ok),
51233 + atomic_read_unchecked(&fscache_n_stores_again),
51234 + atomic_read_unchecked(&fscache_n_stores_nobufs),
51235 + atomic_read_unchecked(&fscache_n_stores_oom));
51236 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51237 - atomic_read(&fscache_n_store_ops),
51238 - atomic_read(&fscache_n_store_calls),
51239 - atomic_read(&fscache_n_store_pages),
51240 - atomic_read(&fscache_n_store_radix_deletes),
51241 - atomic_read(&fscache_n_store_pages_over_limit));
51242 + atomic_read_unchecked(&fscache_n_store_ops),
51243 + atomic_read_unchecked(&fscache_n_store_calls),
51244 + atomic_read_unchecked(&fscache_n_store_pages),
51245 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
51246 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51247
51248 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51249 - atomic_read(&fscache_n_store_vmscan_not_storing),
51250 - atomic_read(&fscache_n_store_vmscan_gone),
51251 - atomic_read(&fscache_n_store_vmscan_busy),
51252 - atomic_read(&fscache_n_store_vmscan_cancelled));
51253 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51254 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51255 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51256 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51257
51258 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51259 - atomic_read(&fscache_n_op_pend),
51260 - atomic_read(&fscache_n_op_run),
51261 - atomic_read(&fscache_n_op_enqueue),
51262 - atomic_read(&fscache_n_op_cancelled),
51263 - atomic_read(&fscache_n_op_rejected));
51264 + atomic_read_unchecked(&fscache_n_op_pend),
51265 + atomic_read_unchecked(&fscache_n_op_run),
51266 + atomic_read_unchecked(&fscache_n_op_enqueue),
51267 + atomic_read_unchecked(&fscache_n_op_cancelled),
51268 + atomic_read_unchecked(&fscache_n_op_rejected));
51269 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51270 - atomic_read(&fscache_n_op_deferred_release),
51271 - atomic_read(&fscache_n_op_release),
51272 - atomic_read(&fscache_n_op_gc));
51273 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51274 + atomic_read_unchecked(&fscache_n_op_release),
51275 + atomic_read_unchecked(&fscache_n_op_gc));
51276
51277 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51278 atomic_read(&fscache_n_cop_alloc_object),
51279 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51280 index de792dc..448b532 100644
51281 --- a/fs/fuse/cuse.c
51282 +++ b/fs/fuse/cuse.c
51283 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51284 INIT_LIST_HEAD(&cuse_conntbl[i]);
51285
51286 /* inherit and extend fuse_dev_operations */
51287 - cuse_channel_fops = fuse_dev_operations;
51288 - cuse_channel_fops.owner = THIS_MODULE;
51289 - cuse_channel_fops.open = cuse_channel_open;
51290 - cuse_channel_fops.release = cuse_channel_release;
51291 + pax_open_kernel();
51292 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51293 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51294 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51295 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51296 + pax_close_kernel();
51297
51298 cuse_class = class_create(THIS_MODULE, "cuse");
51299 if (IS_ERR(cuse_class))
51300 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51301 index 1facb39..7f48557 100644
51302 --- a/fs/fuse/dev.c
51303 +++ b/fs/fuse/dev.c
51304 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51305 {
51306 struct fuse_notify_inval_entry_out outarg;
51307 int err = -EINVAL;
51308 - char buf[FUSE_NAME_MAX+1];
51309 + char *buf = NULL;
51310 struct qstr name;
51311
51312 if (size < sizeof(outarg))
51313 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51314 if (outarg.namelen > FUSE_NAME_MAX)
51315 goto err;
51316
51317 + err = -ENOMEM;
51318 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51319 + if (!buf)
51320 + goto err;
51321 +
51322 err = -EINVAL;
51323 if (size != sizeof(outarg) + outarg.namelen + 1)
51324 goto err;
51325 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51326
51327 down_read(&fc->killsb);
51328 err = -ENOENT;
51329 - if (!fc->sb)
51330 - goto err_unlock;
51331 -
51332 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51333 -
51334 -err_unlock:
51335 + if (fc->sb)
51336 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51337 up_read(&fc->killsb);
51338 + kfree(buf);
51339 return err;
51340
51341 err:
51342 fuse_copy_finish(cs);
51343 + kfree(buf);
51344 return err;
51345 }
51346
51347 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51348 index 4787ae6..73efff7 100644
51349 --- a/fs/fuse/dir.c
51350 +++ b/fs/fuse/dir.c
51351 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51352 return link;
51353 }
51354
51355 -static void free_link(char *link)
51356 +static void free_link(const char *link)
51357 {
51358 if (!IS_ERR(link))
51359 free_page((unsigned long) link);
51360 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51361 index 247436c..e650ccb 100644
51362 --- a/fs/gfs2/ops_inode.c
51363 +++ b/fs/gfs2/ops_inode.c
51364 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51365 unsigned int x;
51366 int error;
51367
51368 + pax_track_stack();
51369 +
51370 if (ndentry->d_inode) {
51371 nip = GFS2_I(ndentry->d_inode);
51372 if (ip == nip)
51373 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51374 index 4463297..4fed53b 100644
51375 --- a/fs/gfs2/sys.c
51376 +++ b/fs/gfs2/sys.c
51377 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51378 return a->store ? a->store(sdp, buf, len) : len;
51379 }
51380
51381 -static struct sysfs_ops gfs2_attr_ops = {
51382 +static const struct sysfs_ops gfs2_attr_ops = {
51383 .show = gfs2_attr_show,
51384 .store = gfs2_attr_store,
51385 };
51386 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51387 return 0;
51388 }
51389
51390 -static struct kset_uevent_ops gfs2_uevent_ops = {
51391 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51392 .uevent = gfs2_uevent,
51393 };
51394
51395 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51396 index f6874ac..7cd98a8 100644
51397 --- a/fs/hfsplus/catalog.c
51398 +++ b/fs/hfsplus/catalog.c
51399 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51400 int err;
51401 u16 type;
51402
51403 + pax_track_stack();
51404 +
51405 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51406 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51407 if (err)
51408 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51409 int entry_size;
51410 int err;
51411
51412 + pax_track_stack();
51413 +
51414 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51415 sb = dir->i_sb;
51416 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51417 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51418 int entry_size, type;
51419 int err = 0;
51420
51421 + pax_track_stack();
51422 +
51423 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51424 dst_dir->i_ino, dst_name->name);
51425 sb = src_dir->i_sb;
51426 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51427 index 5f40236..dac3421 100644
51428 --- a/fs/hfsplus/dir.c
51429 +++ b/fs/hfsplus/dir.c
51430 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51431 struct hfsplus_readdir_data *rd;
51432 u16 type;
51433
51434 + pax_track_stack();
51435 +
51436 if (filp->f_pos >= inode->i_size)
51437 return 0;
51438
51439 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51440 index 1bcf597..905a251 100644
51441 --- a/fs/hfsplus/inode.c
51442 +++ b/fs/hfsplus/inode.c
51443 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51444 int res = 0;
51445 u16 type;
51446
51447 + pax_track_stack();
51448 +
51449 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51450
51451 HFSPLUS_I(inode).dev = 0;
51452 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51453 struct hfs_find_data fd;
51454 hfsplus_cat_entry entry;
51455
51456 + pax_track_stack();
51457 +
51458 if (HFSPLUS_IS_RSRC(inode))
51459 main_inode = HFSPLUS_I(inode).rsrc_inode;
51460
51461 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51462 index f457d2c..7ef4ad5 100644
51463 --- a/fs/hfsplus/ioctl.c
51464 +++ b/fs/hfsplus/ioctl.c
51465 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51466 struct hfsplus_cat_file *file;
51467 int res;
51468
51469 + pax_track_stack();
51470 +
51471 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51472 return -EOPNOTSUPP;
51473
51474 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51475 struct hfsplus_cat_file *file;
51476 ssize_t res = 0;
51477
51478 + pax_track_stack();
51479 +
51480 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51481 return -EOPNOTSUPP;
51482
51483 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51484 index 43022f3..7298079 100644
51485 --- a/fs/hfsplus/super.c
51486 +++ b/fs/hfsplus/super.c
51487 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51488 struct nls_table *nls = NULL;
51489 int err = -EINVAL;
51490
51491 + pax_track_stack();
51492 +
51493 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51494 if (!sbi)
51495 return -ENOMEM;
51496 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51497 index 87a1258..5694d91 100644
51498 --- a/fs/hugetlbfs/inode.c
51499 +++ b/fs/hugetlbfs/inode.c
51500 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51501 .kill_sb = kill_litter_super,
51502 };
51503
51504 -static struct vfsmount *hugetlbfs_vfsmount;
51505 +struct vfsmount *hugetlbfs_vfsmount;
51506
51507 static int can_do_hugetlb_shm(void)
51508 {
51509 diff --git a/fs/ioctl.c b/fs/ioctl.c
51510 index 6c75110..19d2c3c 100644
51511 --- a/fs/ioctl.c
51512 +++ b/fs/ioctl.c
51513 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51514 u64 phys, u64 len, u32 flags)
51515 {
51516 struct fiemap_extent extent;
51517 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51518 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51519
51520 /* only count the extents */
51521 if (fieinfo->fi_extents_max == 0) {
51522 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51523
51524 fieinfo.fi_flags = fiemap.fm_flags;
51525 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51526 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51527 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51528
51529 if (fiemap.fm_extent_count != 0 &&
51530 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51531 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51532 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51533 fiemap.fm_flags = fieinfo.fi_flags;
51534 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51535 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51536 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51537 error = -EFAULT;
51538
51539 return error;
51540 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51541 index b0435dd..81ee0be 100644
51542 --- a/fs/jbd/checkpoint.c
51543 +++ b/fs/jbd/checkpoint.c
51544 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51545 tid_t this_tid;
51546 int result;
51547
51548 + pax_track_stack();
51549 +
51550 jbd_debug(1, "Start checkpoint\n");
51551
51552 /*
51553 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51554 index 546d153..736896c 100644
51555 --- a/fs/jffs2/compr_rtime.c
51556 +++ b/fs/jffs2/compr_rtime.c
51557 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51558 int outpos = 0;
51559 int pos=0;
51560
51561 + pax_track_stack();
51562 +
51563 memset(positions,0,sizeof(positions));
51564
51565 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51566 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51567 int outpos = 0;
51568 int pos=0;
51569
51570 + pax_track_stack();
51571 +
51572 memset(positions,0,sizeof(positions));
51573
51574 while (outpos<destlen) {
51575 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51576 index 170d289..3254b98 100644
51577 --- a/fs/jffs2/compr_rubin.c
51578 +++ b/fs/jffs2/compr_rubin.c
51579 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51580 int ret;
51581 uint32_t mysrclen, mydstlen;
51582
51583 + pax_track_stack();
51584 +
51585 mysrclen = *sourcelen;
51586 mydstlen = *dstlen - 8;
51587
51588 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51589 index b47679b..00d65d3 100644
51590 --- a/fs/jffs2/erase.c
51591 +++ b/fs/jffs2/erase.c
51592 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51593 struct jffs2_unknown_node marker = {
51594 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51595 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51596 - .totlen = cpu_to_je32(c->cleanmarker_size)
51597 + .totlen = cpu_to_je32(c->cleanmarker_size),
51598 + .hdr_crc = cpu_to_je32(0)
51599 };
51600
51601 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51602 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51603 index 5ef7bac..4fd1e3c 100644
51604 --- a/fs/jffs2/wbuf.c
51605 +++ b/fs/jffs2/wbuf.c
51606 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51607 {
51608 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51609 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51610 - .totlen = constant_cpu_to_je32(8)
51611 + .totlen = constant_cpu_to_je32(8),
51612 + .hdr_crc = constant_cpu_to_je32(0)
51613 };
51614
51615 /*
51616 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51617 index 082e844..52012a1 100644
51618 --- a/fs/jffs2/xattr.c
51619 +++ b/fs/jffs2/xattr.c
51620 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51621
51622 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51623
51624 + pax_track_stack();
51625 +
51626 /* Phase.1 : Merge same xref */
51627 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51628 xref_tmphash[i] = NULL;
51629 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51630 index 2234c73..f6e6e6b 100644
51631 --- a/fs/jfs/super.c
51632 +++ b/fs/jfs/super.c
51633 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51634
51635 jfs_inode_cachep =
51636 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51637 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51638 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51639 init_once);
51640 if (jfs_inode_cachep == NULL)
51641 return -ENOMEM;
51642 diff --git a/fs/libfs.c b/fs/libfs.c
51643 index ba36e93..3153fce 100644
51644 --- a/fs/libfs.c
51645 +++ b/fs/libfs.c
51646 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51647
51648 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51649 struct dentry *next;
51650 + char d_name[sizeof(next->d_iname)];
51651 + const unsigned char *name;
51652 +
51653 next = list_entry(p, struct dentry, d_u.d_child);
51654 if (d_unhashed(next) || !next->d_inode)
51655 continue;
51656
51657 spin_unlock(&dcache_lock);
51658 - if (filldir(dirent, next->d_name.name,
51659 + name = next->d_name.name;
51660 + if (name == next->d_iname) {
51661 + memcpy(d_name, name, next->d_name.len);
51662 + name = d_name;
51663 + }
51664 + if (filldir(dirent, name,
51665 next->d_name.len, filp->f_pos,
51666 next->d_inode->i_ino,
51667 dt_type(next->d_inode)) < 0)
51668 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51669 index c325a83..d15b07b 100644
51670 --- a/fs/lockd/clntproc.c
51671 +++ b/fs/lockd/clntproc.c
51672 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51673 /*
51674 * Cookie counter for NLM requests
51675 */
51676 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51677 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51678
51679 void nlmclnt_next_cookie(struct nlm_cookie *c)
51680 {
51681 - u32 cookie = atomic_inc_return(&nlm_cookie);
51682 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51683
51684 memcpy(c->data, &cookie, 4);
51685 c->len=4;
51686 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51687 struct nlm_rqst reqst, *req;
51688 int status;
51689
51690 + pax_track_stack();
51691 +
51692 req = &reqst;
51693 memset(req, 0, sizeof(*req));
51694 locks_init_lock(&req->a_args.lock.fl);
51695 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51696 index 1a54ae1..6a16c27 100644
51697 --- a/fs/lockd/svc.c
51698 +++ b/fs/lockd/svc.c
51699 @@ -43,7 +43,7 @@
51700
51701 static struct svc_program nlmsvc_program;
51702
51703 -struct nlmsvc_binding * nlmsvc_ops;
51704 +const struct nlmsvc_binding * nlmsvc_ops;
51705 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51706
51707 static DEFINE_MUTEX(nlmsvc_mutex);
51708 diff --git a/fs/locks.c b/fs/locks.c
51709 index a8794f2..4041e55 100644
51710 --- a/fs/locks.c
51711 +++ b/fs/locks.c
51712 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51713
51714 static struct kmem_cache *filelock_cache __read_mostly;
51715
51716 +static void locks_init_lock_always(struct file_lock *fl)
51717 +{
51718 + fl->fl_next = NULL;
51719 + fl->fl_fasync = NULL;
51720 + fl->fl_owner = NULL;
51721 + fl->fl_pid = 0;
51722 + fl->fl_nspid = NULL;
51723 + fl->fl_file = NULL;
51724 + fl->fl_flags = 0;
51725 + fl->fl_type = 0;
51726 + fl->fl_start = fl->fl_end = 0;
51727 +}
51728 +
51729 /* Allocate an empty lock structure. */
51730 static struct file_lock *locks_alloc_lock(void)
51731 {
51732 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51733 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51734 +
51735 + if (fl)
51736 + locks_init_lock_always(fl);
51737 +
51738 + return fl;
51739 }
51740
51741 void locks_release_private(struct file_lock *fl)
51742 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51743 INIT_LIST_HEAD(&fl->fl_link);
51744 INIT_LIST_HEAD(&fl->fl_block);
51745 init_waitqueue_head(&fl->fl_wait);
51746 - fl->fl_next = NULL;
51747 - fl->fl_fasync = NULL;
51748 - fl->fl_owner = NULL;
51749 - fl->fl_pid = 0;
51750 - fl->fl_nspid = NULL;
51751 - fl->fl_file = NULL;
51752 - fl->fl_flags = 0;
51753 - fl->fl_type = 0;
51754 - fl->fl_start = fl->fl_end = 0;
51755 fl->fl_ops = NULL;
51756 fl->fl_lmops = NULL;
51757 + locks_init_lock_always(fl);
51758 }
51759
51760 EXPORT_SYMBOL(locks_init_lock);
51761 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51762 return;
51763
51764 if (filp->f_op && filp->f_op->flock) {
51765 - struct file_lock fl = {
51766 + struct file_lock flock = {
51767 .fl_pid = current->tgid,
51768 .fl_file = filp,
51769 .fl_flags = FL_FLOCK,
51770 .fl_type = F_UNLCK,
51771 .fl_end = OFFSET_MAX,
51772 };
51773 - filp->f_op->flock(filp, F_SETLKW, &fl);
51774 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51775 - fl.fl_ops->fl_release_private(&fl);
51776 + filp->f_op->flock(filp, F_SETLKW, &flock);
51777 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51778 + flock.fl_ops->fl_release_private(&flock);
51779 }
51780
51781 lock_kernel();
51782 diff --git a/fs/mbcache.c b/fs/mbcache.c
51783 index ec88ff3..b843a82 100644
51784 --- a/fs/mbcache.c
51785 +++ b/fs/mbcache.c
51786 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51787 if (!cache)
51788 goto fail;
51789 cache->c_name = name;
51790 - cache->c_op.free = NULL;
51791 + *(void **)&cache->c_op.free = NULL;
51792 if (cache_op)
51793 - cache->c_op.free = cache_op->free;
51794 + *(void **)&cache->c_op.free = cache_op->free;
51795 atomic_set(&cache->c_entry_count, 0);
51796 cache->c_bucket_bits = bucket_bits;
51797 #ifdef MB_CACHE_INDEXES_COUNT
51798 diff --git a/fs/namei.c b/fs/namei.c
51799 index b0afbd4..8d065a1 100644
51800 --- a/fs/namei.c
51801 +++ b/fs/namei.c
51802 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51803 return ret;
51804
51805 /*
51806 + * Searching includes executable on directories, else just read.
51807 + */
51808 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51809 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51810 + if (capable(CAP_DAC_READ_SEARCH))
51811 + return 0;
51812 +
51813 + /*
51814 * Read/write DACs are always overridable.
51815 * Executable DACs are overridable if at least one exec bit is set.
51816 */
51817 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51818 if (capable(CAP_DAC_OVERRIDE))
51819 return 0;
51820
51821 - /*
51822 - * Searching includes executable on directories, else just read.
51823 - */
51824 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51825 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51826 - if (capable(CAP_DAC_READ_SEARCH))
51827 - return 0;
51828 -
51829 return -EACCES;
51830 }
51831
51832 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51833 if (!ret)
51834 goto ok;
51835
51836 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51837 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51838 + capable(CAP_DAC_OVERRIDE))
51839 goto ok;
51840
51841 return ret;
51842 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51843 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51844 error = PTR_ERR(cookie);
51845 if (!IS_ERR(cookie)) {
51846 - char *s = nd_get_link(nd);
51847 + const char *s = nd_get_link(nd);
51848 error = 0;
51849 if (s)
51850 error = __vfs_follow_link(nd, s);
51851 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51852 err = security_inode_follow_link(path->dentry, nd);
51853 if (err)
51854 goto loop;
51855 +
51856 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51857 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51858 + err = -EACCES;
51859 + goto loop;
51860 + }
51861 +
51862 current->link_count++;
51863 current->total_link_count++;
51864 nd->depth++;
51865 @@ -1016,11 +1024,19 @@ return_reval:
51866 break;
51867 }
51868 return_base:
51869 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51870 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51871 + path_put(&nd->path);
51872 + return -ENOENT;
51873 + }
51874 return 0;
51875 out_dput:
51876 path_put_conditional(&next, nd);
51877 break;
51878 }
51879 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51880 + err = -ENOENT;
51881 +
51882 path_put(&nd->path);
51883 return_err:
51884 return err;
51885 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51886 int retval = path_init(dfd, name, flags, nd);
51887 if (!retval)
51888 retval = path_walk(name, nd);
51889 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51890 - nd->path.dentry->d_inode))
51891 - audit_inode(name, nd->path.dentry);
51892 +
51893 + if (likely(!retval)) {
51894 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51895 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51896 + retval = -ENOENT;
51897 + if (!audit_dummy_context())
51898 + audit_inode(name, nd->path.dentry);
51899 + }
51900 + }
51901 if (nd->root.mnt) {
51902 path_put(&nd->root);
51903 nd->root.mnt = NULL;
51904 }
51905 +
51906 return retval;
51907 }
51908
51909 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51910 if (error)
51911 goto err_out;
51912
51913 +
51914 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51915 + error = -EPERM;
51916 + goto err_out;
51917 + }
51918 + if (gr_handle_rawio(inode)) {
51919 + error = -EPERM;
51920 + goto err_out;
51921 + }
51922 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51923 + error = -EACCES;
51924 + goto err_out;
51925 + }
51926 +
51927 if (flag & O_TRUNC) {
51928 error = get_write_access(inode);
51929 if (error)
51930 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51931 {
51932 int error;
51933 struct dentry *dir = nd->path.dentry;
51934 + int acc_mode = ACC_MODE(flag);
51935 +
51936 + if (flag & O_TRUNC)
51937 + acc_mode |= MAY_WRITE;
51938 + if (flag & O_APPEND)
51939 + acc_mode |= MAY_APPEND;
51940 +
51941 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51942 + error = -EACCES;
51943 + goto out_unlock;
51944 + }
51945
51946 if (!IS_POSIXACL(dir->d_inode))
51947 mode &= ~current_umask();
51948 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51949 if (error)
51950 goto out_unlock;
51951 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51952 + if (!error)
51953 + gr_handle_create(path->dentry, nd->path.mnt);
51954 out_unlock:
51955 mutex_unlock(&dir->d_inode->i_mutex);
51956 dput(nd->path.dentry);
51957 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51958 &nd, flag);
51959 if (error)
51960 return ERR_PTR(error);
51961 +
51962 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51963 + error = -EPERM;
51964 + goto exit;
51965 + }
51966 +
51967 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51968 + error = -EPERM;
51969 + goto exit;
51970 + }
51971 +
51972 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51973 + error = -EACCES;
51974 + goto exit;
51975 + }
51976 +
51977 goto ok;
51978 }
51979
51980 @@ -1795,6 +1861,19 @@ do_last:
51981 /*
51982 * It already exists.
51983 */
51984 +
51985 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51986 + error = -ENOENT;
51987 + goto exit_mutex_unlock;
51988 + }
51989 +
51990 + /* only check if O_CREAT is specified, all other checks need
51991 + to go into may_open */
51992 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51993 + error = -EACCES;
51994 + goto exit_mutex_unlock;
51995 + }
51996 +
51997 mutex_unlock(&dir->d_inode->i_mutex);
51998 audit_inode(pathname, path.dentry);
51999
52000 @@ -1887,6 +1966,13 @@ do_link:
52001 error = security_inode_follow_link(path.dentry, &nd);
52002 if (error)
52003 goto exit_dput;
52004 +
52005 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
52006 + path.dentry, nd.path.mnt)) {
52007 + error = -EACCES;
52008 + goto exit_dput;
52009 + }
52010 +
52011 error = __do_follow_link(&path, &nd);
52012 if (error) {
52013 /* Does someone understand code flow here? Or it is only
52014 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
52015 }
52016 return dentry;
52017 eexist:
52018 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
52019 + dput(dentry);
52020 + return ERR_PTR(-ENOENT);
52021 + }
52022 dput(dentry);
52023 dentry = ERR_PTR(-EEXIST);
52024 fail:
52025 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52026 error = may_mknod(mode);
52027 if (error)
52028 goto out_dput;
52029 +
52030 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
52031 + error = -EPERM;
52032 + goto out_dput;
52033 + }
52034 +
52035 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
52036 + error = -EACCES;
52037 + goto out_dput;
52038 + }
52039 +
52040 error = mnt_want_write(nd.path.mnt);
52041 if (error)
52042 goto out_dput;
52043 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52044 }
52045 out_drop_write:
52046 mnt_drop_write(nd.path.mnt);
52047 +
52048 + if (!error)
52049 + gr_handle_create(dentry, nd.path.mnt);
52050 out_dput:
52051 dput(dentry);
52052 out_unlock:
52053 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52054 if (IS_ERR(dentry))
52055 goto out_unlock;
52056
52057 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
52058 + error = -EACCES;
52059 + goto out_dput;
52060 + }
52061 +
52062 if (!IS_POSIXACL(nd.path.dentry->d_inode))
52063 mode &= ~current_umask();
52064 error = mnt_want_write(nd.path.mnt);
52065 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52066 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
52067 out_drop_write:
52068 mnt_drop_write(nd.path.mnt);
52069 +
52070 + if (!error)
52071 + gr_handle_create(dentry, nd.path.mnt);
52072 +
52073 out_dput:
52074 dput(dentry);
52075 out_unlock:
52076 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52077 char * name;
52078 struct dentry *dentry;
52079 struct nameidata nd;
52080 + ino_t saved_ino = 0;
52081 + dev_t saved_dev = 0;
52082
52083 error = user_path_parent(dfd, pathname, &nd, &name);
52084 if (error)
52085 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
52086 error = PTR_ERR(dentry);
52087 if (IS_ERR(dentry))
52088 goto exit2;
52089 +
52090 + if (dentry->d_inode != NULL) {
52091 + saved_ino = dentry->d_inode->i_ino;
52092 + saved_dev = gr_get_dev_from_dentry(dentry);
52093 +
52094 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52095 + error = -EACCES;
52096 + goto exit3;
52097 + }
52098 + }
52099 +
52100 error = mnt_want_write(nd.path.mnt);
52101 if (error)
52102 goto exit3;
52103 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52104 if (error)
52105 goto exit4;
52106 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52107 + if (!error && (saved_dev || saved_ino))
52108 + gr_handle_delete(saved_ino, saved_dev);
52109 exit4:
52110 mnt_drop_write(nd.path.mnt);
52111 exit3:
52112 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52113 struct dentry *dentry;
52114 struct nameidata nd;
52115 struct inode *inode = NULL;
52116 + ino_t saved_ino = 0;
52117 + dev_t saved_dev = 0;
52118
52119 error = user_path_parent(dfd, pathname, &nd, &name);
52120 if (error)
52121 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52122 if (nd.last.name[nd.last.len])
52123 goto slashes;
52124 inode = dentry->d_inode;
52125 - if (inode)
52126 + if (inode) {
52127 + if (inode->i_nlink <= 1) {
52128 + saved_ino = inode->i_ino;
52129 + saved_dev = gr_get_dev_from_dentry(dentry);
52130 + }
52131 +
52132 atomic_inc(&inode->i_count);
52133 +
52134 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52135 + error = -EACCES;
52136 + goto exit2;
52137 + }
52138 + }
52139 error = mnt_want_write(nd.path.mnt);
52140 if (error)
52141 goto exit2;
52142 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52143 if (error)
52144 goto exit3;
52145 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52146 + if (!error && (saved_ino || saved_dev))
52147 + gr_handle_delete(saved_ino, saved_dev);
52148 exit3:
52149 mnt_drop_write(nd.path.mnt);
52150 exit2:
52151 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52152 if (IS_ERR(dentry))
52153 goto out_unlock;
52154
52155 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52156 + error = -EACCES;
52157 + goto out_dput;
52158 + }
52159 +
52160 error = mnt_want_write(nd.path.mnt);
52161 if (error)
52162 goto out_dput;
52163 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52164 if (error)
52165 goto out_drop_write;
52166 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52167 + if (!error)
52168 + gr_handle_create(dentry, nd.path.mnt);
52169 out_drop_write:
52170 mnt_drop_write(nd.path.mnt);
52171 out_dput:
52172 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52173 error = PTR_ERR(new_dentry);
52174 if (IS_ERR(new_dentry))
52175 goto out_unlock;
52176 +
52177 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52178 + old_path.dentry->d_inode,
52179 + old_path.dentry->d_inode->i_mode, to)) {
52180 + error = -EACCES;
52181 + goto out_dput;
52182 + }
52183 +
52184 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52185 + old_path.dentry, old_path.mnt, to)) {
52186 + error = -EACCES;
52187 + goto out_dput;
52188 + }
52189 +
52190 error = mnt_want_write(nd.path.mnt);
52191 if (error)
52192 goto out_dput;
52193 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52194 if (error)
52195 goto out_drop_write;
52196 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52197 + if (!error)
52198 + gr_handle_create(new_dentry, nd.path.mnt);
52199 out_drop_write:
52200 mnt_drop_write(nd.path.mnt);
52201 out_dput:
52202 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52203 char *to;
52204 int error;
52205
52206 + pax_track_stack();
52207 +
52208 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52209 if (error)
52210 goto exit;
52211 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52212 if (new_dentry == trap)
52213 goto exit5;
52214
52215 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52216 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
52217 + to);
52218 + if (error)
52219 + goto exit5;
52220 +
52221 error = mnt_want_write(oldnd.path.mnt);
52222 if (error)
52223 goto exit5;
52224 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52225 goto exit6;
52226 error = vfs_rename(old_dir->d_inode, old_dentry,
52227 new_dir->d_inode, new_dentry);
52228 + if (!error)
52229 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52230 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52231 exit6:
52232 mnt_drop_write(oldnd.path.mnt);
52233 exit5:
52234 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52235
52236 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52237 {
52238 + char tmpbuf[64];
52239 + const char *newlink;
52240 int len;
52241
52242 len = PTR_ERR(link);
52243 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52244 len = strlen(link);
52245 if (len > (unsigned) buflen)
52246 len = buflen;
52247 - if (copy_to_user(buffer, link, len))
52248 +
52249 + if (len < sizeof(tmpbuf)) {
52250 + memcpy(tmpbuf, link, len);
52251 + newlink = tmpbuf;
52252 + } else
52253 + newlink = link;
52254 +
52255 + if (copy_to_user(buffer, newlink, len))
52256 len = -EFAULT;
52257 out:
52258 return len;
52259 diff --git a/fs/namespace.c b/fs/namespace.c
52260 index 2beb0fb..11a95a5 100644
52261 --- a/fs/namespace.c
52262 +++ b/fs/namespace.c
52263 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52264 if (!(sb->s_flags & MS_RDONLY))
52265 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52266 up_write(&sb->s_umount);
52267 +
52268 + gr_log_remount(mnt->mnt_devname, retval);
52269 +
52270 return retval;
52271 }
52272
52273 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52274 security_sb_umount_busy(mnt);
52275 up_write(&namespace_sem);
52276 release_mounts(&umount_list);
52277 +
52278 + gr_log_unmount(mnt->mnt_devname, retval);
52279 +
52280 return retval;
52281 }
52282
52283 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52284 if (retval)
52285 goto dput_out;
52286
52287 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52288 + retval = -EPERM;
52289 + goto dput_out;
52290 + }
52291 +
52292 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52293 + retval = -EPERM;
52294 + goto dput_out;
52295 + }
52296 +
52297 if (flags & MS_REMOUNT)
52298 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52299 data_page);
52300 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52301 dev_name, data_page);
52302 dput_out:
52303 path_put(&path);
52304 +
52305 + gr_log_mount(dev_name, dir_name, retval);
52306 +
52307 return retval;
52308 }
52309
52310 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52311 goto out1;
52312 }
52313
52314 + if (gr_handle_chroot_pivot()) {
52315 + error = -EPERM;
52316 + path_put(&old);
52317 + goto out1;
52318 + }
52319 +
52320 read_lock(&current->fs->lock);
52321 root = current->fs->root;
52322 path_get(&current->fs->root);
52323 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52324 index b8b5b30..2bd9ccb 100644
52325 --- a/fs/ncpfs/dir.c
52326 +++ b/fs/ncpfs/dir.c
52327 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52328 int res, val = 0, len;
52329 __u8 __name[NCP_MAXPATHLEN + 1];
52330
52331 + pax_track_stack();
52332 +
52333 parent = dget_parent(dentry);
52334 dir = parent->d_inode;
52335
52336 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52337 int error, res, len;
52338 __u8 __name[NCP_MAXPATHLEN + 1];
52339
52340 + pax_track_stack();
52341 +
52342 lock_kernel();
52343 error = -EIO;
52344 if (!ncp_conn_valid(server))
52345 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52346 int error, result, len;
52347 int opmode;
52348 __u8 __name[NCP_MAXPATHLEN + 1];
52349 -
52350 +
52351 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52352 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52353
52354 + pax_track_stack();
52355 +
52356 error = -EIO;
52357 lock_kernel();
52358 if (!ncp_conn_valid(server))
52359 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52360 int error, len;
52361 __u8 __name[NCP_MAXPATHLEN + 1];
52362
52363 + pax_track_stack();
52364 +
52365 DPRINTK("ncp_mkdir: making %s/%s\n",
52366 dentry->d_parent->d_name.name, dentry->d_name.name);
52367
52368 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52369 if (!ncp_conn_valid(server))
52370 goto out;
52371
52372 + pax_track_stack();
52373 +
52374 ncp_age_dentry(server, dentry);
52375 len = sizeof(__name);
52376 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52377 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52378 int old_len, new_len;
52379 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52380
52381 + pax_track_stack();
52382 +
52383 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52384 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52385 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52386 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52387 index cf98da1..da890a9 100644
52388 --- a/fs/ncpfs/inode.c
52389 +++ b/fs/ncpfs/inode.c
52390 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52391 #endif
52392 struct ncp_entry_info finfo;
52393
52394 + pax_track_stack();
52395 +
52396 data.wdog_pid = NULL;
52397 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52398 if (!server)
52399 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52400 index bfaef7b..e9d03ca 100644
52401 --- a/fs/nfs/inode.c
52402 +++ b/fs/nfs/inode.c
52403 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52404 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52405 nfsi->attrtimeo_timestamp = jiffies;
52406
52407 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52408 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52409 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52410 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52411 else
52412 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52413 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52414 }
52415
52416 -static atomic_long_t nfs_attr_generation_counter;
52417 +static atomic_long_unchecked_t nfs_attr_generation_counter;
52418
52419 static unsigned long nfs_read_attr_generation_counter(void)
52420 {
52421 - return atomic_long_read(&nfs_attr_generation_counter);
52422 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52423 }
52424
52425 unsigned long nfs_inc_attr_generation_counter(void)
52426 {
52427 - return atomic_long_inc_return(&nfs_attr_generation_counter);
52428 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52429 }
52430
52431 void nfs_fattr_init(struct nfs_fattr *fattr)
52432 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52433 index cc2f505..f6a236f 100644
52434 --- a/fs/nfsd/lockd.c
52435 +++ b/fs/nfsd/lockd.c
52436 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52437 fput(filp);
52438 }
52439
52440 -static struct nlmsvc_binding nfsd_nlm_ops = {
52441 +static const struct nlmsvc_binding nfsd_nlm_ops = {
52442 .fopen = nlm_fopen, /* open file for locking */
52443 .fclose = nlm_fclose, /* close file */
52444 };
52445 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52446 index cfc3391..dcc083a 100644
52447 --- a/fs/nfsd/nfs4state.c
52448 +++ b/fs/nfsd/nfs4state.c
52449 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52450 unsigned int cmd;
52451 int err;
52452
52453 + pax_track_stack();
52454 +
52455 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52456 (long long) lock->lk_offset,
52457 (long long) lock->lk_length);
52458 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52459 index 4a82a96..0d5fb49 100644
52460 --- a/fs/nfsd/nfs4xdr.c
52461 +++ b/fs/nfsd/nfs4xdr.c
52462 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52463 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52464 u32 minorversion = resp->cstate.minorversion;
52465
52466 + pax_track_stack();
52467 +
52468 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52469 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52470 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52471 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52472 index 2e09588..596421d 100644
52473 --- a/fs/nfsd/vfs.c
52474 +++ b/fs/nfsd/vfs.c
52475 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52476 } else {
52477 oldfs = get_fs();
52478 set_fs(KERNEL_DS);
52479 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52480 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52481 set_fs(oldfs);
52482 }
52483
52484 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52485
52486 /* Write the data. */
52487 oldfs = get_fs(); set_fs(KERNEL_DS);
52488 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52489 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52490 set_fs(oldfs);
52491 if (host_err < 0)
52492 goto out_nfserr;
52493 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52494 */
52495
52496 oldfs = get_fs(); set_fs(KERNEL_DS);
52497 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52498 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52499 set_fs(oldfs);
52500
52501 if (host_err < 0)
52502 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52503 index f6af760..d0adf34 100644
52504 --- a/fs/nilfs2/ioctl.c
52505 +++ b/fs/nilfs2/ioctl.c
52506 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52507 unsigned int cmd, void __user *argp)
52508 {
52509 struct nilfs_argv argv[5];
52510 - const static size_t argsz[5] = {
52511 + static const size_t argsz[5] = {
52512 sizeof(struct nilfs_vdesc),
52513 sizeof(struct nilfs_period),
52514 sizeof(__u64),
52515 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52516 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52517 goto out_free;
52518
52519 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52520 + goto out_free;
52521 +
52522 len = argv[n].v_size * argv[n].v_nmembs;
52523 base = (void __user *)(unsigned long)argv[n].v_base;
52524 if (len == 0) {
52525 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52526 index 7e54e52..9337248 100644
52527 --- a/fs/notify/dnotify/dnotify.c
52528 +++ b/fs/notify/dnotify/dnotify.c
52529 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52530 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52531 }
52532
52533 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52534 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52535 .handle_event = dnotify_handle_event,
52536 .should_send_event = dnotify_should_send_event,
52537 .free_group_priv = NULL,
52538 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52539 index b8bf53b..c518688 100644
52540 --- a/fs/notify/notification.c
52541 +++ b/fs/notify/notification.c
52542 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52543 * get set to 0 so it will never get 'freed'
52544 */
52545 static struct fsnotify_event q_overflow_event;
52546 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52547 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52548
52549 /**
52550 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52551 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52552 */
52553 u32 fsnotify_get_cookie(void)
52554 {
52555 - return atomic_inc_return(&fsnotify_sync_cookie);
52556 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52557 }
52558 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52559
52560 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52561 index 5a9e344..0f8cd28 100644
52562 --- a/fs/ntfs/dir.c
52563 +++ b/fs/ntfs/dir.c
52564 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52565 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52566 ~(s64)(ndir->itype.index.block_size - 1)));
52567 /* Bounds checks. */
52568 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52569 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52570 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52571 "inode 0x%lx or driver bug.", vdir->i_ino);
52572 goto err_out;
52573 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52574 index 663c0e3..b6868e9 100644
52575 --- a/fs/ntfs/file.c
52576 +++ b/fs/ntfs/file.c
52577 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52578 #endif /* NTFS_RW */
52579 };
52580
52581 -const struct file_operations ntfs_empty_file_ops = {};
52582 +const struct file_operations ntfs_empty_file_ops __read_only;
52583
52584 -const struct inode_operations ntfs_empty_inode_ops = {};
52585 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52586 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52587 index 1cd2934..880b5d2 100644
52588 --- a/fs/ocfs2/cluster/masklog.c
52589 +++ b/fs/ocfs2/cluster/masklog.c
52590 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52591 return mlog_mask_store(mlog_attr->mask, buf, count);
52592 }
52593
52594 -static struct sysfs_ops mlog_attr_ops = {
52595 +static const struct sysfs_ops mlog_attr_ops = {
52596 .show = mlog_show,
52597 .store = mlog_store,
52598 };
52599 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52600 index ac10f83..2cd2607 100644
52601 --- a/fs/ocfs2/localalloc.c
52602 +++ b/fs/ocfs2/localalloc.c
52603 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52604 goto bail;
52605 }
52606
52607 - atomic_inc(&osb->alloc_stats.moves);
52608 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52609
52610 status = 0;
52611 bail:
52612 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52613 index f010b22..9f9ed34 100644
52614 --- a/fs/ocfs2/namei.c
52615 +++ b/fs/ocfs2/namei.c
52616 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52617 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52618 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52619
52620 + pax_track_stack();
52621 +
52622 /* At some point it might be nice to break this function up a
52623 * bit. */
52624
52625 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52626 index d963d86..914cfbd 100644
52627 --- a/fs/ocfs2/ocfs2.h
52628 +++ b/fs/ocfs2/ocfs2.h
52629 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52630
52631 struct ocfs2_alloc_stats
52632 {
52633 - atomic_t moves;
52634 - atomic_t local_data;
52635 - atomic_t bitmap_data;
52636 - atomic_t bg_allocs;
52637 - atomic_t bg_extends;
52638 + atomic_unchecked_t moves;
52639 + atomic_unchecked_t local_data;
52640 + atomic_unchecked_t bitmap_data;
52641 + atomic_unchecked_t bg_allocs;
52642 + atomic_unchecked_t bg_extends;
52643 };
52644
52645 enum ocfs2_local_alloc_state
52646 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52647 index 79b5dac..d322952 100644
52648 --- a/fs/ocfs2/suballoc.c
52649 +++ b/fs/ocfs2/suballoc.c
52650 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52651 mlog_errno(status);
52652 goto bail;
52653 }
52654 - atomic_inc(&osb->alloc_stats.bg_extends);
52655 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52656
52657 /* You should never ask for this much metadata */
52658 BUG_ON(bits_wanted >
52659 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52660 mlog_errno(status);
52661 goto bail;
52662 }
52663 - atomic_inc(&osb->alloc_stats.bg_allocs);
52664 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52665
52666 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52667 ac->ac_bits_given += (*num_bits);
52668 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52669 mlog_errno(status);
52670 goto bail;
52671 }
52672 - atomic_inc(&osb->alloc_stats.bg_allocs);
52673 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52674
52675 BUG_ON(num_bits != 1);
52676
52677 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52678 cluster_start,
52679 num_clusters);
52680 if (!status)
52681 - atomic_inc(&osb->alloc_stats.local_data);
52682 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52683 } else {
52684 if (min_clusters > (osb->bitmap_cpg - 1)) {
52685 /* The only paths asking for contiguousness
52686 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52687 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52688 bg_blkno,
52689 bg_bit_off);
52690 - atomic_inc(&osb->alloc_stats.bitmap_data);
52691 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52692 }
52693 }
52694 if (status < 0) {
52695 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52696 index 9f55be4..a3f8048 100644
52697 --- a/fs/ocfs2/super.c
52698 +++ b/fs/ocfs2/super.c
52699 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52700 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52701 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52702 "Stats",
52703 - atomic_read(&osb->alloc_stats.bitmap_data),
52704 - atomic_read(&osb->alloc_stats.local_data),
52705 - atomic_read(&osb->alloc_stats.bg_allocs),
52706 - atomic_read(&osb->alloc_stats.moves),
52707 - atomic_read(&osb->alloc_stats.bg_extends));
52708 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52709 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52710 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52711 + atomic_read_unchecked(&osb->alloc_stats.moves),
52712 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52713
52714 out += snprintf(buf + out, len - out,
52715 "%10s => State: %u Descriptor: %llu Size: %u bits "
52716 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52717 spin_lock_init(&osb->osb_xattr_lock);
52718 ocfs2_init_inode_steal_slot(osb);
52719
52720 - atomic_set(&osb->alloc_stats.moves, 0);
52721 - atomic_set(&osb->alloc_stats.local_data, 0);
52722 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52723 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52724 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52725 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52726 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52727 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52728 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52729 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52730
52731 /* Copy the blockcheck stats from the superblock probe */
52732 osb->osb_ecc_stats = *stats;
52733 diff --git a/fs/open.c b/fs/open.c
52734 index 4f01e06..2a8057a 100644
52735 --- a/fs/open.c
52736 +++ b/fs/open.c
52737 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52738 error = locks_verify_truncate(inode, NULL, length);
52739 if (!error)
52740 error = security_path_truncate(&path, length, 0);
52741 +
52742 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52743 + error = -EACCES;
52744 +
52745 if (!error) {
52746 vfs_dq_init(inode);
52747 error = do_truncate(path.dentry, length, 0, NULL);
52748 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52749 if (__mnt_is_readonly(path.mnt))
52750 res = -EROFS;
52751
52752 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52753 + res = -EACCES;
52754 +
52755 out_path_release:
52756 path_put(&path);
52757 out:
52758 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52759 if (error)
52760 goto dput_and_out;
52761
52762 + gr_log_chdir(path.dentry, path.mnt);
52763 +
52764 set_fs_pwd(current->fs, &path);
52765
52766 dput_and_out:
52767 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52768 goto out_putf;
52769
52770 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52771 +
52772 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52773 + error = -EPERM;
52774 +
52775 + if (!error)
52776 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52777 +
52778 if (!error)
52779 set_fs_pwd(current->fs, &file->f_path);
52780 out_putf:
52781 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52782 if (!capable(CAP_SYS_CHROOT))
52783 goto dput_and_out;
52784
52785 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52786 + goto dput_and_out;
52787 +
52788 set_fs_root(current->fs, &path);
52789 +
52790 + gr_handle_chroot_chdir(&path);
52791 +
52792 error = 0;
52793 dput_and_out:
52794 path_put(&path);
52795 @@ -596,66 +618,57 @@ out:
52796 return error;
52797 }
52798
52799 +static int chmod_common(struct path *path, umode_t mode)
52800 +{
52801 + struct inode *inode = path->dentry->d_inode;
52802 + struct iattr newattrs;
52803 + int error;
52804 +
52805 + error = mnt_want_write(path->mnt);
52806 + if (error)
52807 + return error;
52808 + mutex_lock(&inode->i_mutex);
52809 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52810 + error = -EACCES;
52811 + goto out_unlock;
52812 + }
52813 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52814 + error = -EPERM;
52815 + goto out_unlock;
52816 + }
52817 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52818 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52819 + error = notify_change(path->dentry, &newattrs);
52820 +out_unlock:
52821 + mutex_unlock(&inode->i_mutex);
52822 + mnt_drop_write(path->mnt);
52823 + return error;
52824 +}
52825 +
52826 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52827 {
52828 - struct inode * inode;
52829 - struct dentry * dentry;
52830 struct file * file;
52831 int err = -EBADF;
52832 - struct iattr newattrs;
52833
52834 file = fget(fd);
52835 - if (!file)
52836 - goto out;
52837 -
52838 - dentry = file->f_path.dentry;
52839 - inode = dentry->d_inode;
52840 -
52841 - audit_inode(NULL, dentry);
52842 -
52843 - err = mnt_want_write_file(file);
52844 - if (err)
52845 - goto out_putf;
52846 - mutex_lock(&inode->i_mutex);
52847 - if (mode == (mode_t) -1)
52848 - mode = inode->i_mode;
52849 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52850 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52851 - err = notify_change(dentry, &newattrs);
52852 - mutex_unlock(&inode->i_mutex);
52853 - mnt_drop_write(file->f_path.mnt);
52854 -out_putf:
52855 - fput(file);
52856 -out:
52857 + if (file) {
52858 + audit_inode(NULL, file->f_path.dentry);
52859 + err = chmod_common(&file->f_path, mode);
52860 + fput(file);
52861 + }
52862 return err;
52863 }
52864
52865 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52866 {
52867 struct path path;
52868 - struct inode *inode;
52869 int error;
52870 - struct iattr newattrs;
52871
52872 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
52873 - if (error)
52874 - goto out;
52875 - inode = path.dentry->d_inode;
52876 -
52877 - error = mnt_want_write(path.mnt);
52878 - if (error)
52879 - goto dput_and_out;
52880 - mutex_lock(&inode->i_mutex);
52881 - if (mode == (mode_t) -1)
52882 - mode = inode->i_mode;
52883 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52884 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52885 - error = notify_change(path.dentry, &newattrs);
52886 - mutex_unlock(&inode->i_mutex);
52887 - mnt_drop_write(path.mnt);
52888 -dput_and_out:
52889 - path_put(&path);
52890 -out:
52891 + if (!error) {
52892 + error = chmod_common(&path, mode);
52893 + path_put(&path);
52894 + }
52895 return error;
52896 }
52897
52898 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52899 return sys_fchmodat(AT_FDCWD, filename, mode);
52900 }
52901
52902 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52903 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52904 {
52905 struct inode *inode = dentry->d_inode;
52906 int error;
52907 struct iattr newattrs;
52908
52909 + if (!gr_acl_handle_chown(dentry, mnt))
52910 + return -EACCES;
52911 +
52912 newattrs.ia_valid = ATTR_CTIME;
52913 if (user != (uid_t) -1) {
52914 newattrs.ia_valid |= ATTR_UID;
52915 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52916 error = mnt_want_write(path.mnt);
52917 if (error)
52918 goto out_release;
52919 - error = chown_common(path.dentry, user, group);
52920 + error = chown_common(path.dentry, user, group, path.mnt);
52921 mnt_drop_write(path.mnt);
52922 out_release:
52923 path_put(&path);
52924 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52925 error = mnt_want_write(path.mnt);
52926 if (error)
52927 goto out_release;
52928 - error = chown_common(path.dentry, user, group);
52929 + error = chown_common(path.dentry, user, group, path.mnt);
52930 mnt_drop_write(path.mnt);
52931 out_release:
52932 path_put(&path);
52933 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52934 error = mnt_want_write(path.mnt);
52935 if (error)
52936 goto out_release;
52937 - error = chown_common(path.dentry, user, group);
52938 + error = chown_common(path.dentry, user, group, path.mnt);
52939 mnt_drop_write(path.mnt);
52940 out_release:
52941 path_put(&path);
52942 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52943 goto out_fput;
52944 dentry = file->f_path.dentry;
52945 audit_inode(NULL, dentry);
52946 - error = chown_common(dentry, user, group);
52947 + error = chown_common(dentry, user, group, file->f_path.mnt);
52948 mnt_drop_write(file->f_path.mnt);
52949 out_fput:
52950 fput(file);
52951 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52952 if (!IS_ERR(tmp)) {
52953 fd = get_unused_fd_flags(flags);
52954 if (fd >= 0) {
52955 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52956 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52957 if (IS_ERR(f)) {
52958 put_unused_fd(fd);
52959 fd = PTR_ERR(f);
52960 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52961 index 6ab70f4..f4103d1 100644
52962 --- a/fs/partitions/efi.c
52963 +++ b/fs/partitions/efi.c
52964 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52965 if (!bdev || !gpt)
52966 return NULL;
52967
52968 + if (!le32_to_cpu(gpt->num_partition_entries))
52969 + return NULL;
52970 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52971 + if (!pte)
52972 + return NULL;
52973 +
52974 count = le32_to_cpu(gpt->num_partition_entries) *
52975 le32_to_cpu(gpt->sizeof_partition_entry);
52976 - if (!count)
52977 - return NULL;
52978 - pte = kzalloc(count, GFP_KERNEL);
52979 - if (!pte)
52980 - return NULL;
52981 -
52982 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52983 (u8 *) pte,
52984 count) < count) {
52985 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52986 index dd6efdb..3babc6c 100644
52987 --- a/fs/partitions/ldm.c
52988 +++ b/fs/partitions/ldm.c
52989 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52990 ldm_error ("A VBLK claims to have %d parts.", num);
52991 return false;
52992 }
52993 +
52994 if (rec >= num) {
52995 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52996 return false;
52997 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52998 goto found;
52999 }
53000
53001 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
53002 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
53003 if (!f) {
53004 ldm_crit ("Out of memory.");
53005 return false;
53006 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
53007 index 5765198..7f8e9e0 100644
53008 --- a/fs/partitions/mac.c
53009 +++ b/fs/partitions/mac.c
53010 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
53011 return 0; /* not a MacOS disk */
53012 }
53013 blocks_in_map = be32_to_cpu(part->map_count);
53014 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53015 - put_dev_sector(sect);
53016 - return 0;
53017 - }
53018 printk(" [mac]");
53019 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53020 + put_dev_sector(sect);
53021 + return 0;
53022 + }
53023 for (slot = 1; slot <= blocks_in_map; ++slot) {
53024 int pos = slot * secsize;
53025 put_dev_sector(sect);
53026 diff --git a/fs/pipe.c b/fs/pipe.c
53027 index d0cc080..8a6f211 100644
53028 --- a/fs/pipe.c
53029 +++ b/fs/pipe.c
53030 @@ -401,9 +401,9 @@ redo:
53031 }
53032 if (bufs) /* More to do? */
53033 continue;
53034 - if (!pipe->writers)
53035 + if (!atomic_read(&pipe->writers))
53036 break;
53037 - if (!pipe->waiting_writers) {
53038 + if (!atomic_read(&pipe->waiting_writers)) {
53039 /* syscall merging: Usually we must not sleep
53040 * if O_NONBLOCK is set, or if we got some data.
53041 * But if a writer sleeps in kernel space, then
53042 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53043 mutex_lock(&inode->i_mutex);
53044 pipe = inode->i_pipe;
53045
53046 - if (!pipe->readers) {
53047 + if (!atomic_read(&pipe->readers)) {
53048 send_sig(SIGPIPE, current, 0);
53049 ret = -EPIPE;
53050 goto out;
53051 @@ -511,7 +511,7 @@ redo1:
53052 for (;;) {
53053 int bufs;
53054
53055 - if (!pipe->readers) {
53056 + if (!atomic_read(&pipe->readers)) {
53057 send_sig(SIGPIPE, current, 0);
53058 if (!ret)
53059 ret = -EPIPE;
53060 @@ -597,9 +597,9 @@ redo2:
53061 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53062 do_wakeup = 0;
53063 }
53064 - pipe->waiting_writers++;
53065 + atomic_inc(&pipe->waiting_writers);
53066 pipe_wait(pipe);
53067 - pipe->waiting_writers--;
53068 + atomic_dec(&pipe->waiting_writers);
53069 }
53070 out:
53071 mutex_unlock(&inode->i_mutex);
53072 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53073 mask = 0;
53074 if (filp->f_mode & FMODE_READ) {
53075 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53076 - if (!pipe->writers && filp->f_version != pipe->w_counter)
53077 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53078 mask |= POLLHUP;
53079 }
53080
53081 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53082 * Most Unices do not set POLLERR for FIFOs but on Linux they
53083 * behave exactly like pipes for poll().
53084 */
53085 - if (!pipe->readers)
53086 + if (!atomic_read(&pipe->readers))
53087 mask |= POLLERR;
53088 }
53089
53090 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53091
53092 mutex_lock(&inode->i_mutex);
53093 pipe = inode->i_pipe;
53094 - pipe->readers -= decr;
53095 - pipe->writers -= decw;
53096 + atomic_sub(decr, &pipe->readers);
53097 + atomic_sub(decw, &pipe->writers);
53098
53099 - if (!pipe->readers && !pipe->writers) {
53100 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53101 free_pipe_info(inode);
53102 } else {
53103 wake_up_interruptible_sync(&pipe->wait);
53104 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53105
53106 if (inode->i_pipe) {
53107 ret = 0;
53108 - inode->i_pipe->readers++;
53109 + atomic_inc(&inode->i_pipe->readers);
53110 }
53111
53112 mutex_unlock(&inode->i_mutex);
53113 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53114
53115 if (inode->i_pipe) {
53116 ret = 0;
53117 - inode->i_pipe->writers++;
53118 + atomic_inc(&inode->i_pipe->writers);
53119 }
53120
53121 mutex_unlock(&inode->i_mutex);
53122 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53123 if (inode->i_pipe) {
53124 ret = 0;
53125 if (filp->f_mode & FMODE_READ)
53126 - inode->i_pipe->readers++;
53127 + atomic_inc(&inode->i_pipe->readers);
53128 if (filp->f_mode & FMODE_WRITE)
53129 - inode->i_pipe->writers++;
53130 + atomic_inc(&inode->i_pipe->writers);
53131 }
53132
53133 mutex_unlock(&inode->i_mutex);
53134 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
53135 inode->i_pipe = NULL;
53136 }
53137
53138 -static struct vfsmount *pipe_mnt __read_mostly;
53139 +struct vfsmount *pipe_mnt __read_mostly;
53140 static int pipefs_delete_dentry(struct dentry *dentry)
53141 {
53142 /*
53143 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
53144 goto fail_iput;
53145 inode->i_pipe = pipe;
53146
53147 - pipe->readers = pipe->writers = 1;
53148 + atomic_set(&pipe->readers, 1);
53149 + atomic_set(&pipe->writers, 1);
53150 inode->i_fop = &rdwr_pipefifo_fops;
53151
53152 /*
53153 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53154 index 50f8f06..c5755df 100644
53155 --- a/fs/proc/Kconfig
53156 +++ b/fs/proc/Kconfig
53157 @@ -30,12 +30,12 @@ config PROC_FS
53158
53159 config PROC_KCORE
53160 bool "/proc/kcore support" if !ARM
53161 - depends on PROC_FS && MMU
53162 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53163
53164 config PROC_VMCORE
53165 bool "/proc/vmcore support (EXPERIMENTAL)"
53166 - depends on PROC_FS && CRASH_DUMP
53167 - default y
53168 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53169 + default n
53170 help
53171 Exports the dump image of crashed kernel in ELF format.
53172
53173 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53174 limited in memory.
53175
53176 config PROC_PAGE_MONITOR
53177 - default y
53178 - depends on PROC_FS && MMU
53179 + default n
53180 + depends on PROC_FS && MMU && !GRKERNSEC
53181 bool "Enable /proc page monitoring" if EMBEDDED
53182 help
53183 Various /proc files exist to monitor process memory utilization:
53184 diff --git a/fs/proc/array.c b/fs/proc/array.c
53185 index c5ef152..28c94f7 100644
53186 --- a/fs/proc/array.c
53187 +++ b/fs/proc/array.c
53188 @@ -60,6 +60,7 @@
53189 #include <linux/tty.h>
53190 #include <linux/string.h>
53191 #include <linux/mman.h>
53192 +#include <linux/grsecurity.h>
53193 #include <linux/proc_fs.h>
53194 #include <linux/ioport.h>
53195 #include <linux/uaccess.h>
53196 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53197 p->nivcsw);
53198 }
53199
53200 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53201 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
53202 +{
53203 + if (p->mm)
53204 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53205 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53206 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53207 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53208 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53209 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53210 + else
53211 + seq_printf(m, "PaX:\t-----\n");
53212 +}
53213 +#endif
53214 +
53215 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53216 struct pid *pid, struct task_struct *task)
53217 {
53218 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53219 task_cap(m, task);
53220 cpuset_task_status_allowed(m, task);
53221 task_context_switch_counts(m, task);
53222 +
53223 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53224 + task_pax(m, task);
53225 +#endif
53226 +
53227 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53228 + task_grsec_rbac(m, task);
53229 +#endif
53230 +
53231 return 0;
53232 }
53233
53234 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53235 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53236 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53237 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53238 +#endif
53239 +
53240 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53241 struct pid *pid, struct task_struct *task, int whole)
53242 {
53243 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53244 cputime_t cutime, cstime, utime, stime;
53245 cputime_t cgtime, gtime;
53246 unsigned long rsslim = 0;
53247 - char tcomm[sizeof(task->comm)];
53248 + char tcomm[sizeof(task->comm)] = { 0 };
53249 unsigned long flags;
53250
53251 + pax_track_stack();
53252 +
53253 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53254 + if (current->exec_id != m->exec_id) {
53255 + gr_log_badprocpid("stat");
53256 + return 0;
53257 + }
53258 +#endif
53259 +
53260 state = *get_task_state(task);
53261 vsize = eip = esp = 0;
53262 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53263 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53264 gtime = task_gtime(task);
53265 }
53266
53267 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53268 + if (PAX_RAND_FLAGS(mm)) {
53269 + eip = 0;
53270 + esp = 0;
53271 + wchan = 0;
53272 + }
53273 +#endif
53274 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53275 + wchan = 0;
53276 + eip =0;
53277 + esp =0;
53278 +#endif
53279 +
53280 /* scale priority and nice values from timeslices to -20..20 */
53281 /* to make it look like a "normal" Unix priority/nice value */
53282 priority = task_prio(task);
53283 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53284 vsize,
53285 mm ? get_mm_rss(mm) : 0,
53286 rsslim,
53287 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53288 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53289 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53290 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53291 +#else
53292 mm ? (permitted ? mm->start_code : 1) : 0,
53293 mm ? (permitted ? mm->end_code : 1) : 0,
53294 (permitted && mm) ? mm->start_stack : 0,
53295 +#endif
53296 esp,
53297 eip,
53298 /* The signal information here is obsolete.
53299 @@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53300 struct pid *pid, struct task_struct *task)
53301 {
53302 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53303 - struct mm_struct *mm = get_task_mm(task);
53304 + struct mm_struct *mm;
53305
53306 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53307 + if (current->exec_id != m->exec_id) {
53308 + gr_log_badprocpid("statm");
53309 + return 0;
53310 + }
53311 +#endif
53312 +
53313 + mm = get_task_mm(task);
53314 if (mm) {
53315 size = task_statm(mm, &shared, &text, &data, &resident);
53316 mmput(mm);
53317 @@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53318
53319 return 0;
53320 }
53321 +
53322 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53323 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53324 +{
53325 + u32 curr_ip = 0;
53326 + unsigned long flags;
53327 +
53328 + if (lock_task_sighand(task, &flags)) {
53329 + curr_ip = task->signal->curr_ip;
53330 + unlock_task_sighand(task, &flags);
53331 + }
53332 +
53333 + return sprintf(buffer, "%pI4\n", &curr_ip);
53334 +}
53335 +#endif
53336 diff --git a/fs/proc/base.c b/fs/proc/base.c
53337 index 67f7dc0..a86ad9a 100644
53338 --- a/fs/proc/base.c
53339 +++ b/fs/proc/base.c
53340 @@ -102,6 +102,22 @@ struct pid_entry {
53341 union proc_op op;
53342 };
53343
53344 +struct getdents_callback {
53345 + struct linux_dirent __user * current_dir;
53346 + struct linux_dirent __user * previous;
53347 + struct file * file;
53348 + int count;
53349 + int error;
53350 +};
53351 +
53352 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53353 + loff_t offset, u64 ino, unsigned int d_type)
53354 +{
53355 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53356 + buf->error = -EINVAL;
53357 + return 0;
53358 +}
53359 +
53360 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53361 .name = (NAME), \
53362 .len = sizeof(NAME) - 1, \
53363 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53364 if (task == current)
53365 return 0;
53366
53367 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53368 + return -EPERM;
53369 +
53370 /*
53371 * If current is actively ptrace'ing, and would also be
53372 * permitted to freshly attach with ptrace now, permit it.
53373 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53374 if (!mm->arg_end)
53375 goto out_mm; /* Shh! No looking before we're done */
53376
53377 + if (gr_acl_handle_procpidmem(task))
53378 + goto out_mm;
53379 +
53380 len = mm->arg_end - mm->arg_start;
53381
53382 if (len > PAGE_SIZE)
53383 @@ -287,12 +309,28 @@ out:
53384 return res;
53385 }
53386
53387 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53388 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53389 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53390 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53391 +#endif
53392 +
53393 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53394 {
53395 int res = 0;
53396 struct mm_struct *mm = get_task_mm(task);
53397 if (mm) {
53398 unsigned int nwords = 0;
53399 +
53400 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53401 + /* allow if we're currently ptracing this task */
53402 + if (PAX_RAND_FLAGS(mm) &&
53403 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53404 + mmput(mm);
53405 + return 0;
53406 + }
53407 +#endif
53408 +
53409 do {
53410 nwords += 2;
53411 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53412 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53413 }
53414
53415
53416 -#ifdef CONFIG_KALLSYMS
53417 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53418 /*
53419 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53420 * Returns the resolved symbol. If that fails, simply return the address.
53421 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53422 mutex_unlock(&task->cred_guard_mutex);
53423 }
53424
53425 -#ifdef CONFIG_STACKTRACE
53426 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53427
53428 #define MAX_STACK_TRACE_DEPTH 64
53429
53430 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53431 return count;
53432 }
53433
53434 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53435 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53436 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53437 {
53438 long nr;
53439 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53440 /************************************************************************/
53441
53442 /* permission checks */
53443 -static int proc_fd_access_allowed(struct inode *inode)
53444 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53445 {
53446 struct task_struct *task;
53447 int allowed = 0;
53448 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53449 */
53450 task = get_proc_task(inode);
53451 if (task) {
53452 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53453 + if (log)
53454 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53455 + else
53456 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53457 put_task_struct(task);
53458 }
53459 return allowed;
53460 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53461 static int mem_open(struct inode* inode, struct file* file)
53462 {
53463 file->private_data = (void*)((long)current->self_exec_id);
53464 +
53465 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53466 + file->f_version = current->exec_id;
53467 +#endif
53468 +
53469 return 0;
53470 }
53471
53472 +static int task_dumpable(struct task_struct *task);
53473 +
53474 static ssize_t mem_read(struct file * file, char __user * buf,
53475 size_t count, loff_t *ppos)
53476 {
53477 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53478 int ret = -ESRCH;
53479 struct mm_struct *mm;
53480
53481 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53482 + if (file->f_version != current->exec_id) {
53483 + gr_log_badprocpid("mem");
53484 + return 0;
53485 + }
53486 +#endif
53487 +
53488 if (!task)
53489 goto out_no_task;
53490
53491 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53492 if (!task)
53493 goto out_no_task;
53494
53495 + if (gr_acl_handle_procpidmem(task))
53496 + goto out;
53497 +
53498 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53499 goto out;
53500
53501 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53502 path_put(&nd->path);
53503
53504 /* Are we allowed to snoop on the tasks file descriptors? */
53505 - if (!proc_fd_access_allowed(inode))
53506 + if (!proc_fd_access_allowed(inode,0))
53507 goto out;
53508
53509 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53510 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53511 struct path path;
53512
53513 /* Are we allowed to snoop on the tasks file descriptors? */
53514 - if (!proc_fd_access_allowed(inode))
53515 - goto out;
53516 + /* logging this is needed for learning on chromium to work properly,
53517 + but we don't want to flood the logs from 'ps' which does a readlink
53518 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53519 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
53520 + */
53521 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53522 + if (!proc_fd_access_allowed(inode,0))
53523 + goto out;
53524 + } else {
53525 + if (!proc_fd_access_allowed(inode,1))
53526 + goto out;
53527 + }
53528
53529 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53530 if (error)
53531 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53532 rcu_read_lock();
53533 cred = __task_cred(task);
53534 inode->i_uid = cred->euid;
53535 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53536 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53537 +#else
53538 inode->i_gid = cred->egid;
53539 +#endif
53540 rcu_read_unlock();
53541 }
53542 security_task_to_inode(task, inode);
53543 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53544 struct inode *inode = dentry->d_inode;
53545 struct task_struct *task;
53546 const struct cred *cred;
53547 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53548 + const struct cred *tmpcred = current_cred();
53549 +#endif
53550
53551 generic_fillattr(inode, stat);
53552
53553 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53554 stat->uid = 0;
53555 stat->gid = 0;
53556 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53557 +
53558 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53559 + rcu_read_unlock();
53560 + return -ENOENT;
53561 + }
53562 +
53563 if (task) {
53564 + cred = __task_cred(task);
53565 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53566 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53567 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53568 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53569 +#endif
53570 + ) {
53571 +#endif
53572 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53573 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53574 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53575 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53576 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53577 +#endif
53578 task_dumpable(task)) {
53579 - cred = __task_cred(task);
53580 stat->uid = cred->euid;
53581 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53582 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53583 +#else
53584 stat->gid = cred->egid;
53585 +#endif
53586 }
53587 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53588 + } else {
53589 + rcu_read_unlock();
53590 + return -ENOENT;
53591 + }
53592 +#endif
53593 }
53594 rcu_read_unlock();
53595 return 0;
53596 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53597
53598 if (task) {
53599 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53600 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53601 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53602 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53603 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53604 +#endif
53605 task_dumpable(task)) {
53606 rcu_read_lock();
53607 cred = __task_cred(task);
53608 inode->i_uid = cred->euid;
53609 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53610 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53611 +#else
53612 inode->i_gid = cred->egid;
53613 +#endif
53614 rcu_read_unlock();
53615 } else {
53616 inode->i_uid = 0;
53617 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53618 int fd = proc_fd(inode);
53619
53620 if (task) {
53621 - files = get_files_struct(task);
53622 + if (!gr_acl_handle_procpidmem(task))
53623 + files = get_files_struct(task);
53624 put_task_struct(task);
53625 }
53626 if (files) {
53627 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53628 static int proc_fd_permission(struct inode *inode, int mask)
53629 {
53630 int rv;
53631 + struct task_struct *task;
53632
53633 rv = generic_permission(inode, mask, NULL);
53634 - if (rv == 0)
53635 - return 0;
53636 +
53637 if (task_pid(current) == proc_pid(inode))
53638 rv = 0;
53639 +
53640 + task = get_proc_task(inode);
53641 + if (task == NULL)
53642 + return rv;
53643 +
53644 + if (gr_acl_handle_procpidmem(task))
53645 + rv = -EACCES;
53646 +
53647 + put_task_struct(task);
53648 +
53649 return rv;
53650 }
53651
53652 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53653 if (!task)
53654 goto out_no_task;
53655
53656 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53657 + goto out;
53658 +
53659 /*
53660 * Yes, it does not scale. And it should not. Don't add
53661 * new entries into /proc/<tgid>/ without very good reasons.
53662 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53663 if (!task)
53664 goto out_no_task;
53665
53666 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53667 + goto out;
53668 +
53669 ret = 0;
53670 i = filp->f_pos;
53671 switch (i) {
53672 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53673 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53674 void *cookie)
53675 {
53676 - char *s = nd_get_link(nd);
53677 + const char *s = nd_get_link(nd);
53678 if (!IS_ERR(s))
53679 __putname(s);
53680 }
53681 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53682 #ifdef CONFIG_SCHED_DEBUG
53683 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53684 #endif
53685 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53686 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53687 INF("syscall", S_IRUGO, proc_pid_syscall),
53688 #endif
53689 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53690 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53691 #ifdef CONFIG_SECURITY
53692 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53693 #endif
53694 -#ifdef CONFIG_KALLSYMS
53695 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53696 INF("wchan", S_IRUGO, proc_pid_wchan),
53697 #endif
53698 -#ifdef CONFIG_STACKTRACE
53699 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53700 ONE("stack", S_IRUGO, proc_pid_stack),
53701 #endif
53702 #ifdef CONFIG_SCHEDSTATS
53703 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53704 #ifdef CONFIG_TASK_IO_ACCOUNTING
53705 INF("io", S_IRUSR, proc_tgid_io_accounting),
53706 #endif
53707 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53708 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53709 +#endif
53710 };
53711
53712 static int proc_tgid_base_readdir(struct file * filp,
53713 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53714 if (!inode)
53715 goto out;
53716
53717 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53718 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53719 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53720 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53721 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53722 +#else
53723 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53724 +#endif
53725 inode->i_op = &proc_tgid_base_inode_operations;
53726 inode->i_fop = &proc_tgid_base_operations;
53727 inode->i_flags|=S_IMMUTABLE;
53728 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53729 if (!task)
53730 goto out;
53731
53732 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53733 + goto out_put_task;
53734 +
53735 result = proc_pid_instantiate(dir, dentry, task, NULL);
53736 +out_put_task:
53737 put_task_struct(task);
53738 out:
53739 return result;
53740 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53741 {
53742 unsigned int nr;
53743 struct task_struct *reaper;
53744 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53745 + const struct cred *tmpcred = current_cred();
53746 + const struct cred *itercred;
53747 +#endif
53748 + filldir_t __filldir = filldir;
53749 struct tgid_iter iter;
53750 struct pid_namespace *ns;
53751
53752 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53753 for (iter = next_tgid(ns, iter);
53754 iter.task;
53755 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53756 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53757 + rcu_read_lock();
53758 + itercred = __task_cred(iter.task);
53759 +#endif
53760 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53761 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53762 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53763 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53764 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53765 +#endif
53766 + )
53767 +#endif
53768 + )
53769 + __filldir = &gr_fake_filldir;
53770 + else
53771 + __filldir = filldir;
53772 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53773 + rcu_read_unlock();
53774 +#endif
53775 filp->f_pos = iter.tgid + TGID_OFFSET;
53776 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53777 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53778 put_task_struct(iter.task);
53779 goto out;
53780 }
53781 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53782 #ifdef CONFIG_SCHED_DEBUG
53783 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53784 #endif
53785 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53786 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53787 INF("syscall", S_IRUGO, proc_pid_syscall),
53788 #endif
53789 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53790 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53791 #ifdef CONFIG_SECURITY
53792 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53793 #endif
53794 -#ifdef CONFIG_KALLSYMS
53795 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53796 INF("wchan", S_IRUGO, proc_pid_wchan),
53797 #endif
53798 -#ifdef CONFIG_STACKTRACE
53799 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53800 ONE("stack", S_IRUGO, proc_pid_stack),
53801 #endif
53802 #ifdef CONFIG_SCHEDSTATS
53803 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53804 index 82676e3..5f8518a 100644
53805 --- a/fs/proc/cmdline.c
53806 +++ b/fs/proc/cmdline.c
53807 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53808
53809 static int __init proc_cmdline_init(void)
53810 {
53811 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53812 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53813 +#else
53814 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53815 +#endif
53816 return 0;
53817 }
53818 module_init(proc_cmdline_init);
53819 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53820 index 59ee7da..469b4b6 100644
53821 --- a/fs/proc/devices.c
53822 +++ b/fs/proc/devices.c
53823 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53824
53825 static int __init proc_devices_init(void)
53826 {
53827 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53828 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53829 +#else
53830 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53831 +#endif
53832 return 0;
53833 }
53834 module_init(proc_devices_init);
53835 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53836 index d78ade3..81767f9 100644
53837 --- a/fs/proc/inode.c
53838 +++ b/fs/proc/inode.c
53839 @@ -18,12 +18,19 @@
53840 #include <linux/module.h>
53841 #include <linux/smp_lock.h>
53842 #include <linux/sysctl.h>
53843 +#include <linux/grsecurity.h>
53844
53845 #include <asm/system.h>
53846 #include <asm/uaccess.h>
53847
53848 #include "internal.h"
53849
53850 +#ifdef CONFIG_PROC_SYSCTL
53851 +extern const struct inode_operations proc_sys_inode_operations;
53852 +extern const struct inode_operations proc_sys_dir_operations;
53853 +#endif
53854 +
53855 +
53856 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53857 {
53858 atomic_inc(&de->count);
53859 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53860 de_put(de);
53861 if (PROC_I(inode)->sysctl)
53862 sysctl_head_put(PROC_I(inode)->sysctl);
53863 +
53864 +#ifdef CONFIG_PROC_SYSCTL
53865 + if (inode->i_op == &proc_sys_inode_operations ||
53866 + inode->i_op == &proc_sys_dir_operations)
53867 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53868 +#endif
53869 +
53870 clear_inode(inode);
53871 }
53872
53873 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53874 if (de->mode) {
53875 inode->i_mode = de->mode;
53876 inode->i_uid = de->uid;
53877 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53878 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53879 +#else
53880 inode->i_gid = de->gid;
53881 +#endif
53882 }
53883 if (de->size)
53884 inode->i_size = de->size;
53885 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53886 index 753ca37..26bcf3b 100644
53887 --- a/fs/proc/internal.h
53888 +++ b/fs/proc/internal.h
53889 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53890 struct pid *pid, struct task_struct *task);
53891 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53892 struct pid *pid, struct task_struct *task);
53893 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53894 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53895 +#endif
53896 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53897
53898 extern const struct file_operations proc_maps_operations;
53899 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53900 index b442dac..aab29cb 100644
53901 --- a/fs/proc/kcore.c
53902 +++ b/fs/proc/kcore.c
53903 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53904 off_t offset = 0;
53905 struct kcore_list *m;
53906
53907 + pax_track_stack();
53908 +
53909 /* setup ELF header */
53910 elf = (struct elfhdr *) bufp;
53911 bufp += sizeof(struct elfhdr);
53912 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53913 * the addresses in the elf_phdr on our list.
53914 */
53915 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53916 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53917 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53918 + if (tsz > buflen)
53919 tsz = buflen;
53920 -
53921 +
53922 while (buflen) {
53923 struct kcore_list *m;
53924
53925 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53926 kfree(elf_buf);
53927 } else {
53928 if (kern_addr_valid(start)) {
53929 - unsigned long n;
53930 + char *elf_buf;
53931 + mm_segment_t oldfs;
53932
53933 - n = copy_to_user(buffer, (char *)start, tsz);
53934 - /*
53935 - * We cannot distingush between fault on source
53936 - * and fault on destination. When this happens
53937 - * we clear too and hope it will trigger the
53938 - * EFAULT again.
53939 - */
53940 - if (n) {
53941 - if (clear_user(buffer + tsz - n,
53942 - n))
53943 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53944 + if (!elf_buf)
53945 + return -ENOMEM;
53946 + oldfs = get_fs();
53947 + set_fs(KERNEL_DS);
53948 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53949 + set_fs(oldfs);
53950 + if (copy_to_user(buffer, elf_buf, tsz)) {
53951 + kfree(elf_buf);
53952 return -EFAULT;
53953 + }
53954 }
53955 + set_fs(oldfs);
53956 + kfree(elf_buf);
53957 } else {
53958 if (clear_user(buffer, tsz))
53959 return -EFAULT;
53960 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53961
53962 static int open_kcore(struct inode *inode, struct file *filp)
53963 {
53964 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53965 + return -EPERM;
53966 +#endif
53967 if (!capable(CAP_SYS_RAWIO))
53968 return -EPERM;
53969 if (kcore_need_update)
53970 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53971 index 7ca7834..cfe90a4 100644
53972 --- a/fs/proc/kmsg.c
53973 +++ b/fs/proc/kmsg.c
53974 @@ -12,37 +12,37 @@
53975 #include <linux/poll.h>
53976 #include <linux/proc_fs.h>
53977 #include <linux/fs.h>
53978 +#include <linux/syslog.h>
53979
53980 #include <asm/uaccess.h>
53981 #include <asm/io.h>
53982
53983 extern wait_queue_head_t log_wait;
53984
53985 -extern int do_syslog(int type, char __user *bug, int count);
53986 -
53987 static int kmsg_open(struct inode * inode, struct file * file)
53988 {
53989 - return do_syslog(1,NULL,0);
53990 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53991 }
53992
53993 static int kmsg_release(struct inode * inode, struct file * file)
53994 {
53995 - (void) do_syslog(0,NULL,0);
53996 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53997 return 0;
53998 }
53999
54000 static ssize_t kmsg_read(struct file *file, char __user *buf,
54001 size_t count, loff_t *ppos)
54002 {
54003 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
54004 + if ((file->f_flags & O_NONBLOCK) &&
54005 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54006 return -EAGAIN;
54007 - return do_syslog(2, buf, count);
54008 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
54009 }
54010
54011 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
54012 {
54013 poll_wait(file, &log_wait, wait);
54014 - if (do_syslog(9, NULL, 0))
54015 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54016 return POLLIN | POLLRDNORM;
54017 return 0;
54018 }
54019 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54020 index a65239c..ad1182a 100644
54021 --- a/fs/proc/meminfo.c
54022 +++ b/fs/proc/meminfo.c
54023 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54024 unsigned long pages[NR_LRU_LISTS];
54025 int lru;
54026
54027 + pax_track_stack();
54028 +
54029 /*
54030 * display in kilobytes.
54031 */
54032 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54033 vmi.used >> 10,
54034 vmi.largest_chunk >> 10
54035 #ifdef CONFIG_MEMORY_FAILURE
54036 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54037 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54038 #endif
54039 );
54040
54041 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54042 index 9fe7d7e..cdb62c9 100644
54043 --- a/fs/proc/nommu.c
54044 +++ b/fs/proc/nommu.c
54045 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54046 if (len < 1)
54047 len = 1;
54048 seq_printf(m, "%*c", len, ' ');
54049 - seq_path(m, &file->f_path, "");
54050 + seq_path(m, &file->f_path, "\n\\");
54051 }
54052
54053 seq_putc(m, '\n');
54054 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54055 index 04d1270..25e1173 100644
54056 --- a/fs/proc/proc_net.c
54057 +++ b/fs/proc/proc_net.c
54058 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54059 struct task_struct *task;
54060 struct nsproxy *ns;
54061 struct net *net = NULL;
54062 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54063 + const struct cred *cred = current_cred();
54064 +#endif
54065 +
54066 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54067 + if (cred->fsuid)
54068 + return net;
54069 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54070 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
54071 + return net;
54072 +#endif
54073
54074 rcu_read_lock();
54075 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54076 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54077 index f667e8a..55f4d96 100644
54078 --- a/fs/proc/proc_sysctl.c
54079 +++ b/fs/proc/proc_sysctl.c
54080 @@ -7,11 +7,13 @@
54081 #include <linux/security.h>
54082 #include "internal.h"
54083
54084 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
54085 +
54086 static const struct dentry_operations proc_sys_dentry_operations;
54087 static const struct file_operations proc_sys_file_operations;
54088 -static const struct inode_operations proc_sys_inode_operations;
54089 +const struct inode_operations proc_sys_inode_operations;
54090 static const struct file_operations proc_sys_dir_file_operations;
54091 -static const struct inode_operations proc_sys_dir_operations;
54092 +const struct inode_operations proc_sys_dir_operations;
54093
54094 static struct inode *proc_sys_make_inode(struct super_block *sb,
54095 struct ctl_table_header *head, struct ctl_table *table)
54096 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54097 if (!p)
54098 goto out;
54099
54100 + if (gr_handle_sysctl(p, MAY_EXEC))
54101 + goto out;
54102 +
54103 err = ERR_PTR(-ENOMEM);
54104 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
54105 if (h)
54106 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54107
54108 err = NULL;
54109 dentry->d_op = &proc_sys_dentry_operations;
54110 +
54111 + gr_handle_proc_create(dentry, inode);
54112 +
54113 d_add(dentry, inode);
54114
54115 out:
54116 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54117 return -ENOMEM;
54118 } else {
54119 child->d_op = &proc_sys_dentry_operations;
54120 +
54121 + gr_handle_proc_create(child, inode);
54122 +
54123 d_add(child, inode);
54124 }
54125 } else {
54126 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54127 if (*pos < file->f_pos)
54128 continue;
54129
54130 + if (gr_handle_sysctl(table, 0))
54131 + continue;
54132 +
54133 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
54134 if (res)
54135 return res;
54136 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54137 if (IS_ERR(head))
54138 return PTR_ERR(head);
54139
54140 + if (table && gr_handle_sysctl(table, MAY_EXEC))
54141 + return -ENOENT;
54142 +
54143 generic_fillattr(inode, stat);
54144 if (table)
54145 stat->mode = (stat->mode & S_IFMT) | table->mode;
54146 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
54147 };
54148
54149 static const struct file_operations proc_sys_dir_file_operations = {
54150 + .read = generic_read_dir,
54151 .readdir = proc_sys_readdir,
54152 .llseek = generic_file_llseek,
54153 };
54154
54155 -static const struct inode_operations proc_sys_inode_operations = {
54156 +const struct inode_operations proc_sys_inode_operations = {
54157 .permission = proc_sys_permission,
54158 .setattr = proc_sys_setattr,
54159 .getattr = proc_sys_getattr,
54160 };
54161
54162 -static const struct inode_operations proc_sys_dir_operations = {
54163 +const struct inode_operations proc_sys_dir_operations = {
54164 .lookup = proc_sys_lookup,
54165 .permission = proc_sys_permission,
54166 .setattr = proc_sys_setattr,
54167 diff --git a/fs/proc/root.c b/fs/proc/root.c
54168 index b080b79..d957e63 100644
54169 --- a/fs/proc/root.c
54170 +++ b/fs/proc/root.c
54171 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
54172 #ifdef CONFIG_PROC_DEVICETREE
54173 proc_device_tree_init();
54174 #endif
54175 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54176 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54177 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54178 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54179 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54180 +#endif
54181 +#else
54182 proc_mkdir("bus", NULL);
54183 +#endif
54184 proc_sys_init();
54185 }
54186
54187 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54188 index 3b7b82a..4b420b0 100644
54189 --- a/fs/proc/task_mmu.c
54190 +++ b/fs/proc/task_mmu.c
54191 @@ -8,6 +8,7 @@
54192 #include <linux/mempolicy.h>
54193 #include <linux/swap.h>
54194 #include <linux/swapops.h>
54195 +#include <linux/grsecurity.h>
54196
54197 #include <asm/elf.h>
54198 #include <asm/uaccess.h>
54199 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54200 "VmStk:\t%8lu kB\n"
54201 "VmExe:\t%8lu kB\n"
54202 "VmLib:\t%8lu kB\n"
54203 - "VmPTE:\t%8lu kB\n",
54204 - hiwater_vm << (PAGE_SHIFT-10),
54205 + "VmPTE:\t%8lu kB\n"
54206 +
54207 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54208 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54209 +#endif
54210 +
54211 + ,hiwater_vm << (PAGE_SHIFT-10),
54212 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54213 mm->locked_vm << (PAGE_SHIFT-10),
54214 hiwater_rss << (PAGE_SHIFT-10),
54215 total_rss << (PAGE_SHIFT-10),
54216 data << (PAGE_SHIFT-10),
54217 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54218 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54219 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54220 +
54221 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54222 + , mm->context.user_cs_base, mm->context.user_cs_limit
54223 +#endif
54224 +
54225 + );
54226 }
54227
54228 unsigned long task_vsize(struct mm_struct *mm)
54229 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54230 struct proc_maps_private *priv = m->private;
54231 struct vm_area_struct *vma = v;
54232
54233 - vma_stop(priv, vma);
54234 + if (!IS_ERR(vma))
54235 + vma_stop(priv, vma);
54236 if (priv->task)
54237 put_task_struct(priv->task);
54238 }
54239 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54240 return ret;
54241 }
54242
54243 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54244 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54245 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54246 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54247 +#endif
54248 +
54249 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54250 {
54251 struct mm_struct *mm = vma->vm_mm;
54252 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54253 int flags = vma->vm_flags;
54254 unsigned long ino = 0;
54255 unsigned long long pgoff = 0;
54256 - unsigned long start;
54257 dev_t dev = 0;
54258 int len;
54259
54260 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54261 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54262 }
54263
54264 - /* We don't show the stack guard page in /proc/maps */
54265 - start = vma->vm_start;
54266 - if (vma->vm_flags & VM_GROWSDOWN)
54267 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54268 - start += PAGE_SIZE;
54269 -
54270 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54271 - start,
54272 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54273 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54274 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54275 +#else
54276 + vma->vm_start,
54277 vma->vm_end,
54278 +#endif
54279 flags & VM_READ ? 'r' : '-',
54280 flags & VM_WRITE ? 'w' : '-',
54281 flags & VM_EXEC ? 'x' : '-',
54282 flags & VM_MAYSHARE ? 's' : 'p',
54283 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54284 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54285 +#else
54286 pgoff,
54287 +#endif
54288 MAJOR(dev), MINOR(dev), ino, &len);
54289
54290 /*
54291 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54292 */
54293 if (file) {
54294 pad_len_spaces(m, len);
54295 - seq_path(m, &file->f_path, "\n");
54296 + seq_path(m, &file->f_path, "\n\\");
54297 } else {
54298 const char *name = arch_vma_name(vma);
54299 if (!name) {
54300 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54301 if (vma->vm_start <= mm->brk &&
54302 vma->vm_end >= mm->start_brk) {
54303 name = "[heap]";
54304 - } else if (vma->vm_start <= mm->start_stack &&
54305 - vma->vm_end >= mm->start_stack) {
54306 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54307 + (vma->vm_start <= mm->start_stack &&
54308 + vma->vm_end >= mm->start_stack)) {
54309 name = "[stack]";
54310 }
54311 } else {
54312 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54313 struct proc_maps_private *priv = m->private;
54314 struct task_struct *task = priv->task;
54315
54316 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54317 + if (current->exec_id != m->exec_id) {
54318 + gr_log_badprocpid("maps");
54319 + return 0;
54320 + }
54321 +#endif
54322 +
54323 show_map_vma(m, vma);
54324
54325 if (m->count < m->size) /* vma is copied successfully */
54326 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54327 .private = &mss,
54328 };
54329
54330 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54331 + if (current->exec_id != m->exec_id) {
54332 + gr_log_badprocpid("smaps");
54333 + return 0;
54334 + }
54335 +#endif
54336 memset(&mss, 0, sizeof mss);
54337 - mss.vma = vma;
54338 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54339 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54340 +
54341 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54342 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54343 +#endif
54344 + mss.vma = vma;
54345 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54346 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54348 + }
54349 +#endif
54350
54351 show_map_vma(m, vma);
54352
54353 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54354 "Swap: %8lu kB\n"
54355 "KernelPageSize: %8lu kB\n"
54356 "MMUPageSize: %8lu kB\n",
54357 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54358 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54359 +#else
54360 (vma->vm_end - vma->vm_start) >> 10,
54361 +#endif
54362 mss.resident >> 10,
54363 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54364 mss.shared_clean >> 10,
54365 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54366 index 8f5c05d..c99c76d 100644
54367 --- a/fs/proc/task_nommu.c
54368 +++ b/fs/proc/task_nommu.c
54369 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54370 else
54371 bytes += kobjsize(mm);
54372
54373 - if (current->fs && current->fs->users > 1)
54374 + if (current->fs && atomic_read(&current->fs->users) > 1)
54375 sbytes += kobjsize(current->fs);
54376 else
54377 bytes += kobjsize(current->fs);
54378 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54379 if (len < 1)
54380 len = 1;
54381 seq_printf(m, "%*c", len, ' ');
54382 - seq_path(m, &file->f_path, "");
54383 + seq_path(m, &file->f_path, "\n\\");
54384 }
54385
54386 seq_putc(m, '\n');
54387 diff --git a/fs/readdir.c b/fs/readdir.c
54388 index 7723401..30059a6 100644
54389 --- a/fs/readdir.c
54390 +++ b/fs/readdir.c
54391 @@ -16,6 +16,7 @@
54392 #include <linux/security.h>
54393 #include <linux/syscalls.h>
54394 #include <linux/unistd.h>
54395 +#include <linux/namei.h>
54396
54397 #include <asm/uaccess.h>
54398
54399 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54400
54401 struct readdir_callback {
54402 struct old_linux_dirent __user * dirent;
54403 + struct file * file;
54404 int result;
54405 };
54406
54407 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54408 buf->result = -EOVERFLOW;
54409 return -EOVERFLOW;
54410 }
54411 +
54412 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54413 + return 0;
54414 +
54415 buf->result++;
54416 dirent = buf->dirent;
54417 if (!access_ok(VERIFY_WRITE, dirent,
54418 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54419
54420 buf.result = 0;
54421 buf.dirent = dirent;
54422 + buf.file = file;
54423
54424 error = vfs_readdir(file, fillonedir, &buf);
54425 if (buf.result)
54426 @@ -142,6 +149,7 @@ struct linux_dirent {
54427 struct getdents_callback {
54428 struct linux_dirent __user * current_dir;
54429 struct linux_dirent __user * previous;
54430 + struct file * file;
54431 int count;
54432 int error;
54433 };
54434 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54435 buf->error = -EOVERFLOW;
54436 return -EOVERFLOW;
54437 }
54438 +
54439 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54440 + return 0;
54441 +
54442 dirent = buf->previous;
54443 if (dirent) {
54444 if (__put_user(offset, &dirent->d_off))
54445 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54446 buf.previous = NULL;
54447 buf.count = count;
54448 buf.error = 0;
54449 + buf.file = file;
54450
54451 error = vfs_readdir(file, filldir, &buf);
54452 if (error >= 0)
54453 @@ -228,6 +241,7 @@ out:
54454 struct getdents_callback64 {
54455 struct linux_dirent64 __user * current_dir;
54456 struct linux_dirent64 __user * previous;
54457 + struct file *file;
54458 int count;
54459 int error;
54460 };
54461 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54462 buf->error = -EINVAL; /* only used if we fail.. */
54463 if (reclen > buf->count)
54464 return -EINVAL;
54465 +
54466 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54467 + return 0;
54468 +
54469 dirent = buf->previous;
54470 if (dirent) {
54471 if (__put_user(offset, &dirent->d_off))
54472 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54473
54474 buf.current_dir = dirent;
54475 buf.previous = NULL;
54476 + buf.file = file;
54477 buf.count = count;
54478 buf.error = 0;
54479
54480 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54481 error = buf.error;
54482 lastdirent = buf.previous;
54483 if (lastdirent) {
54484 - typeof(lastdirent->d_off) d_off = file->f_pos;
54485 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54486 if (__put_user(d_off, &lastdirent->d_off))
54487 error = -EFAULT;
54488 else
54489 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54490 index d42c30c..4fd8718 100644
54491 --- a/fs/reiserfs/dir.c
54492 +++ b/fs/reiserfs/dir.c
54493 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54494 struct reiserfs_dir_entry de;
54495 int ret = 0;
54496
54497 + pax_track_stack();
54498 +
54499 reiserfs_write_lock(inode->i_sb);
54500
54501 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54502 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54503 index 128d3f7..8840d44 100644
54504 --- a/fs/reiserfs/do_balan.c
54505 +++ b/fs/reiserfs/do_balan.c
54506 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54507 return;
54508 }
54509
54510 - atomic_inc(&(fs_generation(tb->tb_sb)));
54511 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54512 do_balance_starts(tb);
54513
54514 /* balance leaf returns 0 except if combining L R and S into
54515 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54516 index 72cb1cc..d0e3181 100644
54517 --- a/fs/reiserfs/item_ops.c
54518 +++ b/fs/reiserfs/item_ops.c
54519 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54520 vi->vi_index, vi->vi_type, vi->vi_ih);
54521 }
54522
54523 -static struct item_operations stat_data_ops = {
54524 +static const struct item_operations stat_data_ops = {
54525 .bytes_number = sd_bytes_number,
54526 .decrement_key = sd_decrement_key,
54527 .is_left_mergeable = sd_is_left_mergeable,
54528 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54529 vi->vi_index, vi->vi_type, vi->vi_ih);
54530 }
54531
54532 -static struct item_operations direct_ops = {
54533 +static const struct item_operations direct_ops = {
54534 .bytes_number = direct_bytes_number,
54535 .decrement_key = direct_decrement_key,
54536 .is_left_mergeable = direct_is_left_mergeable,
54537 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54538 vi->vi_index, vi->vi_type, vi->vi_ih);
54539 }
54540
54541 -static struct item_operations indirect_ops = {
54542 +static const struct item_operations indirect_ops = {
54543 .bytes_number = indirect_bytes_number,
54544 .decrement_key = indirect_decrement_key,
54545 .is_left_mergeable = indirect_is_left_mergeable,
54546 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54547 printk("\n");
54548 }
54549
54550 -static struct item_operations direntry_ops = {
54551 +static const struct item_operations direntry_ops = {
54552 .bytes_number = direntry_bytes_number,
54553 .decrement_key = direntry_decrement_key,
54554 .is_left_mergeable = direntry_is_left_mergeable,
54555 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54556 "Invalid item type observed, run fsck ASAP");
54557 }
54558
54559 -static struct item_operations errcatch_ops = {
54560 +static const struct item_operations errcatch_ops = {
54561 errcatch_bytes_number,
54562 errcatch_decrement_key,
54563 errcatch_is_left_mergeable,
54564 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54565 #error Item types must use disk-format assigned values.
54566 #endif
54567
54568 -struct item_operations *item_ops[TYPE_ANY + 1] = {
54569 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54570 &stat_data_ops,
54571 &indirect_ops,
54572 &direct_ops,
54573 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54574 index b5fe0aa..e0e25c4 100644
54575 --- a/fs/reiserfs/journal.c
54576 +++ b/fs/reiserfs/journal.c
54577 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54578 struct buffer_head *bh;
54579 int i, j;
54580
54581 + pax_track_stack();
54582 +
54583 bh = __getblk(dev, block, bufsize);
54584 if (buffer_uptodate(bh))
54585 return (bh);
54586 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54587 index 2715791..b8996db 100644
54588 --- a/fs/reiserfs/namei.c
54589 +++ b/fs/reiserfs/namei.c
54590 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54591 unsigned long savelink = 1;
54592 struct timespec ctime;
54593
54594 + pax_track_stack();
54595 +
54596 /* three balancings: (1) old name removal, (2) new name insertion
54597 and (3) maybe "save" link insertion
54598 stat data updates: (1) old directory,
54599 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54600 index 9229e55..3d2e3b7 100644
54601 --- a/fs/reiserfs/procfs.c
54602 +++ b/fs/reiserfs/procfs.c
54603 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54604 "SMALL_TAILS " : "NO_TAILS ",
54605 replay_only(sb) ? "REPLAY_ONLY " : "",
54606 convert_reiserfs(sb) ? "CONV " : "",
54607 - atomic_read(&r->s_generation_counter),
54608 + atomic_read_unchecked(&r->s_generation_counter),
54609 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54610 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54611 SF(s_good_search_by_key_reada), SF(s_bmaps),
54612 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54613 struct journal_params *jp = &rs->s_v1.s_journal;
54614 char b[BDEVNAME_SIZE];
54615
54616 + pax_track_stack();
54617 +
54618 seq_printf(m, /* on-disk fields */
54619 "jp_journal_1st_block: \t%i\n"
54620 "jp_journal_dev: \t%s[%x]\n"
54621 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54622 index d036ee5..4c7dca1 100644
54623 --- a/fs/reiserfs/stree.c
54624 +++ b/fs/reiserfs/stree.c
54625 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54626 int iter = 0;
54627 #endif
54628
54629 + pax_track_stack();
54630 +
54631 BUG_ON(!th->t_trans_id);
54632
54633 init_tb_struct(th, &s_del_balance, sb, path,
54634 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54635 int retval;
54636 int quota_cut_bytes = 0;
54637
54638 + pax_track_stack();
54639 +
54640 BUG_ON(!th->t_trans_id);
54641
54642 le_key2cpu_key(&cpu_key, key);
54643 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54644 int quota_cut_bytes;
54645 loff_t tail_pos = 0;
54646
54647 + pax_track_stack();
54648 +
54649 BUG_ON(!th->t_trans_id);
54650
54651 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54652 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54653 int retval;
54654 int fs_gen;
54655
54656 + pax_track_stack();
54657 +
54658 BUG_ON(!th->t_trans_id);
54659
54660 fs_gen = get_generation(inode->i_sb);
54661 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54662 int fs_gen = 0;
54663 int quota_bytes = 0;
54664
54665 + pax_track_stack();
54666 +
54667 BUG_ON(!th->t_trans_id);
54668
54669 if (inode) { /* Do we count quotas for item? */
54670 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54671 index 7cb1285..c726cd0 100644
54672 --- a/fs/reiserfs/super.c
54673 +++ b/fs/reiserfs/super.c
54674 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54675 {.option_name = NULL}
54676 };
54677
54678 + pax_track_stack();
54679 +
54680 *blocks = 0;
54681 if (!options || !*options)
54682 /* use default configuration: create tails, journaling on, no
54683 diff --git a/fs/select.c b/fs/select.c
54684 index fd38ce2..f5381b8 100644
54685 --- a/fs/select.c
54686 +++ b/fs/select.c
54687 @@ -20,6 +20,7 @@
54688 #include <linux/module.h>
54689 #include <linux/slab.h>
54690 #include <linux/poll.h>
54691 +#include <linux/security.h>
54692 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54693 #include <linux/file.h>
54694 #include <linux/fdtable.h>
54695 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54696 int retval, i, timed_out = 0;
54697 unsigned long slack = 0;
54698
54699 + pax_track_stack();
54700 +
54701 rcu_read_lock();
54702 retval = max_select_fd(n, fds);
54703 rcu_read_unlock();
54704 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54705 /* Allocate small arguments on the stack to save memory and be faster */
54706 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54707
54708 + pax_track_stack();
54709 +
54710 ret = -EINVAL;
54711 if (n < 0)
54712 goto out_nofds;
54713 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54714 struct poll_list *walk = head;
54715 unsigned long todo = nfds;
54716
54717 + pax_track_stack();
54718 +
54719 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54720 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54721 return -EINVAL;
54722
54723 diff --git a/fs/seq_file.c b/fs/seq_file.c
54724 index eae7d9d..4ddabe2 100644
54725 --- a/fs/seq_file.c
54726 +++ b/fs/seq_file.c
54727 @@ -9,6 +9,7 @@
54728 #include <linux/module.h>
54729 #include <linux/seq_file.h>
54730 #include <linux/slab.h>
54731 +#include <linux/sched.h>
54732
54733 #include <asm/uaccess.h>
54734 #include <asm/page.h>
54735 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54736 memset(p, 0, sizeof(*p));
54737 mutex_init(&p->lock);
54738 p->op = op;
54739 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54740 + p->exec_id = current->exec_id;
54741 +#endif
54742
54743 /*
54744 * Wrappers around seq_open(e.g. swaps_open) need to be
54745 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54746 return 0;
54747 }
54748 if (!m->buf) {
54749 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54750 + m->size = PAGE_SIZE;
54751 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54752 if (!m->buf)
54753 return -ENOMEM;
54754 }
54755 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54756 Eoverflow:
54757 m->op->stop(m, p);
54758 kfree(m->buf);
54759 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54760 + m->size <<= 1;
54761 + m->buf = kmalloc(m->size, GFP_KERNEL);
54762 return !m->buf ? -ENOMEM : -EAGAIN;
54763 }
54764
54765 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54766 m->version = file->f_version;
54767 /* grab buffer if we didn't have one */
54768 if (!m->buf) {
54769 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54770 + m->size = PAGE_SIZE;
54771 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54772 if (!m->buf)
54773 goto Enomem;
54774 }
54775 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54776 goto Fill;
54777 m->op->stop(m, p);
54778 kfree(m->buf);
54779 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54780 + m->size <<= 1;
54781 + m->buf = kmalloc(m->size, GFP_KERNEL);
54782 if (!m->buf)
54783 goto Enomem;
54784 m->count = 0;
54785 @@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54786 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54787 void *data)
54788 {
54789 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54790 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54791 int res = -ENOMEM;
54792
54793 if (op) {
54794 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54795 index 71c29b6..54694dd 100644
54796 --- a/fs/smbfs/proc.c
54797 +++ b/fs/smbfs/proc.c
54798 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54799
54800 out:
54801 if (server->local_nls != NULL && server->remote_nls != NULL)
54802 - server->ops->convert = convert_cp;
54803 + *(void **)&server->ops->convert = convert_cp;
54804 else
54805 - server->ops->convert = convert_memcpy;
54806 + *(void **)&server->ops->convert = convert_memcpy;
54807
54808 smb_unlock_server(server);
54809 return n;
54810 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54811
54812 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54813 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54814 - server->ops->getattr = smb_proc_getattr_core;
54815 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54816 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54817 - server->ops->getattr = smb_proc_getattr_ff;
54818 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54819 }
54820
54821 /* Decode server capabilities */
54822 @@ -3439,7 +3439,7 @@ out:
54823 static void
54824 install_ops(struct smb_ops *dst, struct smb_ops *src)
54825 {
54826 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54827 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54828 }
54829
54830 /* < LANMAN2 */
54831 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54832 index 00b2909..2ace383 100644
54833 --- a/fs/smbfs/symlink.c
54834 +++ b/fs/smbfs/symlink.c
54835 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54836
54837 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54838 {
54839 - char *s = nd_get_link(nd);
54840 + const char *s = nd_get_link(nd);
54841 if (!IS_ERR(s))
54842 __putname(s);
54843 }
54844 diff --git a/fs/splice.c b/fs/splice.c
54845 index bb92b7c..5aa72b0 100644
54846 --- a/fs/splice.c
54847 +++ b/fs/splice.c
54848 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54849 pipe_lock(pipe);
54850
54851 for (;;) {
54852 - if (!pipe->readers) {
54853 + if (!atomic_read(&pipe->readers)) {
54854 send_sig(SIGPIPE, current, 0);
54855 if (!ret)
54856 ret = -EPIPE;
54857 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54858 do_wakeup = 0;
54859 }
54860
54861 - pipe->waiting_writers++;
54862 + atomic_inc(&pipe->waiting_writers);
54863 pipe_wait(pipe);
54864 - pipe->waiting_writers--;
54865 + atomic_dec(&pipe->waiting_writers);
54866 }
54867
54868 pipe_unlock(pipe);
54869 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54870 .spd_release = spd_release_page,
54871 };
54872
54873 + pax_track_stack();
54874 +
54875 index = *ppos >> PAGE_CACHE_SHIFT;
54876 loff = *ppos & ~PAGE_CACHE_MASK;
54877 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54878 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54879 old_fs = get_fs();
54880 set_fs(get_ds());
54881 /* The cast to a user pointer is valid due to the set_fs() */
54882 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54883 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54884 set_fs(old_fs);
54885
54886 return res;
54887 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54888 old_fs = get_fs();
54889 set_fs(get_ds());
54890 /* The cast to a user pointer is valid due to the set_fs() */
54891 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54892 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54893 set_fs(old_fs);
54894
54895 return res;
54896 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54897 .spd_release = spd_release_page,
54898 };
54899
54900 + pax_track_stack();
54901 +
54902 index = *ppos >> PAGE_CACHE_SHIFT;
54903 offset = *ppos & ~PAGE_CACHE_MASK;
54904 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54905 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54906 goto err;
54907
54908 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54909 - vec[i].iov_base = (void __user *) page_address(page);
54910 + vec[i].iov_base = (__force void __user *) page_address(page);
54911 vec[i].iov_len = this_len;
54912 pages[i] = page;
54913 spd.nr_pages++;
54914 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54915 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54916 {
54917 while (!pipe->nrbufs) {
54918 - if (!pipe->writers)
54919 + if (!atomic_read(&pipe->writers))
54920 return 0;
54921
54922 - if (!pipe->waiting_writers && sd->num_spliced)
54923 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54924 return 0;
54925
54926 if (sd->flags & SPLICE_F_NONBLOCK)
54927 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54928 * out of the pipe right after the splice_to_pipe(). So set
54929 * PIPE_READERS appropriately.
54930 */
54931 - pipe->readers = 1;
54932 + atomic_set(&pipe->readers, 1);
54933
54934 current->splice_pipe = pipe;
54935 }
54936 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54937 .spd_release = spd_release_page,
54938 };
54939
54940 + pax_track_stack();
54941 +
54942 pipe = pipe_info(file->f_path.dentry->d_inode);
54943 if (!pipe)
54944 return -EBADF;
54945 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54946 ret = -ERESTARTSYS;
54947 break;
54948 }
54949 - if (!pipe->writers)
54950 + if (!atomic_read(&pipe->writers))
54951 break;
54952 - if (!pipe->waiting_writers) {
54953 + if (!atomic_read(&pipe->waiting_writers)) {
54954 if (flags & SPLICE_F_NONBLOCK) {
54955 ret = -EAGAIN;
54956 break;
54957 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54958 pipe_lock(pipe);
54959
54960 while (pipe->nrbufs >= PIPE_BUFFERS) {
54961 - if (!pipe->readers) {
54962 + if (!atomic_read(&pipe->readers)) {
54963 send_sig(SIGPIPE, current, 0);
54964 ret = -EPIPE;
54965 break;
54966 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54967 ret = -ERESTARTSYS;
54968 break;
54969 }
54970 - pipe->waiting_writers++;
54971 + atomic_inc(&pipe->waiting_writers);
54972 pipe_wait(pipe);
54973 - pipe->waiting_writers--;
54974 + atomic_dec(&pipe->waiting_writers);
54975 }
54976
54977 pipe_unlock(pipe);
54978 @@ -1786,14 +1792,14 @@ retry:
54979 pipe_double_lock(ipipe, opipe);
54980
54981 do {
54982 - if (!opipe->readers) {
54983 + if (!atomic_read(&opipe->readers)) {
54984 send_sig(SIGPIPE, current, 0);
54985 if (!ret)
54986 ret = -EPIPE;
54987 break;
54988 }
54989
54990 - if (!ipipe->nrbufs && !ipipe->writers)
54991 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54992 break;
54993
54994 /*
54995 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54996 pipe_double_lock(ipipe, opipe);
54997
54998 do {
54999 - if (!opipe->readers) {
55000 + if (!atomic_read(&opipe->readers)) {
55001 send_sig(SIGPIPE, current, 0);
55002 if (!ret)
55003 ret = -EPIPE;
55004 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55005 * return EAGAIN if we have the potential of some data in the
55006 * future, otherwise just return 0
55007 */
55008 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55009 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55010 ret = -EAGAIN;
55011
55012 pipe_unlock(ipipe);
55013 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55014 index e020183..18d64b4 100644
55015 --- a/fs/sysfs/dir.c
55016 +++ b/fs/sysfs/dir.c
55017 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55018 struct sysfs_dirent *sd;
55019 int rc;
55020
55021 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55022 + const char *parent_name = parent_sd->s_name;
55023 +
55024 + mode = S_IFDIR | S_IRWXU;
55025 +
55026 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55027 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55028 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55029 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55030 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55031 +#endif
55032 +
55033 /* allocate */
55034 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55035 if (!sd)
55036 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55037 index 7118a38..70af853 100644
55038 --- a/fs/sysfs/file.c
55039 +++ b/fs/sysfs/file.c
55040 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55041
55042 struct sysfs_open_dirent {
55043 atomic_t refcnt;
55044 - atomic_t event;
55045 + atomic_unchecked_t event;
55046 wait_queue_head_t poll;
55047 struct list_head buffers; /* goes through sysfs_buffer.list */
55048 };
55049 @@ -53,7 +53,7 @@ struct sysfs_buffer {
55050 size_t count;
55051 loff_t pos;
55052 char * page;
55053 - struct sysfs_ops * ops;
55054 + const struct sysfs_ops * ops;
55055 struct mutex mutex;
55056 int needs_read_fill;
55057 int event;
55058 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55059 {
55060 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55061 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55062 - struct sysfs_ops * ops = buffer->ops;
55063 + const struct sysfs_ops * ops = buffer->ops;
55064 int ret = 0;
55065 ssize_t count;
55066
55067 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55068 if (!sysfs_get_active_two(attr_sd))
55069 return -ENODEV;
55070
55071 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55072 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55073 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55074
55075 sysfs_put_active_two(attr_sd);
55076 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
55077 {
55078 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55079 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55080 - struct sysfs_ops * ops = buffer->ops;
55081 + const struct sysfs_ops * ops = buffer->ops;
55082 int rc;
55083
55084 /* need attr_sd for attr and ops, its parent for kobj */
55085 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55086 return -ENOMEM;
55087
55088 atomic_set(&new_od->refcnt, 0);
55089 - atomic_set(&new_od->event, 1);
55090 + atomic_set_unchecked(&new_od->event, 1);
55091 init_waitqueue_head(&new_od->poll);
55092 INIT_LIST_HEAD(&new_od->buffers);
55093 goto retry;
55094 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
55095 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
55096 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55097 struct sysfs_buffer *buffer;
55098 - struct sysfs_ops *ops;
55099 + const struct sysfs_ops *ops;
55100 int error = -EACCES;
55101 char *p;
55102
55103 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55104
55105 sysfs_put_active_two(attr_sd);
55106
55107 - if (buffer->event != atomic_read(&od->event))
55108 + if (buffer->event != atomic_read_unchecked(&od->event))
55109 goto trigger;
55110
55111 return DEFAULT_POLLMASK;
55112 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55113
55114 od = sd->s_attr.open;
55115 if (od) {
55116 - atomic_inc(&od->event);
55117 + atomic_inc_unchecked(&od->event);
55118 wake_up_interruptible(&od->poll);
55119 }
55120
55121 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55122 index c5081ad..342ea86 100644
55123 --- a/fs/sysfs/symlink.c
55124 +++ b/fs/sysfs/symlink.c
55125 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55126
55127 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55128 {
55129 - char *page = nd_get_link(nd);
55130 + const char *page = nd_get_link(nd);
55131 if (!IS_ERR(page))
55132 free_page((unsigned long)page);
55133 }
55134 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
55135 index 1e06853..b06d325 100644
55136 --- a/fs/udf/balloc.c
55137 +++ b/fs/udf/balloc.c
55138 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
55139
55140 mutex_lock(&sbi->s_alloc_mutex);
55141 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55142 - if (bloc->logicalBlockNum < 0 ||
55143 - (bloc->logicalBlockNum + count) >
55144 - partmap->s_partition_len) {
55145 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55146 udf_debug("%d < %d || %d + %d > %d\n",
55147 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
55148 count, partmap->s_partition_len);
55149 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
55150
55151 mutex_lock(&sbi->s_alloc_mutex);
55152 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55153 - if (bloc->logicalBlockNum < 0 ||
55154 - (bloc->logicalBlockNum + count) >
55155 - partmap->s_partition_len) {
55156 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55157 udf_debug("%d < %d || %d + %d > %d\n",
55158 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55159 partmap->s_partition_len);
55160 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55161 index 6d24c2c..fff470f 100644
55162 --- a/fs/udf/inode.c
55163 +++ b/fs/udf/inode.c
55164 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55165 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55166 int lastblock = 0;
55167
55168 + pax_track_stack();
55169 +
55170 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55171 prev_epos.block = iinfo->i_location;
55172 prev_epos.bh = NULL;
55173 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55174 index 9215700..bf1f68e 100644
55175 --- a/fs/udf/misc.c
55176 +++ b/fs/udf/misc.c
55177 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55178
55179 u8 udf_tag_checksum(const struct tag *t)
55180 {
55181 - u8 *data = (u8 *)t;
55182 + const u8 *data = (const u8 *)t;
55183 u8 checksum = 0;
55184 int i;
55185 for (i = 0; i < sizeof(struct tag); ++i)
55186 diff --git a/fs/utimes.c b/fs/utimes.c
55187 index e4c75db..b4df0e0 100644
55188 --- a/fs/utimes.c
55189 +++ b/fs/utimes.c
55190 @@ -1,6 +1,7 @@
55191 #include <linux/compiler.h>
55192 #include <linux/file.h>
55193 #include <linux/fs.h>
55194 +#include <linux/security.h>
55195 #include <linux/linkage.h>
55196 #include <linux/mount.h>
55197 #include <linux/namei.h>
55198 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55199 goto mnt_drop_write_and_out;
55200 }
55201 }
55202 +
55203 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55204 + error = -EACCES;
55205 + goto mnt_drop_write_and_out;
55206 + }
55207 +
55208 mutex_lock(&inode->i_mutex);
55209 error = notify_change(path->dentry, &newattrs);
55210 mutex_unlock(&inode->i_mutex);
55211 diff --git a/fs/xattr.c b/fs/xattr.c
55212 index 6d4f6d3..cda3958 100644
55213 --- a/fs/xattr.c
55214 +++ b/fs/xattr.c
55215 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55216 * Extended attribute SET operations
55217 */
55218 static long
55219 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55220 +setxattr(struct path *path, const char __user *name, const void __user *value,
55221 size_t size, int flags)
55222 {
55223 int error;
55224 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55225 return PTR_ERR(kvalue);
55226 }
55227
55228 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55229 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55230 + error = -EACCES;
55231 + goto out;
55232 + }
55233 +
55234 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55235 +out:
55236 kfree(kvalue);
55237 return error;
55238 }
55239 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55240 return error;
55241 error = mnt_want_write(path.mnt);
55242 if (!error) {
55243 - error = setxattr(path.dentry, name, value, size, flags);
55244 + error = setxattr(&path, name, value, size, flags);
55245 mnt_drop_write(path.mnt);
55246 }
55247 path_put(&path);
55248 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55249 return error;
55250 error = mnt_want_write(path.mnt);
55251 if (!error) {
55252 - error = setxattr(path.dentry, name, value, size, flags);
55253 + error = setxattr(&path, name, value, size, flags);
55254 mnt_drop_write(path.mnt);
55255 }
55256 path_put(&path);
55257 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55258 const void __user *,value, size_t, size, int, flags)
55259 {
55260 struct file *f;
55261 - struct dentry *dentry;
55262 int error = -EBADF;
55263
55264 f = fget(fd);
55265 if (!f)
55266 return error;
55267 - dentry = f->f_path.dentry;
55268 - audit_inode(NULL, dentry);
55269 + audit_inode(NULL, f->f_path.dentry);
55270 error = mnt_want_write_file(f);
55271 if (!error) {
55272 - error = setxattr(dentry, name, value, size, flags);
55273 + error = setxattr(&f->f_path, name, value, size, flags);
55274 mnt_drop_write(f->f_path.mnt);
55275 }
55276 fput(f);
55277 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55278 index c6ad7c7..f2847a7 100644
55279 --- a/fs/xattr_acl.c
55280 +++ b/fs/xattr_acl.c
55281 @@ -17,8 +17,8 @@
55282 struct posix_acl *
55283 posix_acl_from_xattr(const void *value, size_t size)
55284 {
55285 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55286 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55287 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55288 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55289 int count;
55290 struct posix_acl *acl;
55291 struct posix_acl_entry *acl_e;
55292 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55293 index 942362f..88f96f5 100644
55294 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
55295 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55296 @@ -134,7 +134,7 @@ xfs_find_handle(
55297 }
55298
55299 error = -EFAULT;
55300 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55301 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55302 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55303 goto out_put;
55304
55305 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55306 if (IS_ERR(dentry))
55307 return PTR_ERR(dentry);
55308
55309 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55310 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55311 if (!kbuf)
55312 goto out_dput;
55313
55314 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55315 xfs_mount_t *mp,
55316 void __user *arg)
55317 {
55318 - xfs_fsop_geom_t fsgeo;
55319 + xfs_fsop_geom_t fsgeo;
55320 int error;
55321
55322 error = xfs_fs_geometry(mp, &fsgeo, 3);
55323 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55324 index bad485a..479bd32 100644
55325 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55326 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55327 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55328 xfs_fsop_geom_t fsgeo;
55329 int error;
55330
55331 + memset(&fsgeo, 0, sizeof(fsgeo));
55332 error = xfs_fs_geometry(mp, &fsgeo, 3);
55333 if (error)
55334 return -error;
55335 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55336 index 1f3b4b8..6102f6d 100644
55337 --- a/fs/xfs/linux-2.6/xfs_iops.c
55338 +++ b/fs/xfs/linux-2.6/xfs_iops.c
55339 @@ -468,7 +468,7 @@ xfs_vn_put_link(
55340 struct nameidata *nd,
55341 void *p)
55342 {
55343 - char *s = nd_get_link(nd);
55344 + const char *s = nd_get_link(nd);
55345
55346 if (!IS_ERR(s))
55347 kfree(s);
55348 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55349 index 8971fb0..5fc1eb2 100644
55350 --- a/fs/xfs/xfs_bmap.c
55351 +++ b/fs/xfs/xfs_bmap.c
55352 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55353 int nmap,
55354 int ret_nmap);
55355 #else
55356 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55357 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55358 #endif /* DEBUG */
55359
55360 #if defined(XFS_RW_TRACE)
55361 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55362 index e89734e..5e84d8d 100644
55363 --- a/fs/xfs/xfs_dir2_sf.c
55364 +++ b/fs/xfs/xfs_dir2_sf.c
55365 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55366 }
55367
55368 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55369 - if (filldir(dirent, sfep->name, sfep->namelen,
55370 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55371 + char name[sfep->namelen];
55372 + memcpy(name, sfep->name, sfep->namelen);
55373 + if (filldir(dirent, name, sfep->namelen,
55374 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
55375 + *offset = off & 0x7fffffff;
55376 + return 0;
55377 + }
55378 + } else if (filldir(dirent, sfep->name, sfep->namelen,
55379 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55380 *offset = off & 0x7fffffff;
55381 return 0;
55382 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55383 index 8f32f50..b6a41e8 100644
55384 --- a/fs/xfs/xfs_vnodeops.c
55385 +++ b/fs/xfs/xfs_vnodeops.c
55386 @@ -564,13 +564,18 @@ xfs_readlink(
55387
55388 xfs_ilock(ip, XFS_ILOCK_SHARED);
55389
55390 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55391 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55392 -
55393 pathlen = ip->i_d.di_size;
55394 if (!pathlen)
55395 goto out;
55396
55397 + if (pathlen > MAXPATHLEN) {
55398 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55399 + __func__, (unsigned long long)ip->i_ino, pathlen);
55400 + ASSERT(0);
55401 + error = XFS_ERROR(EFSCORRUPTED);
55402 + goto out;
55403 + }
55404 +
55405 if (ip->i_df.if_flags & XFS_IFINLINE) {
55406 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55407 link[pathlen] = '\0';
55408 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55409 new file mode 100644
55410 index 0000000..7026cbd
55411 --- /dev/null
55412 +++ b/grsecurity/Kconfig
55413 @@ -0,0 +1,1074 @@
55414 +#
55415 +# grecurity configuration
55416 +#
55417 +
55418 +menu "Grsecurity"
55419 +
55420 +config GRKERNSEC
55421 + bool "Grsecurity"
55422 + select CRYPTO
55423 + select CRYPTO_SHA256
55424 + help
55425 + If you say Y here, you will be able to configure many features
55426 + that will enhance the security of your system. It is highly
55427 + recommended that you say Y here and read through the help
55428 + for each option so that you fully understand the features and
55429 + can evaluate their usefulness for your machine.
55430 +
55431 +choice
55432 + prompt "Security Level"
55433 + depends on GRKERNSEC
55434 + default GRKERNSEC_CUSTOM
55435 +
55436 +config GRKERNSEC_LOW
55437 + bool "Low"
55438 + select GRKERNSEC_LINK
55439 + select GRKERNSEC_FIFO
55440 + select GRKERNSEC_RANDNET
55441 + select GRKERNSEC_DMESG
55442 + select GRKERNSEC_CHROOT
55443 + select GRKERNSEC_CHROOT_CHDIR
55444 +
55445 + help
55446 + If you choose this option, several of the grsecurity options will
55447 + be enabled that will give you greater protection against a number
55448 + of attacks, while assuring that none of your software will have any
55449 + conflicts with the additional security measures. If you run a lot
55450 + of unusual software, or you are having problems with the higher
55451 + security levels, you should say Y here. With this option, the
55452 + following features are enabled:
55453 +
55454 + - Linking restrictions
55455 + - FIFO restrictions
55456 + - Restricted dmesg
55457 + - Enforced chdir("/") on chroot
55458 + - Runtime module disabling
55459 +
55460 +config GRKERNSEC_MEDIUM
55461 + bool "Medium"
55462 + select PAX
55463 + select PAX_EI_PAX
55464 + select PAX_PT_PAX_FLAGS
55465 + select PAX_HAVE_ACL_FLAGS
55466 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55467 + select GRKERNSEC_CHROOT
55468 + select GRKERNSEC_CHROOT_SYSCTL
55469 + select GRKERNSEC_LINK
55470 + select GRKERNSEC_FIFO
55471 + select GRKERNSEC_DMESG
55472 + select GRKERNSEC_RANDNET
55473 + select GRKERNSEC_FORKFAIL
55474 + select GRKERNSEC_TIME
55475 + select GRKERNSEC_SIGNAL
55476 + select GRKERNSEC_CHROOT
55477 + select GRKERNSEC_CHROOT_UNIX
55478 + select GRKERNSEC_CHROOT_MOUNT
55479 + select GRKERNSEC_CHROOT_PIVOT
55480 + select GRKERNSEC_CHROOT_DOUBLE
55481 + select GRKERNSEC_CHROOT_CHDIR
55482 + select GRKERNSEC_CHROOT_MKNOD
55483 + select GRKERNSEC_PROC
55484 + select GRKERNSEC_PROC_USERGROUP
55485 + select PAX_RANDUSTACK
55486 + select PAX_ASLR
55487 + select PAX_RANDMMAP
55488 + select PAX_REFCOUNT if (X86 || SPARC64)
55489 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55490 +
55491 + help
55492 + If you say Y here, several features in addition to those included
55493 + in the low additional security level will be enabled. These
55494 + features provide even more security to your system, though in rare
55495 + cases they may be incompatible with very old or poorly written
55496 + software. If you enable this option, make sure that your auth
55497 + service (identd) is running as gid 1001. With this option,
55498 + the following features (in addition to those provided in the
55499 + low additional security level) will be enabled:
55500 +
55501 + - Failed fork logging
55502 + - Time change logging
55503 + - Signal logging
55504 + - Deny mounts in chroot
55505 + - Deny double chrooting
55506 + - Deny sysctl writes in chroot
55507 + - Deny mknod in chroot
55508 + - Deny access to abstract AF_UNIX sockets out of chroot
55509 + - Deny pivot_root in chroot
55510 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55511 + - /proc restrictions with special GID set to 10 (usually wheel)
55512 + - Address Space Layout Randomization (ASLR)
55513 + - Prevent exploitation of most refcount overflows
55514 + - Bounds checking of copying between the kernel and userland
55515 +
55516 +config GRKERNSEC_HIGH
55517 + bool "High"
55518 + select GRKERNSEC_LINK
55519 + select GRKERNSEC_FIFO
55520 + select GRKERNSEC_DMESG
55521 + select GRKERNSEC_FORKFAIL
55522 + select GRKERNSEC_TIME
55523 + select GRKERNSEC_SIGNAL
55524 + select GRKERNSEC_CHROOT
55525 + select GRKERNSEC_CHROOT_SHMAT
55526 + select GRKERNSEC_CHROOT_UNIX
55527 + select GRKERNSEC_CHROOT_MOUNT
55528 + select GRKERNSEC_CHROOT_FCHDIR
55529 + select GRKERNSEC_CHROOT_PIVOT
55530 + select GRKERNSEC_CHROOT_DOUBLE
55531 + select GRKERNSEC_CHROOT_CHDIR
55532 + select GRKERNSEC_CHROOT_MKNOD
55533 + select GRKERNSEC_CHROOT_CAPS
55534 + select GRKERNSEC_CHROOT_SYSCTL
55535 + select GRKERNSEC_CHROOT_FINDTASK
55536 + select GRKERNSEC_SYSFS_RESTRICT
55537 + select GRKERNSEC_PROC
55538 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55539 + select GRKERNSEC_HIDESYM
55540 + select GRKERNSEC_BRUTE
55541 + select GRKERNSEC_PROC_USERGROUP
55542 + select GRKERNSEC_KMEM
55543 + select GRKERNSEC_RESLOG
55544 + select GRKERNSEC_RANDNET
55545 + select GRKERNSEC_PROC_ADD
55546 + select GRKERNSEC_CHROOT_CHMOD
55547 + select GRKERNSEC_CHROOT_NICE
55548 + select GRKERNSEC_SETXID
55549 + select GRKERNSEC_AUDIT_MOUNT
55550 + select GRKERNSEC_MODHARDEN if (MODULES)
55551 + select GRKERNSEC_HARDEN_PTRACE
55552 + select GRKERNSEC_PTRACE_READEXEC
55553 + select GRKERNSEC_VM86 if (X86_32)
55554 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55555 + select PAX
55556 + select PAX_RANDUSTACK
55557 + select PAX_ASLR
55558 + select PAX_RANDMMAP
55559 + select PAX_NOEXEC
55560 + select PAX_MPROTECT
55561 + select PAX_EI_PAX
55562 + select PAX_PT_PAX_FLAGS
55563 + select PAX_HAVE_ACL_FLAGS
55564 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55565 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55566 + select PAX_RANDKSTACK if (X86_TSC && X86)
55567 + select PAX_SEGMEXEC if (X86_32)
55568 + select PAX_PAGEEXEC
55569 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55570 + select PAX_EMUTRAMP if (PARISC)
55571 + select PAX_EMUSIGRT if (PARISC)
55572 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55573 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55574 + select PAX_REFCOUNT if (X86 || SPARC64)
55575 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55576 + help
55577 + If you say Y here, many of the features of grsecurity will be
55578 + enabled, which will protect you against many kinds of attacks
55579 + against your system. The heightened security comes at a cost
55580 + of an increased chance of incompatibilities with rare software
55581 + on your machine. Since this security level enables PaX, you should
55582 + view <http://pax.grsecurity.net> and read about the PaX
55583 + project. While you are there, download chpax and run it on
55584 + binaries that cause problems with PaX. Also remember that
55585 + since the /proc restrictions are enabled, you must run your
55586 + identd as gid 1001. This security level enables the following
55587 + features in addition to those listed in the low and medium
55588 + security levels:
55589 +
55590 + - Additional /proc restrictions
55591 + - Chmod restrictions in chroot
55592 + - No signals, ptrace, or viewing of processes outside of chroot
55593 + - Capability restrictions in chroot
55594 + - Deny fchdir out of chroot
55595 + - Priority restrictions in chroot
55596 + - Segmentation-based implementation of PaX
55597 + - Mprotect restrictions
55598 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55599 + - Kernel stack randomization
55600 + - Mount/unmount/remount logging
55601 + - Kernel symbol hiding
55602 + - Hardening of module auto-loading
55603 + - Ptrace restrictions
55604 + - Restricted vm86 mode
55605 + - Restricted sysfs/debugfs
55606 + - Active kernel exploit response
55607 +
55608 +config GRKERNSEC_CUSTOM
55609 + bool "Custom"
55610 + help
55611 + If you say Y here, you will be able to configure every grsecurity
55612 + option, which allows you to enable many more features that aren't
55613 + covered in the basic security levels. These additional features
55614 + include TPE, socket restrictions, and the sysctl system for
55615 + grsecurity. It is advised that you read through the help for
55616 + each option to determine its usefulness in your situation.
55617 +
55618 +endchoice
55619 +
55620 +menu "Memory Protections"
55621 +depends on GRKERNSEC
55622 +
55623 +config GRKERNSEC_KMEM
55624 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55625 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55626 + help
55627 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55628 + be written to or read from to modify or leak the contents of the running
55629 + kernel. /dev/port will also not be allowed to be opened. If you have module
55630 + support disabled, enabling this will close up four ways that are
55631 + currently used to insert malicious code into the running kernel.
55632 + Even with all these features enabled, we still highly recommend that
55633 + you use the RBAC system, as it is still possible for an attacker to
55634 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55635 + If you are not using XFree86, you may be able to stop this additional
55636 + case by enabling the 'Disable privileged I/O' option. Though nothing
55637 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55638 + but only to video memory, which is the only writing we allow in this
55639 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55640 + not be allowed to be mprotected with PROT_WRITE later.
55641 + It is highly recommended that you say Y here if you meet all the
55642 + conditions above.
55643 +
55644 +config GRKERNSEC_VM86
55645 + bool "Restrict VM86 mode"
55646 + depends on X86_32
55647 +
55648 + help
55649 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55650 + make use of a special execution mode on 32bit x86 processors called
55651 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55652 + video cards and will still work with this option enabled. The purpose
55653 + of the option is to prevent exploitation of emulation errors in
55654 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55655 + Nearly all users should be able to enable this option.
55656 +
55657 +config GRKERNSEC_IO
55658 + bool "Disable privileged I/O"
55659 + depends on X86
55660 + select RTC_CLASS
55661 + select RTC_INTF_DEV
55662 + select RTC_DRV_CMOS
55663 +
55664 + help
55665 + If you say Y here, all ioperm and iopl calls will return an error.
55666 + Ioperm and iopl can be used to modify the running kernel.
55667 + Unfortunately, some programs need this access to operate properly,
55668 + the most notable of which are XFree86 and hwclock. hwclock can be
55669 + remedied by having RTC support in the kernel, so real-time
55670 + clock support is enabled if this option is enabled, to ensure
55671 + that hwclock operates correctly. XFree86 still will not
55672 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55673 + IF YOU USE XFree86. If you use XFree86 and you still want to
55674 + protect your kernel against modification, use the RBAC system.
55675 +
55676 +config GRKERNSEC_PROC_MEMMAP
55677 + bool "Harden ASLR against information leaks and entropy reduction"
55678 + default y if (PAX_NOEXEC || PAX_ASLR)
55679 + depends on PAX_NOEXEC || PAX_ASLR
55680 + help
55681 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55682 + give no information about the addresses of its mappings if
55683 + PaX features that rely on random addresses are enabled on the task.
55684 + In addition to sanitizing this information and disabling other
55685 + dangerous sources of information, this option causes reads of sensitive
55686 + /proc/<pid> entries to be rejected where the file descriptor was opened
55687 + in a different task than the one performing the read. Such attempts are logged.
55688 + Finally, this option limits argv/env strings for suid/sgid binaries
55689 + to 1MB to prevent a complete exhaustion of the stack entropy provided
55690 + by ASLR.
55691 + If you use PaX it is essential that you say Y here as it closes up
55692 + several holes that make full ASLR useless for suid/sgid binaries.
55693 +
55694 +config GRKERNSEC_BRUTE
55695 + bool "Deter exploit bruteforcing"
55696 + help
55697 + If you say Y here, attempts to bruteforce exploits against forking
55698 + daemons such as apache or sshd, as well as against suid/sgid binaries
55699 + will be deterred. When a child of a forking daemon is killed by PaX
55700 + or crashes due to an illegal instruction or other suspicious signal,
55701 + the parent process will be delayed 30 seconds upon every subsequent
55702 + fork until the administrator is able to assess the situation and
55703 + restart the daemon.
55704 + In the suid/sgid case, the attempt is logged, the user has all their
55705 + processes terminated, and they are prevented from executing any further
55706 + processes for 15 minutes.
55707 + It is recommended that you also enable signal logging in the auditing
55708 + section so that logs are generated when a process triggers a suspicious
55709 + signal.
55710 + If the sysctl option is enabled, a sysctl option with name
55711 + "deter_bruteforce" is created.
55712 +
55713 +config GRKERNSEC_MODHARDEN
55714 + bool "Harden module auto-loading"
55715 + depends on MODULES
55716 + help
55717 + If you say Y here, module auto-loading in response to use of some
55718 + feature implemented by an unloaded module will be restricted to
55719 + root users. Enabling this option helps defend against attacks
55720 + by unprivileged users who abuse the auto-loading behavior to
55721 + cause a vulnerable module to load that is then exploited.
55722 +
55723 + If this option prevents a legitimate use of auto-loading for a
55724 + non-root user, the administrator can execute modprobe manually
55725 + with the exact name of the module mentioned in the alert log.
55726 + Alternatively, the administrator can add the module to the list
55727 + of modules loaded at boot by modifying init scripts.
55728 +
55729 + Modification of init scripts will most likely be needed on
55730 + Ubuntu servers with encrypted home directory support enabled,
55731 + as the first non-root user logging in will cause the ecb(aes),
55732 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55733 +
55734 +config GRKERNSEC_HIDESYM
55735 + bool "Hide kernel symbols"
55736 + help
55737 + If you say Y here, getting information on loaded modules, and
55738 + displaying all kernel symbols through a syscall will be restricted
55739 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55740 + /proc/kallsyms will be restricted to the root user. The RBAC
55741 + system can hide that entry even from root.
55742 +
55743 + This option also prevents leaking of kernel addresses through
55744 + several /proc entries.
55745 +
55746 + Note that this option is only effective provided the following
55747 + conditions are met:
55748 + 1) The kernel using grsecurity is not precompiled by some distribution
55749 + 2) You have also enabled GRKERNSEC_DMESG
55750 + 3) You are using the RBAC system and hiding other files such as your
55751 + kernel image and System.map. Alternatively, enabling this option
55752 + causes the permissions on /boot, /lib/modules, and the kernel
55753 + source directory to change at compile time to prevent
55754 + reading by non-root users.
55755 + If the above conditions are met, this option will aid in providing a
55756 + useful protection against local kernel exploitation of overflows
55757 + and arbitrary read/write vulnerabilities.
55758 +
55759 +config GRKERNSEC_KERN_LOCKOUT
55760 + bool "Active kernel exploit response"
55761 + depends on X86 || ARM || PPC || SPARC
55762 + help
55763 + If you say Y here, when a PaX alert is triggered due to suspicious
55764 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55765 + or an OOPs occurs due to bad memory accesses, instead of just
55766 + terminating the offending process (and potentially allowing
55767 + a subsequent exploit from the same user), we will take one of two
55768 + actions:
55769 + If the user was root, we will panic the system
55770 + If the user was non-root, we will log the attempt, terminate
55771 + all processes owned by the user, then prevent them from creating
55772 + any new processes until the system is restarted
55773 + This deters repeated kernel exploitation/bruteforcing attempts
55774 + and is useful for later forensics.
55775 +
55776 +endmenu
55777 +menu "Role Based Access Control Options"
55778 +depends on GRKERNSEC
55779 +
55780 +config GRKERNSEC_RBAC_DEBUG
55781 + bool
55782 +
55783 +config GRKERNSEC_NO_RBAC
55784 + bool "Disable RBAC system"
55785 + help
55786 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55787 + preventing the RBAC system from being enabled. You should only say Y
55788 + here if you have no intention of using the RBAC system, so as to prevent
55789 + an attacker with root access from misusing the RBAC system to hide files
55790 + and processes when loadable module support and /dev/[k]mem have been
55791 + locked down.
55792 +
55793 +config GRKERNSEC_ACL_HIDEKERN
55794 + bool "Hide kernel processes"
55795 + help
55796 + If you say Y here, all kernel threads will be hidden to all
55797 + processes but those whose subject has the "view hidden processes"
55798 + flag.
55799 +
55800 +config GRKERNSEC_ACL_MAXTRIES
55801 + int "Maximum tries before password lockout"
55802 + default 3
55803 + help
55804 + This option enforces the maximum number of times a user can attempt
55805 + to authorize themselves with the grsecurity RBAC system before being
55806 + denied the ability to attempt authorization again for a specified time.
55807 + The lower the number, the harder it will be to brute-force a password.
55808 +
55809 +config GRKERNSEC_ACL_TIMEOUT
55810 + int "Time to wait after max password tries, in seconds"
55811 + default 30
55812 + help
55813 + This option specifies the time the user must wait after attempting to
55814 + authorize to the RBAC system with the maximum number of invalid
55815 + passwords. The higher the number, the harder it will be to brute-force
55816 + a password.
55817 +
55818 +endmenu
55819 +menu "Filesystem Protections"
55820 +depends on GRKERNSEC
55821 +
55822 +config GRKERNSEC_PROC
55823 + bool "Proc restrictions"
55824 + help
55825 + If you say Y here, the permissions of the /proc filesystem
55826 + will be altered to enhance system security and privacy. You MUST
55827 + choose either a user only restriction or a user and group restriction.
55828 + Depending upon the option you choose, you can either restrict users to
55829 + see only the processes they themselves run, or choose a group that can
55830 + view all processes and files normally restricted to root if you choose
55831 + the "restrict to user only" option. NOTE: If you're running identd as
55832 + a non-root user, you will have to run it as the group you specify here.
55833 +
55834 +config GRKERNSEC_PROC_USER
55835 + bool "Restrict /proc to user only"
55836 + depends on GRKERNSEC_PROC
55837 + help
55838 + If you say Y here, non-root users will only be able to view their own
55839 + processes, and restricts them from viewing network-related information,
55840 + and viewing kernel symbol and module information.
55841 +
55842 +config GRKERNSEC_PROC_USERGROUP
55843 + bool "Allow special group"
55844 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55845 + help
55846 + If you say Y here, you will be able to select a group that will be
55847 + able to view all processes and network-related information. If you've
55848 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55849 + remain hidden. This option is useful if you want to run identd as
55850 + a non-root user.
55851 +
55852 +config GRKERNSEC_PROC_GID
55853 + int "GID for special group"
55854 + depends on GRKERNSEC_PROC_USERGROUP
55855 + default 1001
55856 +
55857 +config GRKERNSEC_PROC_ADD
55858 + bool "Additional restrictions"
55859 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55860 + help
55861 + If you say Y here, additional restrictions will be placed on
55862 + /proc that keep normal users from viewing device information and
55863 + slabinfo information that could be useful for exploits.
55864 +
55865 +config GRKERNSEC_LINK
55866 + bool "Linking restrictions"
55867 + help
55868 + If you say Y here, /tmp race exploits will be prevented, since users
55869 + will no longer be able to follow symlinks owned by other users in
55870 + world-writable +t directories (e.g. /tmp), unless the owner of the
55871 + symlink is the owner of the directory. Users will also not be
55872 + able to hardlink to files they do not own. If the sysctl option is
55873 + enabled, a sysctl option with name "linking_restrictions" is created.
55874 +
55875 +config GRKERNSEC_FIFO
55876 + bool "FIFO restrictions"
55877 + help
55878 + If you say Y here, users will not be able to write to FIFOs they don't
55879 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55880 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
55881 + option is enabled, a sysctl option with name "fifo_restrictions" is
55882 + created.
55883 +
55884 +config GRKERNSEC_SYSFS_RESTRICT
55885 + bool "Sysfs/debugfs restriction"
55886 + depends on SYSFS
55887 + help
55888 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55889 + any filesystem normally mounted under it (e.g. debugfs) will be
55890 + mostly accessible only by root. These filesystems generally provide access
55891 + to hardware and debug information that isn't appropriate for unprivileged
55892 + users of the system. Sysfs and debugfs have also become a large source
55893 + of new vulnerabilities, ranging from infoleaks to local compromise.
55894 + There has been very little oversight with an eye toward security involved
55895 + in adding new exporters of information to these filesystems, so their
55896 + use is discouraged.
55897 + For reasons of compatibility, a few directories have been whitelisted
55898 + for access by non-root users:
55899 + /sys/fs/selinux
55900 + /sys/fs/fuse
55901 + /sys/devices/system/cpu
55902 +
55903 +config GRKERNSEC_ROFS
55904 + bool "Runtime read-only mount protection"
55905 + help
55906 + If you say Y here, a sysctl option with name "romount_protect" will
55907 + be created. By setting this option to 1 at runtime, filesystems
55908 + will be protected in the following ways:
55909 + * No new writable mounts will be allowed
55910 + * Existing read-only mounts won't be able to be remounted read/write
55911 + * Write operations will be denied on all block devices
55912 + This option acts independently of grsec_lock: once it is set to 1,
55913 + it cannot be turned off. Therefore, please be mindful of the resulting
55914 + behavior if this option is enabled in an init script on a read-only
55915 + filesystem. This feature is mainly intended for secure embedded systems.
55916 +
55917 +config GRKERNSEC_CHROOT
55918 + bool "Chroot jail restrictions"
55919 + help
55920 + If you say Y here, you will be able to choose several options that will
55921 + make breaking out of a chrooted jail much more difficult. If you
55922 + encounter no software incompatibilities with the following options, it
55923 + is recommended that you enable each one.
55924 +
55925 +config GRKERNSEC_CHROOT_MOUNT
55926 + bool "Deny mounts"
55927 + depends on GRKERNSEC_CHROOT
55928 + help
55929 + If you say Y here, processes inside a chroot will not be able to
55930 + mount or remount filesystems. If the sysctl option is enabled, a
55931 + sysctl option with name "chroot_deny_mount" is created.
55932 +
55933 +config GRKERNSEC_CHROOT_DOUBLE
55934 + bool "Deny double-chroots"
55935 + depends on GRKERNSEC_CHROOT
55936 + help
55937 + If you say Y here, processes inside a chroot will not be able to chroot
55938 + again outside the chroot. This is a widely used method of breaking
55939 + out of a chroot jail and should not be allowed. If the sysctl
55940 + option is enabled, a sysctl option with name
55941 + "chroot_deny_chroot" is created.
55942 +
55943 +config GRKERNSEC_CHROOT_PIVOT
55944 + bool "Deny pivot_root in chroot"
55945 + depends on GRKERNSEC_CHROOT
55946 + help
55947 + If you say Y here, processes inside a chroot will not be able to use
55948 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55949 + works similar to chroot in that it changes the root filesystem. This
55950 + function could be misused in a chrooted process to attempt to break out
55951 + of the chroot, and therefore should not be allowed. If the sysctl
55952 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55953 + created.
55954 +
55955 +config GRKERNSEC_CHROOT_CHDIR
55956 + bool "Enforce chdir(\"/\") on all chroots"
55957 + depends on GRKERNSEC_CHROOT
55958 + help
55959 + If you say Y here, the current working directory of all newly-chrooted
55960 + applications will be set to the root directory of the chroot.
55961 + The man page on chroot(2) states:
55962 + Note that this call does not change the current working
55963 + directory, so that `.' can be outside the tree rooted at
55964 + `/'. In particular, the super-user can escape from a
55965 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55966 +
55967 + It is recommended that you say Y here, since it's not known to break
55968 + any software. If the sysctl option is enabled, a sysctl option with
55969 + name "chroot_enforce_chdir" is created.
55970 +
55971 +config GRKERNSEC_CHROOT_CHMOD
55972 + bool "Deny (f)chmod +s"
55973 + depends on GRKERNSEC_CHROOT
55974 + help
55975 + If you say Y here, processes inside a chroot will not be able to chmod
55976 + or fchmod files to make them have suid or sgid bits. This protects
55977 + against another published method of breaking a chroot. If the sysctl
55978 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55979 + created.
55980 +
55981 +config GRKERNSEC_CHROOT_FCHDIR
55982 + bool "Deny fchdir out of chroot"
55983 + depends on GRKERNSEC_CHROOT
55984 + help
55985 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55986 + to a file descriptor of the chrooting process that points to a directory
55987 + outside the filesystem will be stopped. If the sysctl option
55988 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55989 +
55990 +config GRKERNSEC_CHROOT_MKNOD
55991 + bool "Deny mknod"
55992 + depends on GRKERNSEC_CHROOT
55993 + help
55994 + If you say Y here, processes inside a chroot will not be allowed to
55995 + mknod. The problem with using mknod inside a chroot is that it
55996 + would allow an attacker to create a device entry that is the same
55997 + as one on the physical root of your system, which could range from
55998 + anything from the console device to a device for your harddrive (which
55999 + they could then use to wipe the drive or steal data). It is recommended
56000 + that you say Y here, unless you run into software incompatibilities.
56001 + If the sysctl option is enabled, a sysctl option with name
56002 + "chroot_deny_mknod" is created.
56003 +
56004 +config GRKERNSEC_CHROOT_SHMAT
56005 + bool "Deny shmat() out of chroot"
56006 + depends on GRKERNSEC_CHROOT
56007 + help
56008 + If you say Y here, processes inside a chroot will not be able to attach
56009 + to shared memory segments that were created outside of the chroot jail.
56010 + It is recommended that you say Y here. If the sysctl option is enabled,
56011 + a sysctl option with name "chroot_deny_shmat" is created.
56012 +
56013 +config GRKERNSEC_CHROOT_UNIX
56014 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56015 + depends on GRKERNSEC_CHROOT
56016 + help
56017 + If you say Y here, processes inside a chroot will not be able to
56018 + connect to abstract (meaning not belonging to a filesystem) Unix
56019 + domain sockets that were bound outside of a chroot. It is recommended
56020 + that you say Y here. If the sysctl option is enabled, a sysctl option
56021 + with name "chroot_deny_unix" is created.
56022 +
56023 +config GRKERNSEC_CHROOT_FINDTASK
56024 + bool "Protect outside processes"
56025 + depends on GRKERNSEC_CHROOT
56026 + help
56027 + If you say Y here, processes inside a chroot will not be able to
56028 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56029 + getsid, or view any process outside of the chroot. If the sysctl
56030 + option is enabled, a sysctl option with name "chroot_findtask" is
56031 + created.
56032 +
56033 +config GRKERNSEC_CHROOT_NICE
56034 + bool "Restrict priority changes"
56035 + depends on GRKERNSEC_CHROOT
56036 + help
56037 + If you say Y here, processes inside a chroot will not be able to raise
56038 + the priority of processes in the chroot, or alter the priority of
56039 + processes outside the chroot. This provides more security than simply
56040 + removing CAP_SYS_NICE from the process' capability set. If the
56041 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56042 + is created.
56043 +
56044 +config GRKERNSEC_CHROOT_SYSCTL
56045 + bool "Deny sysctl writes"
56046 + depends on GRKERNSEC_CHROOT
56047 + help
56048 + If you say Y here, an attacker in a chroot will not be able to
56049 + write to sysctl entries, either by sysctl(2) or through a /proc
56050 + interface. It is strongly recommended that you say Y here. If the
56051 + sysctl option is enabled, a sysctl option with name
56052 + "chroot_deny_sysctl" is created.
56053 +
56054 +config GRKERNSEC_CHROOT_CAPS
56055 + bool "Capability restrictions"
56056 + depends on GRKERNSEC_CHROOT
56057 + help
56058 + If you say Y here, the capabilities on all processes within a
56059 + chroot jail will be lowered to stop module insertion, raw i/o,
56060 + system and net admin tasks, rebooting the system, modifying immutable
56061 + files, modifying IPC owned by another, and changing the system time.
56062 + This is left an option because it can break some apps. Disable this
56063 + if your chrooted apps are having problems performing those kinds of
56064 + tasks. If the sysctl option is enabled, a sysctl option with
56065 + name "chroot_caps" is created.
56066 +
56067 +endmenu
56068 +menu "Kernel Auditing"
56069 +depends on GRKERNSEC
56070 +
56071 +config GRKERNSEC_AUDIT_GROUP
56072 + bool "Single group for auditing"
56073 + help
56074 + If you say Y here, the exec, chdir, and (un)mount logging features
56075 + will only operate on a group you specify. This option is recommended
56076 + if you only want to watch certain users instead of having a large
56077 + amount of logs from the entire system. If the sysctl option is enabled,
56078 + a sysctl option with name "audit_group" is created.
56079 +
56080 +config GRKERNSEC_AUDIT_GID
56081 + int "GID for auditing"
56082 + depends on GRKERNSEC_AUDIT_GROUP
56083 + default 1007
56084 +
56085 +config GRKERNSEC_EXECLOG
56086 + bool "Exec logging"
56087 + help
56088 + If you say Y here, all execve() calls will be logged (since the
56089 + other exec*() calls are frontends to execve(), all execution
56090 + will be logged). Useful for shell-servers that like to keep track
56091 + of their users. If the sysctl option is enabled, a sysctl option with
56092 + name "exec_logging" is created.
56093 + WARNING: This option when enabled will produce a LOT of logs, especially
56094 + on an active system.
56095 +
56096 +config GRKERNSEC_RESLOG
56097 + bool "Resource logging"
56098 + help
56099 + If you say Y here, all attempts to overstep resource limits will
56100 + be logged with the resource name, the requested size, and the current
56101 + limit. It is highly recommended that you say Y here. If the sysctl
56102 + option is enabled, a sysctl option with name "resource_logging" is
56103 + created. If the RBAC system is enabled, the sysctl value is ignored.
56104 +
56105 +config GRKERNSEC_CHROOT_EXECLOG
56106 + bool "Log execs within chroot"
56107 + help
56108 + If you say Y here, all executions inside a chroot jail will be logged
56109 + to syslog. This can cause a large amount of logs if certain
56110 + applications (eg. djb's daemontools) are installed on the system, and
56111 + is therefore left as an option. If the sysctl option is enabled, a
56112 + sysctl option with name "chroot_execlog" is created.
56113 +
56114 +config GRKERNSEC_AUDIT_PTRACE
56115 + bool "Ptrace logging"
56116 + help
56117 + If you say Y here, all attempts to attach to a process via ptrace
56118 + will be logged. If the sysctl option is enabled, a sysctl option
56119 + with name "audit_ptrace" is created.
56120 +
56121 +config GRKERNSEC_AUDIT_CHDIR
56122 + bool "Chdir logging"
56123 + help
56124 + If you say Y here, all chdir() calls will be logged. If the sysctl
56125 + option is enabled, a sysctl option with name "audit_chdir" is created.
56126 +
56127 +config GRKERNSEC_AUDIT_MOUNT
56128 + bool "(Un)Mount logging"
56129 + help
56130 + If you say Y here, all mounts and unmounts will be logged. If the
56131 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56132 + created.
56133 +
56134 +config GRKERNSEC_SIGNAL
56135 + bool "Signal logging"
56136 + help
56137 + If you say Y here, certain important signals will be logged, such as
56138 + SIGSEGV, which will as a result inform you of when an error in a program
56139 + occurred, which in some cases could mean a possible exploit attempt.
56140 + If the sysctl option is enabled, a sysctl option with name
56141 + "signal_logging" is created.
56142 +
56143 +config GRKERNSEC_FORKFAIL
56144 + bool "Fork failure logging"
56145 + help
56146 + If you say Y here, all failed fork() attempts will be logged.
56147 + This could suggest a fork bomb, or someone attempting to overstep
56148 + their process limit. If the sysctl option is enabled, a sysctl option
56149 + with name "forkfail_logging" is created.
56150 +
56151 +config GRKERNSEC_TIME
56152 + bool "Time change logging"
56153 + help
56154 + If you say Y here, any changes of the system clock will be logged.
56155 + If the sysctl option is enabled, a sysctl option with name
56156 + "timechange_logging" is created.
56157 +
56158 +config GRKERNSEC_PROC_IPADDR
56159 + bool "/proc/<pid>/ipaddr support"
56160 + help
56161 + If you say Y here, a new entry will be added to each /proc/<pid>
56162 + directory that contains the IP address of the person using the task.
56163 + The IP is carried across local TCP and AF_UNIX stream sockets.
56164 + This information can be useful for IDS/IPSes to perform remote response
56165 + to a local attack. The entry is readable by only the owner of the
56166 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56167 + the RBAC system), and thus does not create privacy concerns.
56168 +
56169 +config GRKERNSEC_RWXMAP_LOG
56170 + bool 'Denied RWX mmap/mprotect logging'
56171 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56172 + help
56173 + If you say Y here, calls to mmap() and mprotect() with explicit
56174 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56175 + denied by the PAX_MPROTECT feature. If the sysctl option is
56176 + enabled, a sysctl option with name "rwxmap_logging" is created.
56177 +
56178 +config GRKERNSEC_AUDIT_TEXTREL
56179 + bool 'ELF text relocations logging (READ HELP)'
56180 + depends on PAX_MPROTECT
56181 + help
56182 + If you say Y here, text relocations will be logged with the filename
56183 + of the offending library or binary. The purpose of the feature is
56184 + to help Linux distribution developers get rid of libraries and
56185 + binaries that need text relocations which hinder the future progress
56186 + of PaX. Only Linux distribution developers should say Y here, and
56187 + never on a production machine, as this option creates an information
56188 + leak that could aid an attacker in defeating the randomization of
56189 + a single memory region. If the sysctl option is enabled, a sysctl
56190 + option with name "audit_textrel" is created.
56191 +
56192 +endmenu
56193 +
56194 +menu "Executable Protections"
56195 +depends on GRKERNSEC
56196 +
56197 +config GRKERNSEC_DMESG
56198 + bool "Dmesg(8) restriction"
56199 + help
56200 + If you say Y here, non-root users will not be able to use dmesg(8)
56201 + to view up to the last 4kb of messages in the kernel's log buffer.
56202 + The kernel's log buffer often contains kernel addresses and other
56203 + identifying information useful to an attacker in fingerprinting a
56204 + system for a targeted exploit.
56205 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56206 + created.
56207 +
56208 +config GRKERNSEC_HARDEN_PTRACE
56209 + bool "Deter ptrace-based process snooping"
56210 + help
56211 + If you say Y here, TTY sniffers and other malicious monitoring
56212 + programs implemented through ptrace will be defeated. If you
56213 + have been using the RBAC system, this option has already been
56214 + enabled for several years for all users, with the ability to make
56215 + fine-grained exceptions.
56216 +
56217 + This option only affects the ability of non-root users to ptrace
56218 + processes that are not a descendent of the ptracing process.
56219 + This means that strace ./binary and gdb ./binary will still work,
56220 + but attaching to arbitrary processes will not. If the sysctl
56221 + option is enabled, a sysctl option with name "harden_ptrace" is
56222 + created.
56223 +
56224 +config GRKERNSEC_PTRACE_READEXEC
56225 + bool "Require read access to ptrace sensitive binaries"
56226 + help
56227 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56228 + binaries. This option is useful in environments that
56229 + remove the read bits (e.g. file mode 4711) from suid binaries to
56230 + prevent infoleaking of their contents. This option adds
56231 + consistency to the use of that file mode, as the binary could normally
56232 + be read out when run without privileges while ptracing.
56233 +
56234 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56235 + is created.
56236 +
56237 +config GRKERNSEC_SETXID
56238 + bool "Enforce consistent multithreaded privileges"
56239 + help
56240 + If you say Y here, a change from a root uid to a non-root uid
56241 + in a multithreaded application will cause the resulting uids,
56242 + gids, supplementary groups, and capabilities in that thread
56243 + to be propagated to the other threads of the process. In most
56244 + cases this is unnecessary, as glibc will emulate this behavior
56245 + on behalf of the application. Other libcs do not act in the
56246 + same way, allowing the other threads of the process to continue
56247 + running with root privileges. If the sysctl option is enabled,
56248 + a sysctl option with name "consistent_setxid" is created.
56249 +
56250 +config GRKERNSEC_TPE
56251 + bool "Trusted Path Execution (TPE)"
56252 + help
56253 + If you say Y here, you will be able to choose a gid to add to the
56254 + supplementary groups of users you want to mark as "untrusted."
56255 + These users will not be able to execute any files that are not in
56256 + root-owned directories writable only by root. If the sysctl option
56257 + is enabled, a sysctl option with name "tpe" is created.
56258 +
56259 +config GRKERNSEC_TPE_ALL
56260 + bool "Partially restrict all non-root users"
56261 + depends on GRKERNSEC_TPE
56262 + help
56263 + If you say Y here, all non-root users will be covered under
56264 + a weaker TPE restriction. This is separate from, and in addition to,
56265 + the main TPE options that you have selected elsewhere. Thus, if a
56266 + "trusted" GID is chosen, this restriction applies to even that GID.
56267 + Under this restriction, all non-root users will only be allowed to
56268 + execute files in directories they own that are not group or
56269 + world-writable, or in directories owned by root and writable only by
56270 + root. If the sysctl option is enabled, a sysctl option with name
56271 + "tpe_restrict_all" is created.
56272 +
56273 +config GRKERNSEC_TPE_INVERT
56274 + bool "Invert GID option"
56275 + depends on GRKERNSEC_TPE
56276 + help
56277 + If you say Y here, the group you specify in the TPE configuration will
56278 + decide what group TPE restrictions will be *disabled* for. This
56279 + option is useful if you want TPE restrictions to be applied to most
56280 + users on the system. If the sysctl option is enabled, a sysctl option
56281 + with name "tpe_invert" is created. Unlike other sysctl options, this
56282 + entry will default to on for backward-compatibility.
56283 +
56284 +config GRKERNSEC_TPE_GID
56285 + int "GID for untrusted users"
56286 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56287 + default 1005
56288 + help
56289 + Setting this GID determines what group TPE restrictions will be
56290 + *enabled* for. If the sysctl option is enabled, a sysctl option
56291 + with name "tpe_gid" is created.
56292 +
56293 +config GRKERNSEC_TPE_GID
56294 + int "GID for trusted users"
56295 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56296 + default 1005
56297 + help
56298 + Setting this GID determines what group TPE restrictions will be
56299 + *disabled* for. If the sysctl option is enabled, a sysctl option
56300 + with name "tpe_gid" is created.
56301 +
56302 +endmenu
56303 +menu "Network Protections"
56304 +depends on GRKERNSEC
56305 +
56306 +config GRKERNSEC_RANDNET
56307 + bool "Larger entropy pools"
56308 + help
56309 + If you say Y here, the entropy pools used for many features of Linux
56310 + and grsecurity will be doubled in size. Since several grsecurity
56311 + features use additional randomness, it is recommended that you say Y
56312 + here. Saying Y here has a similar effect as modifying
56313 + /proc/sys/kernel/random/poolsize.
56314 +
56315 +config GRKERNSEC_BLACKHOLE
56316 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56317 + depends on NET
56318 + help
56319 + If you say Y here, neither TCP resets nor ICMP
56320 + destination-unreachable packets will be sent in response to packets
56321 + sent to ports for which no associated listening process exists.
56322 + This feature supports both IPV4 and IPV6 and exempts the
56323 + loopback interface from blackholing. Enabling this feature
56324 + makes a host more resilient to DoS attacks and reduces network
56325 + visibility against scanners.
56326 +
56327 + The blackhole feature as-implemented is equivalent to the FreeBSD
56328 + blackhole feature, as it prevents RST responses to all packets, not
56329 + just SYNs. Under most application behavior this causes no
56330 + problems, but applications (like haproxy) may not close certain
56331 + connections in a way that cleanly terminates them on the remote
56332 + end, leaving the remote host in LAST_ACK state. Because of this
56333 + side-effect and to prevent intentional LAST_ACK DoSes, this
56334 + feature also adds automatic mitigation against such attacks.
56335 + The mitigation drastically reduces the amount of time a socket
56336 + can spend in LAST_ACK state. If you're using haproxy and not
56337 + all servers it connects to have this option enabled, consider
56338 + disabling this feature on the haproxy host.
56339 +
56340 + If the sysctl option is enabled, two sysctl options with names
56341 + "ip_blackhole" and "lastack_retries" will be created.
56342 + While "ip_blackhole" takes the standard zero/non-zero on/off
56343 + toggle, "lastack_retries" uses the same kinds of values as
56344 + "tcp_retries1" and "tcp_retries2". The default value of 4
56345 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56346 + state.
56347 +
56348 +config GRKERNSEC_SOCKET
56349 + bool "Socket restrictions"
56350 + depends on NET
56351 + help
56352 + If you say Y here, you will be able to choose from several options.
56353 + If you assign a GID on your system and add it to the supplementary
56354 + groups of users you want to restrict socket access to, this patch
56355 + will perform up to three things, based on the option(s) you choose.
56356 +
56357 +config GRKERNSEC_SOCKET_ALL
56358 + bool "Deny any sockets to group"
56359 + depends on GRKERNSEC_SOCKET
56360 + help
56361 + If you say Y here, you will be able to choose a GID of whose users will
56362 + be unable to connect to other hosts from your machine or run server
56363 + applications from your machine. If the sysctl option is enabled, a
56364 + sysctl option with name "socket_all" is created.
56365 +
56366 +config GRKERNSEC_SOCKET_ALL_GID
56367 + int "GID to deny all sockets for"
56368 + depends on GRKERNSEC_SOCKET_ALL
56369 + default 1004
56370 + help
56371 + Here you can choose the GID to disable socket access for. Remember to
56372 + add the users you want socket access disabled for to the GID
56373 + specified here. If the sysctl option is enabled, a sysctl option
56374 + with name "socket_all_gid" is created.
56375 +
56376 +config GRKERNSEC_SOCKET_CLIENT
56377 + bool "Deny client sockets to group"
56378 + depends on GRKERNSEC_SOCKET
56379 + help
56380 + If you say Y here, you will be able to choose a GID of whose users will
56381 + be unable to connect to other hosts from your machine, but will be
56382 + able to run servers. If this option is enabled, all users in the group
56383 + you specify will have to use passive mode when initiating ftp transfers
56384 + from the shell on your machine. If the sysctl option is enabled, a
56385 + sysctl option with name "socket_client" is created.
56386 +
56387 +config GRKERNSEC_SOCKET_CLIENT_GID
56388 + int "GID to deny client sockets for"
56389 + depends on GRKERNSEC_SOCKET_CLIENT
56390 + default 1003
56391 + help
56392 + Here you can choose the GID to disable client socket access for.
56393 + Remember to add the users you want client socket access disabled for to
56394 + the GID specified here. If the sysctl option is enabled, a sysctl
56395 + option with name "socket_client_gid" is created.
56396 +
56397 +config GRKERNSEC_SOCKET_SERVER
56398 + bool "Deny server sockets to group"
56399 + depends on GRKERNSEC_SOCKET
56400 + help
56401 + If you say Y here, you will be able to choose a GID of whose users will
56402 + be unable to run server applications from your machine. If the sysctl
56403 + option is enabled, a sysctl option with name "socket_server" is created.
56404 +
56405 +config GRKERNSEC_SOCKET_SERVER_GID
56406 + int "GID to deny server sockets for"
56407 + depends on GRKERNSEC_SOCKET_SERVER
56408 + default 1002
56409 + help
56410 + Here you can choose the GID to disable server socket access for.
56411 + Remember to add the users you want server socket access disabled for to
56412 + the GID specified here. If the sysctl option is enabled, a sysctl
56413 + option with name "socket_server_gid" is created.
56414 +
56415 +endmenu
56416 +menu "Sysctl support"
56417 +depends on GRKERNSEC && SYSCTL
56418 +
56419 +config GRKERNSEC_SYSCTL
56420 + bool "Sysctl support"
56421 + help
56422 + If you say Y here, you will be able to change the options that
56423 + grsecurity runs with at bootup, without having to recompile your
56424 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56425 + to enable (1) or disable (0) various features. All the sysctl entries
56426 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56427 + All features enabled in the kernel configuration are disabled at boot
56428 + if you do not say Y to the "Turn on features by default" option.
56429 + All options should be set at startup, and the grsec_lock entry should
56430 + be set to a non-zero value after all the options are set.
56431 + *THIS IS EXTREMELY IMPORTANT*
56432 +
56433 +config GRKERNSEC_SYSCTL_DISTRO
56434 + bool "Extra sysctl support for distro makers (READ HELP)"
56435 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56436 + help
56437 + If you say Y here, additional sysctl options will be created
56438 + for features that affect processes running as root. Therefore,
56439 + it is critical when using this option that the grsec_lock entry be
56440 + enabled after boot. Only distros with prebuilt kernel packages
56441 + with this option enabled that can ensure grsec_lock is enabled
56442 + after boot should use this option.
56443 + *Failure to set grsec_lock after boot makes all grsec features
56444 + this option covers useless*
56445 +
56446 + Currently this option creates the following sysctl entries:
56447 + "Disable Privileged I/O": "disable_priv_io"
56448 +
56449 +config GRKERNSEC_SYSCTL_ON
56450 + bool "Turn on features by default"
56451 + depends on GRKERNSEC_SYSCTL
56452 + help
56453 + If you say Y here, instead of having all features enabled in the
56454 + kernel configuration disabled at boot time, the features will be
56455 + enabled at boot time. It is recommended you say Y here unless
56456 + there is some reason you would want all sysctl-tunable features to
56457 + be disabled by default. As mentioned elsewhere, it is important
56458 + to enable the grsec_lock entry once you have finished modifying
56459 + the sysctl entries.
56460 +
56461 +endmenu
56462 +menu "Logging Options"
56463 +depends on GRKERNSEC
56464 +
56465 +config GRKERNSEC_FLOODTIME
56466 + int "Seconds in between log messages (minimum)"
56467 + default 10
56468 + help
56469 + This option allows you to enforce the number of seconds between
56470 + grsecurity log messages. The default should be suitable for most
56471 + people, however, if you choose to change it, choose a value small enough
56472 + to allow informative logs to be produced, but large enough to
56473 + prevent flooding.
56474 +
56475 +config GRKERNSEC_FLOODBURST
56476 + int "Number of messages in a burst (maximum)"
56477 + default 6
56478 + help
56479 + This option allows you to choose the maximum number of messages allowed
56480 + within the flood time interval you chose in a separate option. The
56481 + default should be suitable for most people, however if you find that
56482 + many of your logs are being interpreted as flooding, you may want to
56483 + raise this value.
56484 +
56485 +endmenu
56486 +
56487 +endmenu
56488 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56489 new file mode 100644
56490 index 0000000..1b9afa9
56491 --- /dev/null
56492 +++ b/grsecurity/Makefile
56493 @@ -0,0 +1,38 @@
56494 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56495 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56496 +# into an RBAC system
56497 +#
56498 +# All code in this directory and various hooks inserted throughout the kernel
56499 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56500 +# under the GPL v2 or higher
56501 +
56502 +KBUILD_CFLAGS += -Werror
56503 +
56504 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56505 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56506 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56507 +
56508 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56509 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56510 + gracl_learn.o grsec_log.o
56511 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56512 +
56513 +ifdef CONFIG_NET
56514 +obj-y += grsec_sock.o
56515 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56516 +endif
56517 +
56518 +ifndef CONFIG_GRKERNSEC
56519 +obj-y += grsec_disabled.o
56520 +endif
56521 +
56522 +ifdef CONFIG_GRKERNSEC_HIDESYM
56523 +extra-y := grsec_hidesym.o
56524 +$(obj)/grsec_hidesym.o:
56525 + @-chmod -f 500 /boot
56526 + @-chmod -f 500 /lib/modules
56527 + @-chmod -f 500 /lib64/modules
56528 + @-chmod -f 500 /lib32/modules
56529 + @-chmod -f 700 .
56530 + @echo ' grsec: protected kernel image paths'
56531 +endif
56532 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56533 new file mode 100644
56534 index 0000000..dc4812b
56535 --- /dev/null
56536 +++ b/grsecurity/gracl.c
56537 @@ -0,0 +1,4148 @@
56538 +#include <linux/kernel.h>
56539 +#include <linux/module.h>
56540 +#include <linux/sched.h>
56541 +#include <linux/mm.h>
56542 +#include <linux/file.h>
56543 +#include <linux/fs.h>
56544 +#include <linux/namei.h>
56545 +#include <linux/mount.h>
56546 +#include <linux/tty.h>
56547 +#include <linux/proc_fs.h>
56548 +#include <linux/smp_lock.h>
56549 +#include <linux/slab.h>
56550 +#include <linux/vmalloc.h>
56551 +#include <linux/types.h>
56552 +#include <linux/sysctl.h>
56553 +#include <linux/netdevice.h>
56554 +#include <linux/ptrace.h>
56555 +#include <linux/gracl.h>
56556 +#include <linux/gralloc.h>
56557 +#include <linux/security.h>
56558 +#include <linux/grinternal.h>
56559 +#include <linux/pid_namespace.h>
56560 +#include <linux/fdtable.h>
56561 +#include <linux/percpu.h>
56562 +
56563 +#include <asm/uaccess.h>
56564 +#include <asm/errno.h>
56565 +#include <asm/mman.h>
56566 +
56567 +static struct acl_role_db acl_role_set;
56568 +static struct name_db name_set;
56569 +static struct inodev_db inodev_set;
56570 +
56571 +/* for keeping track of userspace pointers used for subjects, so we
56572 + can share references in the kernel as well
56573 +*/
56574 +
56575 +static struct dentry *real_root;
56576 +static struct vfsmount *real_root_mnt;
56577 +
56578 +static struct acl_subj_map_db subj_map_set;
56579 +
56580 +static struct acl_role_label *default_role;
56581 +
56582 +static struct acl_role_label *role_list;
56583 +
56584 +static u16 acl_sp_role_value;
56585 +
56586 +extern char *gr_shared_page[4];
56587 +static DEFINE_MUTEX(gr_dev_mutex);
56588 +DEFINE_RWLOCK(gr_inode_lock);
56589 +
56590 +struct gr_arg *gr_usermode;
56591 +
56592 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
56593 +
56594 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56595 +extern void gr_clear_learn_entries(void);
56596 +
56597 +#ifdef CONFIG_GRKERNSEC_RESLOG
56598 +extern void gr_log_resource(const struct task_struct *task,
56599 + const int res, const unsigned long wanted, const int gt);
56600 +#endif
56601 +
56602 +unsigned char *gr_system_salt;
56603 +unsigned char *gr_system_sum;
56604 +
56605 +static struct sprole_pw **acl_special_roles = NULL;
56606 +static __u16 num_sprole_pws = 0;
56607 +
56608 +static struct acl_role_label *kernel_role = NULL;
56609 +
56610 +static unsigned int gr_auth_attempts = 0;
56611 +static unsigned long gr_auth_expires = 0UL;
56612 +
56613 +#ifdef CONFIG_NET
56614 +extern struct vfsmount *sock_mnt;
56615 +#endif
56616 +extern struct vfsmount *pipe_mnt;
56617 +extern struct vfsmount *shm_mnt;
56618 +#ifdef CONFIG_HUGETLBFS
56619 +extern struct vfsmount *hugetlbfs_vfsmount;
56620 +#endif
56621 +
56622 +static struct acl_object_label *fakefs_obj_rw;
56623 +static struct acl_object_label *fakefs_obj_rwx;
56624 +
56625 +extern int gr_init_uidset(void);
56626 +extern void gr_free_uidset(void);
56627 +extern void gr_remove_uid(uid_t uid);
56628 +extern int gr_find_uid(uid_t uid);
56629 +
56630 +__inline__ int
56631 +gr_acl_is_enabled(void)
56632 +{
56633 + return (gr_status & GR_READY);
56634 +}
56635 +
56636 +#ifdef CONFIG_BTRFS_FS
56637 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56638 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56639 +#endif
56640 +
56641 +static inline dev_t __get_dev(const struct dentry *dentry)
56642 +{
56643 +#ifdef CONFIG_BTRFS_FS
56644 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56645 + return get_btrfs_dev_from_inode(dentry->d_inode);
56646 + else
56647 +#endif
56648 + return dentry->d_inode->i_sb->s_dev;
56649 +}
56650 +
56651 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56652 +{
56653 + return __get_dev(dentry);
56654 +}
56655 +
56656 +static char gr_task_roletype_to_char(struct task_struct *task)
56657 +{
56658 + switch (task->role->roletype &
56659 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56660 + GR_ROLE_SPECIAL)) {
56661 + case GR_ROLE_DEFAULT:
56662 + return 'D';
56663 + case GR_ROLE_USER:
56664 + return 'U';
56665 + case GR_ROLE_GROUP:
56666 + return 'G';
56667 + case GR_ROLE_SPECIAL:
56668 + return 'S';
56669 + }
56670 +
56671 + return 'X';
56672 +}
56673 +
56674 +char gr_roletype_to_char(void)
56675 +{
56676 + return gr_task_roletype_to_char(current);
56677 +}
56678 +
56679 +__inline__ int
56680 +gr_acl_tpe_check(void)
56681 +{
56682 + if (unlikely(!(gr_status & GR_READY)))
56683 + return 0;
56684 + if (current->role->roletype & GR_ROLE_TPE)
56685 + return 1;
56686 + else
56687 + return 0;
56688 +}
56689 +
56690 +int
56691 +gr_handle_rawio(const struct inode *inode)
56692 +{
56693 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56694 + if (inode && S_ISBLK(inode->i_mode) &&
56695 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56696 + !capable(CAP_SYS_RAWIO))
56697 + return 1;
56698 +#endif
56699 + return 0;
56700 +}
56701 +
56702 +static int
56703 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56704 +{
56705 + if (likely(lena != lenb))
56706 + return 0;
56707 +
56708 + return !memcmp(a, b, lena);
56709 +}
56710 +
56711 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56712 +{
56713 + *buflen -= namelen;
56714 + if (*buflen < 0)
56715 + return -ENAMETOOLONG;
56716 + *buffer -= namelen;
56717 + memcpy(*buffer, str, namelen);
56718 + return 0;
56719 +}
56720 +
56721 +/* this must be called with vfsmount_lock and dcache_lock held */
56722 +
56723 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56724 + struct dentry *root, struct vfsmount *rootmnt,
56725 + char *buffer, int buflen)
56726 +{
56727 + char * end = buffer+buflen;
56728 + char * retval;
56729 + int namelen;
56730 +
56731 + *--end = '\0';
56732 + buflen--;
56733 +
56734 + if (buflen < 1)
56735 + goto Elong;
56736 + /* Get '/' right */
56737 + retval = end-1;
56738 + *retval = '/';
56739 +
56740 + for (;;) {
56741 + struct dentry * parent;
56742 +
56743 + if (dentry == root && vfsmnt == rootmnt)
56744 + break;
56745 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56746 + /* Global root? */
56747 + if (vfsmnt->mnt_parent == vfsmnt)
56748 + goto global_root;
56749 + dentry = vfsmnt->mnt_mountpoint;
56750 + vfsmnt = vfsmnt->mnt_parent;
56751 + continue;
56752 + }
56753 + parent = dentry->d_parent;
56754 + prefetch(parent);
56755 + namelen = dentry->d_name.len;
56756 + buflen -= namelen + 1;
56757 + if (buflen < 0)
56758 + goto Elong;
56759 + end -= namelen;
56760 + memcpy(end, dentry->d_name.name, namelen);
56761 + *--end = '/';
56762 + retval = end;
56763 + dentry = parent;
56764 + }
56765 +
56766 +out:
56767 + return retval;
56768 +
56769 +global_root:
56770 + namelen = dentry->d_name.len;
56771 + buflen -= namelen;
56772 + if (buflen < 0)
56773 + goto Elong;
56774 + retval -= namelen-1; /* hit the slash */
56775 + memcpy(retval, dentry->d_name.name, namelen);
56776 + goto out;
56777 +Elong:
56778 + retval = ERR_PTR(-ENAMETOOLONG);
56779 + goto out;
56780 +}
56781 +
56782 +static char *
56783 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56784 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56785 +{
56786 + char *retval;
56787 +
56788 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56789 + if (unlikely(IS_ERR(retval)))
56790 + retval = strcpy(buf, "<path too long>");
56791 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56792 + retval[1] = '\0';
56793 +
56794 + return retval;
56795 +}
56796 +
56797 +static char *
56798 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56799 + char *buf, int buflen)
56800 +{
56801 + char *res;
56802 +
56803 + /* we can use real_root, real_root_mnt, because this is only called
56804 + by the RBAC system */
56805 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56806 +
56807 + return res;
56808 +}
56809 +
56810 +static char *
56811 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56812 + char *buf, int buflen)
56813 +{
56814 + char *res;
56815 + struct dentry *root;
56816 + struct vfsmount *rootmnt;
56817 + struct task_struct *reaper = &init_task;
56818 +
56819 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56820 + read_lock(&reaper->fs->lock);
56821 + root = dget(reaper->fs->root.dentry);
56822 + rootmnt = mntget(reaper->fs->root.mnt);
56823 + read_unlock(&reaper->fs->lock);
56824 +
56825 + spin_lock(&dcache_lock);
56826 + spin_lock(&vfsmount_lock);
56827 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56828 + spin_unlock(&vfsmount_lock);
56829 + spin_unlock(&dcache_lock);
56830 +
56831 + dput(root);
56832 + mntput(rootmnt);
56833 + return res;
56834 +}
56835 +
56836 +static char *
56837 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56838 +{
56839 + char *ret;
56840 + spin_lock(&dcache_lock);
56841 + spin_lock(&vfsmount_lock);
56842 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56843 + PAGE_SIZE);
56844 + spin_unlock(&vfsmount_lock);
56845 + spin_unlock(&dcache_lock);
56846 + return ret;
56847 +}
56848 +
56849 +static char *
56850 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56851 +{
56852 + char *ret;
56853 + char *buf;
56854 + int buflen;
56855 +
56856 + spin_lock(&dcache_lock);
56857 + spin_lock(&vfsmount_lock);
56858 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56859 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56860 + buflen = (int)(ret - buf);
56861 + if (buflen >= 5)
56862 + prepend(&ret, &buflen, "/proc", 5);
56863 + else
56864 + ret = strcpy(buf, "<path too long>");
56865 + spin_unlock(&vfsmount_lock);
56866 + spin_unlock(&dcache_lock);
56867 + return ret;
56868 +}
56869 +
56870 +char *
56871 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56872 +{
56873 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56874 + PAGE_SIZE);
56875 +}
56876 +
56877 +char *
56878 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56879 +{
56880 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56881 + PAGE_SIZE);
56882 +}
56883 +
56884 +char *
56885 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56886 +{
56887 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56888 + PAGE_SIZE);
56889 +}
56890 +
56891 +char *
56892 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56893 +{
56894 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56895 + PAGE_SIZE);
56896 +}
56897 +
56898 +char *
56899 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56900 +{
56901 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56902 + PAGE_SIZE);
56903 +}
56904 +
56905 +__inline__ __u32
56906 +to_gr_audit(const __u32 reqmode)
56907 +{
56908 + /* masks off auditable permission flags, then shifts them to create
56909 + auditing flags, and adds the special case of append auditing if
56910 + we're requesting write */
56911 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56912 +}
56913 +
56914 +struct acl_subject_label *
56915 +lookup_subject_map(const struct acl_subject_label *userp)
56916 +{
56917 + unsigned int index = shash(userp, subj_map_set.s_size);
56918 + struct subject_map *match;
56919 +
56920 + match = subj_map_set.s_hash[index];
56921 +
56922 + while (match && match->user != userp)
56923 + match = match->next;
56924 +
56925 + if (match != NULL)
56926 + return match->kernel;
56927 + else
56928 + return NULL;
56929 +}
56930 +
56931 +static void
56932 +insert_subj_map_entry(struct subject_map *subjmap)
56933 +{
56934 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56935 + struct subject_map **curr;
56936 +
56937 + subjmap->prev = NULL;
56938 +
56939 + curr = &subj_map_set.s_hash[index];
56940 + if (*curr != NULL)
56941 + (*curr)->prev = subjmap;
56942 +
56943 + subjmap->next = *curr;
56944 + *curr = subjmap;
56945 +
56946 + return;
56947 +}
56948 +
56949 +static struct acl_role_label *
56950 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56951 + const gid_t gid)
56952 +{
56953 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56954 + struct acl_role_label *match;
56955 + struct role_allowed_ip *ipp;
56956 + unsigned int x;
56957 + u32 curr_ip = task->signal->curr_ip;
56958 +
56959 + task->signal->saved_ip = curr_ip;
56960 +
56961 + match = acl_role_set.r_hash[index];
56962 +
56963 + while (match) {
56964 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56965 + for (x = 0; x < match->domain_child_num; x++) {
56966 + if (match->domain_children[x] == uid)
56967 + goto found;
56968 + }
56969 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56970 + break;
56971 + match = match->next;
56972 + }
56973 +found:
56974 + if (match == NULL) {
56975 + try_group:
56976 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56977 + match = acl_role_set.r_hash[index];
56978 +
56979 + while (match) {
56980 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56981 + for (x = 0; x < match->domain_child_num; x++) {
56982 + if (match->domain_children[x] == gid)
56983 + goto found2;
56984 + }
56985 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56986 + break;
56987 + match = match->next;
56988 + }
56989 +found2:
56990 + if (match == NULL)
56991 + match = default_role;
56992 + if (match->allowed_ips == NULL)
56993 + return match;
56994 + else {
56995 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56996 + if (likely
56997 + ((ntohl(curr_ip) & ipp->netmask) ==
56998 + (ntohl(ipp->addr) & ipp->netmask)))
56999 + return match;
57000 + }
57001 + match = default_role;
57002 + }
57003 + } else if (match->allowed_ips == NULL) {
57004 + return match;
57005 + } else {
57006 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57007 + if (likely
57008 + ((ntohl(curr_ip) & ipp->netmask) ==
57009 + (ntohl(ipp->addr) & ipp->netmask)))
57010 + return match;
57011 + }
57012 + goto try_group;
57013 + }
57014 +
57015 + return match;
57016 +}
57017 +
57018 +struct acl_subject_label *
57019 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57020 + const struct acl_role_label *role)
57021 +{
57022 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57023 + struct acl_subject_label *match;
57024 +
57025 + match = role->subj_hash[index];
57026 +
57027 + while (match && (match->inode != ino || match->device != dev ||
57028 + (match->mode & GR_DELETED))) {
57029 + match = match->next;
57030 + }
57031 +
57032 + if (match && !(match->mode & GR_DELETED))
57033 + return match;
57034 + else
57035 + return NULL;
57036 +}
57037 +
57038 +struct acl_subject_label *
57039 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57040 + const struct acl_role_label *role)
57041 +{
57042 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57043 + struct acl_subject_label *match;
57044 +
57045 + match = role->subj_hash[index];
57046 +
57047 + while (match && (match->inode != ino || match->device != dev ||
57048 + !(match->mode & GR_DELETED))) {
57049 + match = match->next;
57050 + }
57051 +
57052 + if (match && (match->mode & GR_DELETED))
57053 + return match;
57054 + else
57055 + return NULL;
57056 +}
57057 +
57058 +static struct acl_object_label *
57059 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57060 + const struct acl_subject_label *subj)
57061 +{
57062 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57063 + struct acl_object_label *match;
57064 +
57065 + match = subj->obj_hash[index];
57066 +
57067 + while (match && (match->inode != ino || match->device != dev ||
57068 + (match->mode & GR_DELETED))) {
57069 + match = match->next;
57070 + }
57071 +
57072 + if (match && !(match->mode & GR_DELETED))
57073 + return match;
57074 + else
57075 + return NULL;
57076 +}
57077 +
57078 +static struct acl_object_label *
57079 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57080 + const struct acl_subject_label *subj)
57081 +{
57082 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57083 + struct acl_object_label *match;
57084 +
57085 + match = subj->obj_hash[index];
57086 +
57087 + while (match && (match->inode != ino || match->device != dev ||
57088 + !(match->mode & GR_DELETED))) {
57089 + match = match->next;
57090 + }
57091 +
57092 + if (match && (match->mode & GR_DELETED))
57093 + return match;
57094 +
57095 + match = subj->obj_hash[index];
57096 +
57097 + while (match && (match->inode != ino || match->device != dev ||
57098 + (match->mode & GR_DELETED))) {
57099 + match = match->next;
57100 + }
57101 +
57102 + if (match && !(match->mode & GR_DELETED))
57103 + return match;
57104 + else
57105 + return NULL;
57106 +}
57107 +
57108 +static struct name_entry *
57109 +lookup_name_entry(const char *name)
57110 +{
57111 + unsigned int len = strlen(name);
57112 + unsigned int key = full_name_hash(name, len);
57113 + unsigned int index = key % name_set.n_size;
57114 + struct name_entry *match;
57115 +
57116 + match = name_set.n_hash[index];
57117 +
57118 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57119 + match = match->next;
57120 +
57121 + return match;
57122 +}
57123 +
57124 +static struct name_entry *
57125 +lookup_name_entry_create(const char *name)
57126 +{
57127 + unsigned int len = strlen(name);
57128 + unsigned int key = full_name_hash(name, len);
57129 + unsigned int index = key % name_set.n_size;
57130 + struct name_entry *match;
57131 +
57132 + match = name_set.n_hash[index];
57133 +
57134 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57135 + !match->deleted))
57136 + match = match->next;
57137 +
57138 + if (match && match->deleted)
57139 + return match;
57140 +
57141 + match = name_set.n_hash[index];
57142 +
57143 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57144 + match->deleted))
57145 + match = match->next;
57146 +
57147 + if (match && !match->deleted)
57148 + return match;
57149 + else
57150 + return NULL;
57151 +}
57152 +
57153 +static struct inodev_entry *
57154 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
57155 +{
57156 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
57157 + struct inodev_entry *match;
57158 +
57159 + match = inodev_set.i_hash[index];
57160 +
57161 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57162 + match = match->next;
57163 +
57164 + return match;
57165 +}
57166 +
57167 +static void
57168 +insert_inodev_entry(struct inodev_entry *entry)
57169 +{
57170 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57171 + inodev_set.i_size);
57172 + struct inodev_entry **curr;
57173 +
57174 + entry->prev = NULL;
57175 +
57176 + curr = &inodev_set.i_hash[index];
57177 + if (*curr != NULL)
57178 + (*curr)->prev = entry;
57179 +
57180 + entry->next = *curr;
57181 + *curr = entry;
57182 +
57183 + return;
57184 +}
57185 +
57186 +static void
57187 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57188 +{
57189 + unsigned int index =
57190 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57191 + struct acl_role_label **curr;
57192 + struct acl_role_label *tmp;
57193 +
57194 + curr = &acl_role_set.r_hash[index];
57195 +
57196 + /* if role was already inserted due to domains and already has
57197 + a role in the same bucket as it attached, then we need to
57198 + combine these two buckets
57199 + */
57200 + if (role->next) {
57201 + tmp = role->next;
57202 + while (tmp->next)
57203 + tmp = tmp->next;
57204 + tmp->next = *curr;
57205 + } else
57206 + role->next = *curr;
57207 + *curr = role;
57208 +
57209 + return;
57210 +}
57211 +
57212 +static void
57213 +insert_acl_role_label(struct acl_role_label *role)
57214 +{
57215 + int i;
57216 +
57217 + if (role_list == NULL) {
57218 + role_list = role;
57219 + role->prev = NULL;
57220 + } else {
57221 + role->prev = role_list;
57222 + role_list = role;
57223 + }
57224 +
57225 + /* used for hash chains */
57226 + role->next = NULL;
57227 +
57228 + if (role->roletype & GR_ROLE_DOMAIN) {
57229 + for (i = 0; i < role->domain_child_num; i++)
57230 + __insert_acl_role_label(role, role->domain_children[i]);
57231 + } else
57232 + __insert_acl_role_label(role, role->uidgid);
57233 +}
57234 +
57235 +static int
57236 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57237 +{
57238 + struct name_entry **curr, *nentry;
57239 + struct inodev_entry *ientry;
57240 + unsigned int len = strlen(name);
57241 + unsigned int key = full_name_hash(name, len);
57242 + unsigned int index = key % name_set.n_size;
57243 +
57244 + curr = &name_set.n_hash[index];
57245 +
57246 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57247 + curr = &((*curr)->next);
57248 +
57249 + if (*curr != NULL)
57250 + return 1;
57251 +
57252 + nentry = acl_alloc(sizeof (struct name_entry));
57253 + if (nentry == NULL)
57254 + return 0;
57255 + ientry = acl_alloc(sizeof (struct inodev_entry));
57256 + if (ientry == NULL)
57257 + return 0;
57258 + ientry->nentry = nentry;
57259 +
57260 + nentry->key = key;
57261 + nentry->name = name;
57262 + nentry->inode = inode;
57263 + nentry->device = device;
57264 + nentry->len = len;
57265 + nentry->deleted = deleted;
57266 +
57267 + nentry->prev = NULL;
57268 + curr = &name_set.n_hash[index];
57269 + if (*curr != NULL)
57270 + (*curr)->prev = nentry;
57271 + nentry->next = *curr;
57272 + *curr = nentry;
57273 +
57274 + /* insert us into the table searchable by inode/dev */
57275 + insert_inodev_entry(ientry);
57276 +
57277 + return 1;
57278 +}
57279 +
57280 +static void
57281 +insert_acl_obj_label(struct acl_object_label *obj,
57282 + struct acl_subject_label *subj)
57283 +{
57284 + unsigned int index =
57285 + fhash(obj->inode, obj->device, subj->obj_hash_size);
57286 + struct acl_object_label **curr;
57287 +
57288 +
57289 + obj->prev = NULL;
57290 +
57291 + curr = &subj->obj_hash[index];
57292 + if (*curr != NULL)
57293 + (*curr)->prev = obj;
57294 +
57295 + obj->next = *curr;
57296 + *curr = obj;
57297 +
57298 + return;
57299 +}
57300 +
57301 +static void
57302 +insert_acl_subj_label(struct acl_subject_label *obj,
57303 + struct acl_role_label *role)
57304 +{
57305 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57306 + struct acl_subject_label **curr;
57307 +
57308 + obj->prev = NULL;
57309 +
57310 + curr = &role->subj_hash[index];
57311 + if (*curr != NULL)
57312 + (*curr)->prev = obj;
57313 +
57314 + obj->next = *curr;
57315 + *curr = obj;
57316 +
57317 + return;
57318 +}
57319 +
57320 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57321 +
57322 +static void *
57323 +create_table(__u32 * len, int elementsize)
57324 +{
57325 + unsigned int table_sizes[] = {
57326 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57327 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57328 + 4194301, 8388593, 16777213, 33554393, 67108859
57329 + };
57330 + void *newtable = NULL;
57331 + unsigned int pwr = 0;
57332 +
57333 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57334 + table_sizes[pwr] <= *len)
57335 + pwr++;
57336 +
57337 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57338 + return newtable;
57339 +
57340 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57341 + newtable =
57342 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57343 + else
57344 + newtable = vmalloc(table_sizes[pwr] * elementsize);
57345 +
57346 + *len = table_sizes[pwr];
57347 +
57348 + return newtable;
57349 +}
57350 +
57351 +static int
57352 +init_variables(const struct gr_arg *arg)
57353 +{
57354 + struct task_struct *reaper = &init_task;
57355 + unsigned int stacksize;
57356 +
57357 + subj_map_set.s_size = arg->role_db.num_subjects;
57358 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57359 + name_set.n_size = arg->role_db.num_objects;
57360 + inodev_set.i_size = arg->role_db.num_objects;
57361 +
57362 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
57363 + !name_set.n_size || !inodev_set.i_size)
57364 + return 1;
57365 +
57366 + if (!gr_init_uidset())
57367 + return 1;
57368 +
57369 + /* set up the stack that holds allocation info */
57370 +
57371 + stacksize = arg->role_db.num_pointers + 5;
57372 +
57373 + if (!acl_alloc_stack_init(stacksize))
57374 + return 1;
57375 +
57376 + /* grab reference for the real root dentry and vfsmount */
57377 + read_lock(&reaper->fs->lock);
57378 + real_root = dget(reaper->fs->root.dentry);
57379 + real_root_mnt = mntget(reaper->fs->root.mnt);
57380 + read_unlock(&reaper->fs->lock);
57381 +
57382 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57383 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57384 +#endif
57385 +
57386 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57387 + if (fakefs_obj_rw == NULL)
57388 + return 1;
57389 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57390 +
57391 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57392 + if (fakefs_obj_rwx == NULL)
57393 + return 1;
57394 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57395 +
57396 + subj_map_set.s_hash =
57397 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57398 + acl_role_set.r_hash =
57399 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57400 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57401 + inodev_set.i_hash =
57402 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57403 +
57404 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57405 + !name_set.n_hash || !inodev_set.i_hash)
57406 + return 1;
57407 +
57408 + memset(subj_map_set.s_hash, 0,
57409 + sizeof(struct subject_map *) * subj_map_set.s_size);
57410 + memset(acl_role_set.r_hash, 0,
57411 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
57412 + memset(name_set.n_hash, 0,
57413 + sizeof (struct name_entry *) * name_set.n_size);
57414 + memset(inodev_set.i_hash, 0,
57415 + sizeof (struct inodev_entry *) * inodev_set.i_size);
57416 +
57417 + return 0;
57418 +}
57419 +
57420 +/* free information not needed after startup
57421 + currently contains user->kernel pointer mappings for subjects
57422 +*/
57423 +
57424 +static void
57425 +free_init_variables(void)
57426 +{
57427 + __u32 i;
57428 +
57429 + if (subj_map_set.s_hash) {
57430 + for (i = 0; i < subj_map_set.s_size; i++) {
57431 + if (subj_map_set.s_hash[i]) {
57432 + kfree(subj_map_set.s_hash[i]);
57433 + subj_map_set.s_hash[i] = NULL;
57434 + }
57435 + }
57436 +
57437 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57438 + PAGE_SIZE)
57439 + kfree(subj_map_set.s_hash);
57440 + else
57441 + vfree(subj_map_set.s_hash);
57442 + }
57443 +
57444 + return;
57445 +}
57446 +
57447 +static void
57448 +free_variables(void)
57449 +{
57450 + struct acl_subject_label *s;
57451 + struct acl_role_label *r;
57452 + struct task_struct *task, *task2;
57453 + unsigned int x;
57454 +
57455 + gr_clear_learn_entries();
57456 +
57457 + read_lock(&tasklist_lock);
57458 + do_each_thread(task2, task) {
57459 + task->acl_sp_role = 0;
57460 + task->acl_role_id = 0;
57461 + task->acl = NULL;
57462 + task->role = NULL;
57463 + } while_each_thread(task2, task);
57464 + read_unlock(&tasklist_lock);
57465 +
57466 + /* release the reference to the real root dentry and vfsmount */
57467 + if (real_root)
57468 + dput(real_root);
57469 + real_root = NULL;
57470 + if (real_root_mnt)
57471 + mntput(real_root_mnt);
57472 + real_root_mnt = NULL;
57473 +
57474 + /* free all object hash tables */
57475 +
57476 + FOR_EACH_ROLE_START(r)
57477 + if (r->subj_hash == NULL)
57478 + goto next_role;
57479 + FOR_EACH_SUBJECT_START(r, s, x)
57480 + if (s->obj_hash == NULL)
57481 + break;
57482 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57483 + kfree(s->obj_hash);
57484 + else
57485 + vfree(s->obj_hash);
57486 + FOR_EACH_SUBJECT_END(s, x)
57487 + FOR_EACH_NESTED_SUBJECT_START(r, s)
57488 + if (s->obj_hash == NULL)
57489 + break;
57490 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57491 + kfree(s->obj_hash);
57492 + else
57493 + vfree(s->obj_hash);
57494 + FOR_EACH_NESTED_SUBJECT_END(s)
57495 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57496 + kfree(r->subj_hash);
57497 + else
57498 + vfree(r->subj_hash);
57499 + r->subj_hash = NULL;
57500 +next_role:
57501 + FOR_EACH_ROLE_END(r)
57502 +
57503 + acl_free_all();
57504 +
57505 + if (acl_role_set.r_hash) {
57506 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57507 + PAGE_SIZE)
57508 + kfree(acl_role_set.r_hash);
57509 + else
57510 + vfree(acl_role_set.r_hash);
57511 + }
57512 + if (name_set.n_hash) {
57513 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
57514 + PAGE_SIZE)
57515 + kfree(name_set.n_hash);
57516 + else
57517 + vfree(name_set.n_hash);
57518 + }
57519 +
57520 + if (inodev_set.i_hash) {
57521 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57522 + PAGE_SIZE)
57523 + kfree(inodev_set.i_hash);
57524 + else
57525 + vfree(inodev_set.i_hash);
57526 + }
57527 +
57528 + gr_free_uidset();
57529 +
57530 + memset(&name_set, 0, sizeof (struct name_db));
57531 + memset(&inodev_set, 0, sizeof (struct inodev_db));
57532 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57533 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57534 +
57535 + default_role = NULL;
57536 + role_list = NULL;
57537 +
57538 + return;
57539 +}
57540 +
57541 +static __u32
57542 +count_user_objs(struct acl_object_label *userp)
57543 +{
57544 + struct acl_object_label o_tmp;
57545 + __u32 num = 0;
57546 +
57547 + while (userp) {
57548 + if (copy_from_user(&o_tmp, userp,
57549 + sizeof (struct acl_object_label)))
57550 + break;
57551 +
57552 + userp = o_tmp.prev;
57553 + num++;
57554 + }
57555 +
57556 + return num;
57557 +}
57558 +
57559 +static struct acl_subject_label *
57560 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57561 +
57562 +static int
57563 +copy_user_glob(struct acl_object_label *obj)
57564 +{
57565 + struct acl_object_label *g_tmp, **guser;
57566 + unsigned int len;
57567 + char *tmp;
57568 +
57569 + if (obj->globbed == NULL)
57570 + return 0;
57571 +
57572 + guser = &obj->globbed;
57573 + while (*guser) {
57574 + g_tmp = (struct acl_object_label *)
57575 + acl_alloc(sizeof (struct acl_object_label));
57576 + if (g_tmp == NULL)
57577 + return -ENOMEM;
57578 +
57579 + if (copy_from_user(g_tmp, *guser,
57580 + sizeof (struct acl_object_label)))
57581 + return -EFAULT;
57582 +
57583 + len = strnlen_user(g_tmp->filename, PATH_MAX);
57584 +
57585 + if (!len || len >= PATH_MAX)
57586 + return -EINVAL;
57587 +
57588 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57589 + return -ENOMEM;
57590 +
57591 + if (copy_from_user(tmp, g_tmp->filename, len))
57592 + return -EFAULT;
57593 + tmp[len-1] = '\0';
57594 + g_tmp->filename = tmp;
57595 +
57596 + *guser = g_tmp;
57597 + guser = &(g_tmp->next);
57598 + }
57599 +
57600 + return 0;
57601 +}
57602 +
57603 +static int
57604 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57605 + struct acl_role_label *role)
57606 +{
57607 + struct acl_object_label *o_tmp;
57608 + unsigned int len;
57609 + int ret;
57610 + char *tmp;
57611 +
57612 + while (userp) {
57613 + if ((o_tmp = (struct acl_object_label *)
57614 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
57615 + return -ENOMEM;
57616 +
57617 + if (copy_from_user(o_tmp, userp,
57618 + sizeof (struct acl_object_label)))
57619 + return -EFAULT;
57620 +
57621 + userp = o_tmp->prev;
57622 +
57623 + len = strnlen_user(o_tmp->filename, PATH_MAX);
57624 +
57625 + if (!len || len >= PATH_MAX)
57626 + return -EINVAL;
57627 +
57628 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57629 + return -ENOMEM;
57630 +
57631 + if (copy_from_user(tmp, o_tmp->filename, len))
57632 + return -EFAULT;
57633 + tmp[len-1] = '\0';
57634 + o_tmp->filename = tmp;
57635 +
57636 + insert_acl_obj_label(o_tmp, subj);
57637 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57638 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57639 + return -ENOMEM;
57640 +
57641 + ret = copy_user_glob(o_tmp);
57642 + if (ret)
57643 + return ret;
57644 +
57645 + if (o_tmp->nested) {
57646 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57647 + if (IS_ERR(o_tmp->nested))
57648 + return PTR_ERR(o_tmp->nested);
57649 +
57650 + /* insert into nested subject list */
57651 + o_tmp->nested->next = role->hash->first;
57652 + role->hash->first = o_tmp->nested;
57653 + }
57654 + }
57655 +
57656 + return 0;
57657 +}
57658 +
57659 +static __u32
57660 +count_user_subjs(struct acl_subject_label *userp)
57661 +{
57662 + struct acl_subject_label s_tmp;
57663 + __u32 num = 0;
57664 +
57665 + while (userp) {
57666 + if (copy_from_user(&s_tmp, userp,
57667 + sizeof (struct acl_subject_label)))
57668 + break;
57669 +
57670 + userp = s_tmp.prev;
57671 + /* do not count nested subjects against this count, since
57672 + they are not included in the hash table, but are
57673 + attached to objects. We have already counted
57674 + the subjects in userspace for the allocation
57675 + stack
57676 + */
57677 + if (!(s_tmp.mode & GR_NESTED))
57678 + num++;
57679 + }
57680 +
57681 + return num;
57682 +}
57683 +
57684 +static int
57685 +copy_user_allowedips(struct acl_role_label *rolep)
57686 +{
57687 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57688 +
57689 + ruserip = rolep->allowed_ips;
57690 +
57691 + while (ruserip) {
57692 + rlast = rtmp;
57693 +
57694 + if ((rtmp = (struct role_allowed_ip *)
57695 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57696 + return -ENOMEM;
57697 +
57698 + if (copy_from_user(rtmp, ruserip,
57699 + sizeof (struct role_allowed_ip)))
57700 + return -EFAULT;
57701 +
57702 + ruserip = rtmp->prev;
57703 +
57704 + if (!rlast) {
57705 + rtmp->prev = NULL;
57706 + rolep->allowed_ips = rtmp;
57707 + } else {
57708 + rlast->next = rtmp;
57709 + rtmp->prev = rlast;
57710 + }
57711 +
57712 + if (!ruserip)
57713 + rtmp->next = NULL;
57714 + }
57715 +
57716 + return 0;
57717 +}
57718 +
57719 +static int
57720 +copy_user_transitions(struct acl_role_label *rolep)
57721 +{
57722 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57723 +
57724 + unsigned int len;
57725 + char *tmp;
57726 +
57727 + rusertp = rolep->transitions;
57728 +
57729 + while (rusertp) {
57730 + rlast = rtmp;
57731 +
57732 + if ((rtmp = (struct role_transition *)
57733 + acl_alloc(sizeof (struct role_transition))) == NULL)
57734 + return -ENOMEM;
57735 +
57736 + if (copy_from_user(rtmp, rusertp,
57737 + sizeof (struct role_transition)))
57738 + return -EFAULT;
57739 +
57740 + rusertp = rtmp->prev;
57741 +
57742 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57743 +
57744 + if (!len || len >= GR_SPROLE_LEN)
57745 + return -EINVAL;
57746 +
57747 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57748 + return -ENOMEM;
57749 +
57750 + if (copy_from_user(tmp, rtmp->rolename, len))
57751 + return -EFAULT;
57752 + tmp[len-1] = '\0';
57753 + rtmp->rolename = tmp;
57754 +
57755 + if (!rlast) {
57756 + rtmp->prev = NULL;
57757 + rolep->transitions = rtmp;
57758 + } else {
57759 + rlast->next = rtmp;
57760 + rtmp->prev = rlast;
57761 + }
57762 +
57763 + if (!rusertp)
57764 + rtmp->next = NULL;
57765 + }
57766 +
57767 + return 0;
57768 +}
57769 +
57770 +static struct acl_subject_label *
57771 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57772 +{
57773 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57774 + unsigned int len;
57775 + char *tmp;
57776 + __u32 num_objs;
57777 + struct acl_ip_label **i_tmp, *i_utmp2;
57778 + struct gr_hash_struct ghash;
57779 + struct subject_map *subjmap;
57780 + unsigned int i_num;
57781 + int err;
57782 +
57783 + s_tmp = lookup_subject_map(userp);
57784 +
57785 + /* we've already copied this subject into the kernel, just return
57786 + the reference to it, and don't copy it over again
57787 + */
57788 + if (s_tmp)
57789 + return(s_tmp);
57790 +
57791 + if ((s_tmp = (struct acl_subject_label *)
57792 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57793 + return ERR_PTR(-ENOMEM);
57794 +
57795 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57796 + if (subjmap == NULL)
57797 + return ERR_PTR(-ENOMEM);
57798 +
57799 + subjmap->user = userp;
57800 + subjmap->kernel = s_tmp;
57801 + insert_subj_map_entry(subjmap);
57802 +
57803 + if (copy_from_user(s_tmp, userp,
57804 + sizeof (struct acl_subject_label)))
57805 + return ERR_PTR(-EFAULT);
57806 +
57807 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57808 +
57809 + if (!len || len >= PATH_MAX)
57810 + return ERR_PTR(-EINVAL);
57811 +
57812 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57813 + return ERR_PTR(-ENOMEM);
57814 +
57815 + if (copy_from_user(tmp, s_tmp->filename, len))
57816 + return ERR_PTR(-EFAULT);
57817 + tmp[len-1] = '\0';
57818 + s_tmp->filename = tmp;
57819 +
57820 + if (!strcmp(s_tmp->filename, "/"))
57821 + role->root_label = s_tmp;
57822 +
57823 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57824 + return ERR_PTR(-EFAULT);
57825 +
57826 + /* copy user and group transition tables */
57827 +
57828 + if (s_tmp->user_trans_num) {
57829 + uid_t *uidlist;
57830 +
57831 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57832 + if (uidlist == NULL)
57833 + return ERR_PTR(-ENOMEM);
57834 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57835 + return ERR_PTR(-EFAULT);
57836 +
57837 + s_tmp->user_transitions = uidlist;
57838 + }
57839 +
57840 + if (s_tmp->group_trans_num) {
57841 + gid_t *gidlist;
57842 +
57843 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57844 + if (gidlist == NULL)
57845 + return ERR_PTR(-ENOMEM);
57846 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57847 + return ERR_PTR(-EFAULT);
57848 +
57849 + s_tmp->group_transitions = gidlist;
57850 + }
57851 +
57852 + /* set up object hash table */
57853 + num_objs = count_user_objs(ghash.first);
57854 +
57855 + s_tmp->obj_hash_size = num_objs;
57856 + s_tmp->obj_hash =
57857 + (struct acl_object_label **)
57858 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57859 +
57860 + if (!s_tmp->obj_hash)
57861 + return ERR_PTR(-ENOMEM);
57862 +
57863 + memset(s_tmp->obj_hash, 0,
57864 + s_tmp->obj_hash_size *
57865 + sizeof (struct acl_object_label *));
57866 +
57867 + /* add in objects */
57868 + err = copy_user_objs(ghash.first, s_tmp, role);
57869 +
57870 + if (err)
57871 + return ERR_PTR(err);
57872 +
57873 + /* set pointer for parent subject */
57874 + if (s_tmp->parent_subject) {
57875 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57876 +
57877 + if (IS_ERR(s_tmp2))
57878 + return s_tmp2;
57879 +
57880 + s_tmp->parent_subject = s_tmp2;
57881 + }
57882 +
57883 + /* add in ip acls */
57884 +
57885 + if (!s_tmp->ip_num) {
57886 + s_tmp->ips = NULL;
57887 + goto insert;
57888 + }
57889 +
57890 + i_tmp =
57891 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57892 + sizeof (struct acl_ip_label *));
57893 +
57894 + if (!i_tmp)
57895 + return ERR_PTR(-ENOMEM);
57896 +
57897 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57898 + *(i_tmp + i_num) =
57899 + (struct acl_ip_label *)
57900 + acl_alloc(sizeof (struct acl_ip_label));
57901 + if (!*(i_tmp + i_num))
57902 + return ERR_PTR(-ENOMEM);
57903 +
57904 + if (copy_from_user
57905 + (&i_utmp2, s_tmp->ips + i_num,
57906 + sizeof (struct acl_ip_label *)))
57907 + return ERR_PTR(-EFAULT);
57908 +
57909 + if (copy_from_user
57910 + (*(i_tmp + i_num), i_utmp2,
57911 + sizeof (struct acl_ip_label)))
57912 + return ERR_PTR(-EFAULT);
57913 +
57914 + if ((*(i_tmp + i_num))->iface == NULL)
57915 + continue;
57916 +
57917 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57918 + if (!len || len >= IFNAMSIZ)
57919 + return ERR_PTR(-EINVAL);
57920 + tmp = acl_alloc(len);
57921 + if (tmp == NULL)
57922 + return ERR_PTR(-ENOMEM);
57923 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57924 + return ERR_PTR(-EFAULT);
57925 + (*(i_tmp + i_num))->iface = tmp;
57926 + }
57927 +
57928 + s_tmp->ips = i_tmp;
57929 +
57930 +insert:
57931 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57932 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57933 + return ERR_PTR(-ENOMEM);
57934 +
57935 + return s_tmp;
57936 +}
57937 +
57938 +static int
57939 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57940 +{
57941 + struct acl_subject_label s_pre;
57942 + struct acl_subject_label * ret;
57943 + int err;
57944 +
57945 + while (userp) {
57946 + if (copy_from_user(&s_pre, userp,
57947 + sizeof (struct acl_subject_label)))
57948 + return -EFAULT;
57949 +
57950 + /* do not add nested subjects here, add
57951 + while parsing objects
57952 + */
57953 +
57954 + if (s_pre.mode & GR_NESTED) {
57955 + userp = s_pre.prev;
57956 + continue;
57957 + }
57958 +
57959 + ret = do_copy_user_subj(userp, role);
57960 +
57961 + err = PTR_ERR(ret);
57962 + if (IS_ERR(ret))
57963 + return err;
57964 +
57965 + insert_acl_subj_label(ret, role);
57966 +
57967 + userp = s_pre.prev;
57968 + }
57969 +
57970 + return 0;
57971 +}
57972 +
57973 +static int
57974 +copy_user_acl(struct gr_arg *arg)
57975 +{
57976 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57977 + struct sprole_pw *sptmp;
57978 + struct gr_hash_struct *ghash;
57979 + uid_t *domainlist;
57980 + unsigned int r_num;
57981 + unsigned int len;
57982 + char *tmp;
57983 + int err = 0;
57984 + __u16 i;
57985 + __u32 num_subjs;
57986 +
57987 + /* we need a default and kernel role */
57988 + if (arg->role_db.num_roles < 2)
57989 + return -EINVAL;
57990 +
57991 + /* copy special role authentication info from userspace */
57992 +
57993 + num_sprole_pws = arg->num_sprole_pws;
57994 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57995 +
57996 + if (!acl_special_roles) {
57997 + err = -ENOMEM;
57998 + goto cleanup;
57999 + }
58000 +
58001 + for (i = 0; i < num_sprole_pws; i++) {
58002 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58003 + if (!sptmp) {
58004 + err = -ENOMEM;
58005 + goto cleanup;
58006 + }
58007 + if (copy_from_user(sptmp, arg->sprole_pws + i,
58008 + sizeof (struct sprole_pw))) {
58009 + err = -EFAULT;
58010 + goto cleanup;
58011 + }
58012 +
58013 + len =
58014 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58015 +
58016 + if (!len || len >= GR_SPROLE_LEN) {
58017 + err = -EINVAL;
58018 + goto cleanup;
58019 + }
58020 +
58021 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58022 + err = -ENOMEM;
58023 + goto cleanup;
58024 + }
58025 +
58026 + if (copy_from_user(tmp, sptmp->rolename, len)) {
58027 + err = -EFAULT;
58028 + goto cleanup;
58029 + }
58030 + tmp[len-1] = '\0';
58031 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58032 + printk(KERN_ALERT "Copying special role %s\n", tmp);
58033 +#endif
58034 + sptmp->rolename = tmp;
58035 + acl_special_roles[i] = sptmp;
58036 + }
58037 +
58038 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58039 +
58040 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58041 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
58042 +
58043 + if (!r_tmp) {
58044 + err = -ENOMEM;
58045 + goto cleanup;
58046 + }
58047 +
58048 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
58049 + sizeof (struct acl_role_label *))) {
58050 + err = -EFAULT;
58051 + goto cleanup;
58052 + }
58053 +
58054 + if (copy_from_user(r_tmp, r_utmp2,
58055 + sizeof (struct acl_role_label))) {
58056 + err = -EFAULT;
58057 + goto cleanup;
58058 + }
58059 +
58060 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58061 +
58062 + if (!len || len >= PATH_MAX) {
58063 + err = -EINVAL;
58064 + goto cleanup;
58065 + }
58066 +
58067 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58068 + err = -ENOMEM;
58069 + goto cleanup;
58070 + }
58071 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
58072 + err = -EFAULT;
58073 + goto cleanup;
58074 + }
58075 + tmp[len-1] = '\0';
58076 + r_tmp->rolename = tmp;
58077 +
58078 + if (!strcmp(r_tmp->rolename, "default")
58079 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58080 + default_role = r_tmp;
58081 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58082 + kernel_role = r_tmp;
58083 + }
58084 +
58085 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
58086 + err = -ENOMEM;
58087 + goto cleanup;
58088 + }
58089 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
58090 + err = -EFAULT;
58091 + goto cleanup;
58092 + }
58093 +
58094 + r_tmp->hash = ghash;
58095 +
58096 + num_subjs = count_user_subjs(r_tmp->hash->first);
58097 +
58098 + r_tmp->subj_hash_size = num_subjs;
58099 + r_tmp->subj_hash =
58100 + (struct acl_subject_label **)
58101 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58102 +
58103 + if (!r_tmp->subj_hash) {
58104 + err = -ENOMEM;
58105 + goto cleanup;
58106 + }
58107 +
58108 + err = copy_user_allowedips(r_tmp);
58109 + if (err)
58110 + goto cleanup;
58111 +
58112 + /* copy domain info */
58113 + if (r_tmp->domain_children != NULL) {
58114 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58115 + if (domainlist == NULL) {
58116 + err = -ENOMEM;
58117 + goto cleanup;
58118 + }
58119 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
58120 + err = -EFAULT;
58121 + goto cleanup;
58122 + }
58123 + r_tmp->domain_children = domainlist;
58124 + }
58125 +
58126 + err = copy_user_transitions(r_tmp);
58127 + if (err)
58128 + goto cleanup;
58129 +
58130 + memset(r_tmp->subj_hash, 0,
58131 + r_tmp->subj_hash_size *
58132 + sizeof (struct acl_subject_label *));
58133 +
58134 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
58135 +
58136 + if (err)
58137 + goto cleanup;
58138 +
58139 + /* set nested subject list to null */
58140 + r_tmp->hash->first = NULL;
58141 +
58142 + insert_acl_role_label(r_tmp);
58143 + }
58144 +
58145 + goto return_err;
58146 + cleanup:
58147 + free_variables();
58148 + return_err:
58149 + return err;
58150 +
58151 +}
58152 +
58153 +static int
58154 +gracl_init(struct gr_arg *args)
58155 +{
58156 + int error = 0;
58157 +
58158 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58159 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58160 +
58161 + if (init_variables(args)) {
58162 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58163 + error = -ENOMEM;
58164 + free_variables();
58165 + goto out;
58166 + }
58167 +
58168 + error = copy_user_acl(args);
58169 + free_init_variables();
58170 + if (error) {
58171 + free_variables();
58172 + goto out;
58173 + }
58174 +
58175 + if ((error = gr_set_acls(0))) {
58176 + free_variables();
58177 + goto out;
58178 + }
58179 +
58180 + pax_open_kernel();
58181 + gr_status |= GR_READY;
58182 + pax_close_kernel();
58183 +
58184 + out:
58185 + return error;
58186 +}
58187 +
58188 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58189 +
58190 +static int
58191 +glob_match(const char *p, const char *n)
58192 +{
58193 + char c;
58194 +
58195 + while ((c = *p++) != '\0') {
58196 + switch (c) {
58197 + case '?':
58198 + if (*n == '\0')
58199 + return 1;
58200 + else if (*n == '/')
58201 + return 1;
58202 + break;
58203 + case '\\':
58204 + if (*n != c)
58205 + return 1;
58206 + break;
58207 + case '*':
58208 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58209 + if (*n == '/')
58210 + return 1;
58211 + else if (c == '?') {
58212 + if (*n == '\0')
58213 + return 1;
58214 + else
58215 + ++n;
58216 + }
58217 + }
58218 + if (c == '\0') {
58219 + return 0;
58220 + } else {
58221 + const char *endp;
58222 +
58223 + if ((endp = strchr(n, '/')) == NULL)
58224 + endp = n + strlen(n);
58225 +
58226 + if (c == '[') {
58227 + for (--p; n < endp; ++n)
58228 + if (!glob_match(p, n))
58229 + return 0;
58230 + } else if (c == '/') {
58231 + while (*n != '\0' && *n != '/')
58232 + ++n;
58233 + if (*n == '/' && !glob_match(p, n + 1))
58234 + return 0;
58235 + } else {
58236 + for (--p; n < endp; ++n)
58237 + if (*n == c && !glob_match(p, n))
58238 + return 0;
58239 + }
58240 +
58241 + return 1;
58242 + }
58243 + case '[':
58244 + {
58245 + int not;
58246 + char cold;
58247 +
58248 + if (*n == '\0' || *n == '/')
58249 + return 1;
58250 +
58251 + not = (*p == '!' || *p == '^');
58252 + if (not)
58253 + ++p;
58254 +
58255 + c = *p++;
58256 + for (;;) {
58257 + unsigned char fn = (unsigned char)*n;
58258 +
58259 + if (c == '\0')
58260 + return 1;
58261 + else {
58262 + if (c == fn)
58263 + goto matched;
58264 + cold = c;
58265 + c = *p++;
58266 +
58267 + if (c == '-' && *p != ']') {
58268 + unsigned char cend = *p++;
58269 +
58270 + if (cend == '\0')
58271 + return 1;
58272 +
58273 + if (cold <= fn && fn <= cend)
58274 + goto matched;
58275 +
58276 + c = *p++;
58277 + }
58278 + }
58279 +
58280 + if (c == ']')
58281 + break;
58282 + }
58283 + if (!not)
58284 + return 1;
58285 + break;
58286 + matched:
58287 + while (c != ']') {
58288 + if (c == '\0')
58289 + return 1;
58290 +
58291 + c = *p++;
58292 + }
58293 + if (not)
58294 + return 1;
58295 + }
58296 + break;
58297 + default:
58298 + if (c != *n)
58299 + return 1;
58300 + }
58301 +
58302 + ++n;
58303 + }
58304 +
58305 + if (*n == '\0')
58306 + return 0;
58307 +
58308 + if (*n == '/')
58309 + return 0;
58310 +
58311 + return 1;
58312 +}
58313 +
58314 +static struct acl_object_label *
58315 +chk_glob_label(struct acl_object_label *globbed,
58316 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58317 +{
58318 + struct acl_object_label *tmp;
58319 +
58320 + if (*path == NULL)
58321 + *path = gr_to_filename_nolock(dentry, mnt);
58322 +
58323 + tmp = globbed;
58324 +
58325 + while (tmp) {
58326 + if (!glob_match(tmp->filename, *path))
58327 + return tmp;
58328 + tmp = tmp->next;
58329 + }
58330 +
58331 + return NULL;
58332 +}
58333 +
58334 +static struct acl_object_label *
58335 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58336 + const ino_t curr_ino, const dev_t curr_dev,
58337 + const struct acl_subject_label *subj, char **path, const int checkglob)
58338 +{
58339 + struct acl_subject_label *tmpsubj;
58340 + struct acl_object_label *retval;
58341 + struct acl_object_label *retval2;
58342 +
58343 + tmpsubj = (struct acl_subject_label *) subj;
58344 + read_lock(&gr_inode_lock);
58345 + do {
58346 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58347 + if (retval) {
58348 + if (checkglob && retval->globbed) {
58349 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58350 + if (retval2)
58351 + retval = retval2;
58352 + }
58353 + break;
58354 + }
58355 + } while ((tmpsubj = tmpsubj->parent_subject));
58356 + read_unlock(&gr_inode_lock);
58357 +
58358 + return retval;
58359 +}
58360 +
58361 +static __inline__ struct acl_object_label *
58362 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58363 + const struct dentry *curr_dentry,
58364 + const struct acl_subject_label *subj, char **path, const int checkglob)
58365 +{
58366 + int newglob = checkglob;
58367 +
58368 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58369 + as we don't want a / * rule to match instead of the / object
58370 + don't do this for create lookups that call this function though, since they're looking up
58371 + on the parent and thus need globbing checks on all paths
58372 + */
58373 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58374 + newglob = GR_NO_GLOB;
58375 +
58376 + return __full_lookup(orig_dentry, orig_mnt,
58377 + curr_dentry->d_inode->i_ino,
58378 + __get_dev(curr_dentry), subj, path, newglob);
58379 +}
58380 +
58381 +static struct acl_object_label *
58382 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58383 + const struct acl_subject_label *subj, char *path, const int checkglob)
58384 +{
58385 + struct dentry *dentry = (struct dentry *) l_dentry;
58386 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58387 + struct acl_object_label *retval;
58388 +
58389 + spin_lock(&dcache_lock);
58390 + spin_lock(&vfsmount_lock);
58391 +
58392 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58393 +#ifdef CONFIG_NET
58394 + mnt == sock_mnt ||
58395 +#endif
58396 +#ifdef CONFIG_HUGETLBFS
58397 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58398 +#endif
58399 + /* ignore Eric Biederman */
58400 + IS_PRIVATE(l_dentry->d_inode))) {
58401 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58402 + goto out;
58403 + }
58404 +
58405 + for (;;) {
58406 + if (dentry == real_root && mnt == real_root_mnt)
58407 + break;
58408 +
58409 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58410 + if (mnt->mnt_parent == mnt)
58411 + break;
58412 +
58413 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58414 + if (retval != NULL)
58415 + goto out;
58416 +
58417 + dentry = mnt->mnt_mountpoint;
58418 + mnt = mnt->mnt_parent;
58419 + continue;
58420 + }
58421 +
58422 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58423 + if (retval != NULL)
58424 + goto out;
58425 +
58426 + dentry = dentry->d_parent;
58427 + }
58428 +
58429 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58430 +
58431 + if (retval == NULL)
58432 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58433 +out:
58434 + spin_unlock(&vfsmount_lock);
58435 + spin_unlock(&dcache_lock);
58436 +
58437 + BUG_ON(retval == NULL);
58438 +
58439 + return retval;
58440 +}
58441 +
58442 +static __inline__ struct acl_object_label *
58443 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58444 + const struct acl_subject_label *subj)
58445 +{
58446 + char *path = NULL;
58447 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58448 +}
58449 +
58450 +static __inline__ struct acl_object_label *
58451 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58452 + const struct acl_subject_label *subj)
58453 +{
58454 + char *path = NULL;
58455 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58456 +}
58457 +
58458 +static __inline__ struct acl_object_label *
58459 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58460 + const struct acl_subject_label *subj, char *path)
58461 +{
58462 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58463 +}
58464 +
58465 +static struct acl_subject_label *
58466 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58467 + const struct acl_role_label *role)
58468 +{
58469 + struct dentry *dentry = (struct dentry *) l_dentry;
58470 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58471 + struct acl_subject_label *retval;
58472 +
58473 + spin_lock(&dcache_lock);
58474 + spin_lock(&vfsmount_lock);
58475 +
58476 + for (;;) {
58477 + if (dentry == real_root && mnt == real_root_mnt)
58478 + break;
58479 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58480 + if (mnt->mnt_parent == mnt)
58481 + break;
58482 +
58483 + read_lock(&gr_inode_lock);
58484 + retval =
58485 + lookup_acl_subj_label(dentry->d_inode->i_ino,
58486 + __get_dev(dentry), role);
58487 + read_unlock(&gr_inode_lock);
58488 + if (retval != NULL)
58489 + goto out;
58490 +
58491 + dentry = mnt->mnt_mountpoint;
58492 + mnt = mnt->mnt_parent;
58493 + continue;
58494 + }
58495 +
58496 + read_lock(&gr_inode_lock);
58497 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58498 + __get_dev(dentry), role);
58499 + read_unlock(&gr_inode_lock);
58500 + if (retval != NULL)
58501 + goto out;
58502 +
58503 + dentry = dentry->d_parent;
58504 + }
58505 +
58506 + read_lock(&gr_inode_lock);
58507 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58508 + __get_dev(dentry), role);
58509 + read_unlock(&gr_inode_lock);
58510 +
58511 + if (unlikely(retval == NULL)) {
58512 + read_lock(&gr_inode_lock);
58513 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58514 + __get_dev(real_root), role);
58515 + read_unlock(&gr_inode_lock);
58516 + }
58517 +out:
58518 + spin_unlock(&vfsmount_lock);
58519 + spin_unlock(&dcache_lock);
58520 +
58521 + BUG_ON(retval == NULL);
58522 +
58523 + return retval;
58524 +}
58525 +
58526 +static void
58527 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58528 +{
58529 + struct task_struct *task = current;
58530 + const struct cred *cred = current_cred();
58531 +
58532 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58533 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58534 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58535 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58536 +
58537 + return;
58538 +}
58539 +
58540 +static void
58541 +gr_log_learn_sysctl(const char *path, const __u32 mode)
58542 +{
58543 + struct task_struct *task = current;
58544 + const struct cred *cred = current_cred();
58545 +
58546 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58547 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58548 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58549 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58550 +
58551 + return;
58552 +}
58553 +
58554 +static void
58555 +gr_log_learn_id_change(const char type, const unsigned int real,
58556 + const unsigned int effective, const unsigned int fs)
58557 +{
58558 + struct task_struct *task = current;
58559 + const struct cred *cred = current_cred();
58560 +
58561 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58562 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58563 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58564 + type, real, effective, fs, &task->signal->saved_ip);
58565 +
58566 + return;
58567 +}
58568 +
58569 +__u32
58570 +gr_search_file(const struct dentry * dentry, const __u32 mode,
58571 + const struct vfsmount * mnt)
58572 +{
58573 + __u32 retval = mode;
58574 + struct acl_subject_label *curracl;
58575 + struct acl_object_label *currobj;
58576 +
58577 + if (unlikely(!(gr_status & GR_READY)))
58578 + return (mode & ~GR_AUDITS);
58579 +
58580 + curracl = current->acl;
58581 +
58582 + currobj = chk_obj_label(dentry, mnt, curracl);
58583 + retval = currobj->mode & mode;
58584 +
58585 + /* if we're opening a specified transfer file for writing
58586 + (e.g. /dev/initctl), then transfer our role to init
58587 + */
58588 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58589 + current->role->roletype & GR_ROLE_PERSIST)) {
58590 + struct task_struct *task = init_pid_ns.child_reaper;
58591 +
58592 + if (task->role != current->role) {
58593 + task->acl_sp_role = 0;
58594 + task->acl_role_id = current->acl_role_id;
58595 + task->role = current->role;
58596 + rcu_read_lock();
58597 + read_lock(&grsec_exec_file_lock);
58598 + gr_apply_subject_to_task(task);
58599 + read_unlock(&grsec_exec_file_lock);
58600 + rcu_read_unlock();
58601 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58602 + }
58603 + }
58604 +
58605 + if (unlikely
58606 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58607 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58608 + __u32 new_mode = mode;
58609 +
58610 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58611 +
58612 + retval = new_mode;
58613 +
58614 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58615 + new_mode |= GR_INHERIT;
58616 +
58617 + if (!(mode & GR_NOLEARN))
58618 + gr_log_learn(dentry, mnt, new_mode);
58619 + }
58620 +
58621 + return retval;
58622 +}
58623 +
58624 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58625 + const struct dentry *parent,
58626 + const struct vfsmount *mnt)
58627 +{
58628 + struct name_entry *match;
58629 + struct acl_object_label *matchpo;
58630 + struct acl_subject_label *curracl;
58631 + char *path;
58632 +
58633 + if (unlikely(!(gr_status & GR_READY)))
58634 + return NULL;
58635 +
58636 + preempt_disable();
58637 + path = gr_to_filename_rbac(new_dentry, mnt);
58638 + match = lookup_name_entry_create(path);
58639 +
58640 + curracl = current->acl;
58641 +
58642 + if (match) {
58643 + read_lock(&gr_inode_lock);
58644 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58645 + read_unlock(&gr_inode_lock);
58646 +
58647 + if (matchpo) {
58648 + preempt_enable();
58649 + return matchpo;
58650 + }
58651 + }
58652 +
58653 + // lookup parent
58654 +
58655 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58656 +
58657 + preempt_enable();
58658 + return matchpo;
58659 +}
58660 +
58661 +__u32
58662 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58663 + const struct vfsmount * mnt, const __u32 mode)
58664 +{
58665 + struct acl_object_label *matchpo;
58666 + __u32 retval;
58667 +
58668 + if (unlikely(!(gr_status & GR_READY)))
58669 + return (mode & ~GR_AUDITS);
58670 +
58671 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58672 +
58673 + retval = matchpo->mode & mode;
58674 +
58675 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58676 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58677 + __u32 new_mode = mode;
58678 +
58679 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58680 +
58681 + gr_log_learn(new_dentry, mnt, new_mode);
58682 + return new_mode;
58683 + }
58684 +
58685 + return retval;
58686 +}
58687 +
58688 +__u32
58689 +gr_check_link(const struct dentry * new_dentry,
58690 + const struct dentry * parent_dentry,
58691 + const struct vfsmount * parent_mnt,
58692 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58693 +{
58694 + struct acl_object_label *obj;
58695 + __u32 oldmode, newmode;
58696 + __u32 needmode;
58697 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58698 + GR_DELETE | GR_INHERIT;
58699 +
58700 + if (unlikely(!(gr_status & GR_READY)))
58701 + return (GR_CREATE | GR_LINK);
58702 +
58703 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58704 + oldmode = obj->mode;
58705 +
58706 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58707 + newmode = obj->mode;
58708 +
58709 + needmode = newmode & checkmodes;
58710 +
58711 + // old name for hardlink must have at least the permissions of the new name
58712 + if ((oldmode & needmode) != needmode)
58713 + goto bad;
58714 +
58715 + // if old name had restrictions/auditing, make sure the new name does as well
58716 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58717 +
58718 + // don't allow hardlinking of suid/sgid files without permission
58719 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58720 + needmode |= GR_SETID;
58721 +
58722 + if ((newmode & needmode) != needmode)
58723 + goto bad;
58724 +
58725 + // enforce minimum permissions
58726 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58727 + return newmode;
58728 +bad:
58729 + needmode = oldmode;
58730 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58731 + needmode |= GR_SETID;
58732 +
58733 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58734 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58735 + return (GR_CREATE | GR_LINK);
58736 + } else if (newmode & GR_SUPPRESS)
58737 + return GR_SUPPRESS;
58738 + else
58739 + return 0;
58740 +}
58741 +
58742 +int
58743 +gr_check_hidden_task(const struct task_struct *task)
58744 +{
58745 + if (unlikely(!(gr_status & GR_READY)))
58746 + return 0;
58747 +
58748 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58749 + return 1;
58750 +
58751 + return 0;
58752 +}
58753 +
58754 +int
58755 +gr_check_protected_task(const struct task_struct *task)
58756 +{
58757 + if (unlikely(!(gr_status & GR_READY) || !task))
58758 + return 0;
58759 +
58760 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58761 + task->acl != current->acl)
58762 + return 1;
58763 +
58764 + return 0;
58765 +}
58766 +
58767 +int
58768 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58769 +{
58770 + struct task_struct *p;
58771 + int ret = 0;
58772 +
58773 + if (unlikely(!(gr_status & GR_READY) || !pid))
58774 + return ret;
58775 +
58776 + read_lock(&tasklist_lock);
58777 + do_each_pid_task(pid, type, p) {
58778 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58779 + p->acl != current->acl) {
58780 + ret = 1;
58781 + goto out;
58782 + }
58783 + } while_each_pid_task(pid, type, p);
58784 +out:
58785 + read_unlock(&tasklist_lock);
58786 +
58787 + return ret;
58788 +}
58789 +
58790 +void
58791 +gr_copy_label(struct task_struct *tsk)
58792 +{
58793 + /* plain copying of fields is already done by dup_task_struct */
58794 + tsk->signal->used_accept = 0;
58795 + tsk->acl_sp_role = 0;
58796 + //tsk->acl_role_id = current->acl_role_id;
58797 + //tsk->acl = current->acl;
58798 + //tsk->role = current->role;
58799 + tsk->signal->curr_ip = current->signal->curr_ip;
58800 + tsk->signal->saved_ip = current->signal->saved_ip;
58801 + if (current->exec_file)
58802 + get_file(current->exec_file);
58803 + //tsk->exec_file = current->exec_file;
58804 + //tsk->is_writable = current->is_writable;
58805 + if (unlikely(current->signal->used_accept)) {
58806 + current->signal->curr_ip = 0;
58807 + current->signal->saved_ip = 0;
58808 + }
58809 +
58810 + return;
58811 +}
58812 +
58813 +static void
58814 +gr_set_proc_res(struct task_struct *task)
58815 +{
58816 + struct acl_subject_label *proc;
58817 + unsigned short i;
58818 +
58819 + proc = task->acl;
58820 +
58821 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58822 + return;
58823 +
58824 + for (i = 0; i < RLIM_NLIMITS; i++) {
58825 + if (!(proc->resmask & (1 << i)))
58826 + continue;
58827 +
58828 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58829 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58830 + }
58831 +
58832 + return;
58833 +}
58834 +
58835 +extern int __gr_process_user_ban(struct user_struct *user);
58836 +
58837 +int
58838 +gr_check_user_change(int real, int effective, int fs)
58839 +{
58840 + unsigned int i;
58841 + __u16 num;
58842 + uid_t *uidlist;
58843 + int curuid;
58844 + int realok = 0;
58845 + int effectiveok = 0;
58846 + int fsok = 0;
58847 +
58848 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58849 + struct user_struct *user;
58850 +
58851 + if (real == -1)
58852 + goto skipit;
58853 +
58854 + user = find_user(real);
58855 + if (user == NULL)
58856 + goto skipit;
58857 +
58858 + if (__gr_process_user_ban(user)) {
58859 + /* for find_user */
58860 + free_uid(user);
58861 + return 1;
58862 + }
58863 +
58864 + /* for find_user */
58865 + free_uid(user);
58866 +
58867 +skipit:
58868 +#endif
58869 +
58870 + if (unlikely(!(gr_status & GR_READY)))
58871 + return 0;
58872 +
58873 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58874 + gr_log_learn_id_change('u', real, effective, fs);
58875 +
58876 + num = current->acl->user_trans_num;
58877 + uidlist = current->acl->user_transitions;
58878 +
58879 + if (uidlist == NULL)
58880 + return 0;
58881 +
58882 + if (real == -1)
58883 + realok = 1;
58884 + if (effective == -1)
58885 + effectiveok = 1;
58886 + if (fs == -1)
58887 + fsok = 1;
58888 +
58889 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58890 + for (i = 0; i < num; i++) {
58891 + curuid = (int)uidlist[i];
58892 + if (real == curuid)
58893 + realok = 1;
58894 + if (effective == curuid)
58895 + effectiveok = 1;
58896 + if (fs == curuid)
58897 + fsok = 1;
58898 + }
58899 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58900 + for (i = 0; i < num; i++) {
58901 + curuid = (int)uidlist[i];
58902 + if (real == curuid)
58903 + break;
58904 + if (effective == curuid)
58905 + break;
58906 + if (fs == curuid)
58907 + break;
58908 + }
58909 + /* not in deny list */
58910 + if (i == num) {
58911 + realok = 1;
58912 + effectiveok = 1;
58913 + fsok = 1;
58914 + }
58915 + }
58916 +
58917 + if (realok && effectiveok && fsok)
58918 + return 0;
58919 + else {
58920 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58921 + return 1;
58922 + }
58923 +}
58924 +
58925 +int
58926 +gr_check_group_change(int real, int effective, int fs)
58927 +{
58928 + unsigned int i;
58929 + __u16 num;
58930 + gid_t *gidlist;
58931 + int curgid;
58932 + int realok = 0;
58933 + int effectiveok = 0;
58934 + int fsok = 0;
58935 +
58936 + if (unlikely(!(gr_status & GR_READY)))
58937 + return 0;
58938 +
58939 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58940 + gr_log_learn_id_change('g', real, effective, fs);
58941 +
58942 + num = current->acl->group_trans_num;
58943 + gidlist = current->acl->group_transitions;
58944 +
58945 + if (gidlist == NULL)
58946 + return 0;
58947 +
58948 + if (real == -1)
58949 + realok = 1;
58950 + if (effective == -1)
58951 + effectiveok = 1;
58952 + if (fs == -1)
58953 + fsok = 1;
58954 +
58955 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58956 + for (i = 0; i < num; i++) {
58957 + curgid = (int)gidlist[i];
58958 + if (real == curgid)
58959 + realok = 1;
58960 + if (effective == curgid)
58961 + effectiveok = 1;
58962 + if (fs == curgid)
58963 + fsok = 1;
58964 + }
58965 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58966 + for (i = 0; i < num; i++) {
58967 + curgid = (int)gidlist[i];
58968 + if (real == curgid)
58969 + break;
58970 + if (effective == curgid)
58971 + break;
58972 + if (fs == curgid)
58973 + break;
58974 + }
58975 + /* not in deny list */
58976 + if (i == num) {
58977 + realok = 1;
58978 + effectiveok = 1;
58979 + fsok = 1;
58980 + }
58981 + }
58982 +
58983 + if (realok && effectiveok && fsok)
58984 + return 0;
58985 + else {
58986 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58987 + return 1;
58988 + }
58989 +}
58990 +
58991 +extern int gr_acl_is_capable(const int cap);
58992 +
58993 +void
58994 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58995 +{
58996 + struct acl_role_label *role = task->role;
58997 + struct acl_subject_label *subj = NULL;
58998 + struct acl_object_label *obj;
58999 + struct file *filp;
59000 +
59001 + if (unlikely(!(gr_status & GR_READY)))
59002 + return;
59003 +
59004 + filp = task->exec_file;
59005 +
59006 + /* kernel process, we'll give them the kernel role */
59007 + if (unlikely(!filp)) {
59008 + task->role = kernel_role;
59009 + task->acl = kernel_role->root_label;
59010 + return;
59011 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59012 + role = lookup_acl_role_label(task, uid, gid);
59013 +
59014 + /* don't change the role if we're not a privileged process */
59015 + if (role && task->role != role &&
59016 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59017 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59018 + return;
59019 +
59020 + /* perform subject lookup in possibly new role
59021 + we can use this result below in the case where role == task->role
59022 + */
59023 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59024 +
59025 + /* if we changed uid/gid, but result in the same role
59026 + and are using inheritance, don't lose the inherited subject
59027 + if current subject is other than what normal lookup
59028 + would result in, we arrived via inheritance, don't
59029 + lose subject
59030 + */
59031 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59032 + (subj == task->acl)))
59033 + task->acl = subj;
59034 +
59035 + task->role = role;
59036 +
59037 + task->is_writable = 0;
59038 +
59039 + /* ignore additional mmap checks for processes that are writable
59040 + by the default ACL */
59041 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59042 + if (unlikely(obj->mode & GR_WRITE))
59043 + task->is_writable = 1;
59044 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59045 + if (unlikely(obj->mode & GR_WRITE))
59046 + task->is_writable = 1;
59047 +
59048 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59049 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59050 +#endif
59051 +
59052 + gr_set_proc_res(task);
59053 +
59054 + return;
59055 +}
59056 +
59057 +int
59058 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59059 + const int unsafe_flags)
59060 +{
59061 + struct task_struct *task = current;
59062 + struct acl_subject_label *newacl;
59063 + struct acl_object_label *obj;
59064 + __u32 retmode;
59065 +
59066 + if (unlikely(!(gr_status & GR_READY)))
59067 + return 0;
59068 +
59069 + newacl = chk_subj_label(dentry, mnt, task->role);
59070 +
59071 + task_lock(task);
59072 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59073 + !(task->role->roletype & GR_ROLE_GOD) &&
59074 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59075 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59076 + task_unlock(task);
59077 + if (unsafe_flags & LSM_UNSAFE_SHARE)
59078 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59079 + else
59080 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59081 + return -EACCES;
59082 + }
59083 + task_unlock(task);
59084 +
59085 + obj = chk_obj_label(dentry, mnt, task->acl);
59086 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59087 +
59088 + if (!(task->acl->mode & GR_INHERITLEARN) &&
59089 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59090 + if (obj->nested)
59091 + task->acl = obj->nested;
59092 + else
59093 + task->acl = newacl;
59094 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59095 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59096 +
59097 + task->is_writable = 0;
59098 +
59099 + /* ignore additional mmap checks for processes that are writable
59100 + by the default ACL */
59101 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
59102 + if (unlikely(obj->mode & GR_WRITE))
59103 + task->is_writable = 1;
59104 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
59105 + if (unlikely(obj->mode & GR_WRITE))
59106 + task->is_writable = 1;
59107 +
59108 + gr_set_proc_res(task);
59109 +
59110 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59111 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59112 +#endif
59113 + return 0;
59114 +}
59115 +
59116 +/* always called with valid inodev ptr */
59117 +static void
59118 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59119 +{
59120 + struct acl_object_label *matchpo;
59121 + struct acl_subject_label *matchps;
59122 + struct acl_subject_label *subj;
59123 + struct acl_role_label *role;
59124 + unsigned int x;
59125 +
59126 + FOR_EACH_ROLE_START(role)
59127 + FOR_EACH_SUBJECT_START(role, subj, x)
59128 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59129 + matchpo->mode |= GR_DELETED;
59130 + FOR_EACH_SUBJECT_END(subj,x)
59131 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59132 + if (subj->inode == ino && subj->device == dev)
59133 + subj->mode |= GR_DELETED;
59134 + FOR_EACH_NESTED_SUBJECT_END(subj)
59135 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59136 + matchps->mode |= GR_DELETED;
59137 + FOR_EACH_ROLE_END(role)
59138 +
59139 + inodev->nentry->deleted = 1;
59140 +
59141 + return;
59142 +}
59143 +
59144 +void
59145 +gr_handle_delete(const ino_t ino, const dev_t dev)
59146 +{
59147 + struct inodev_entry *inodev;
59148 +
59149 + if (unlikely(!(gr_status & GR_READY)))
59150 + return;
59151 +
59152 + write_lock(&gr_inode_lock);
59153 + inodev = lookup_inodev_entry(ino, dev);
59154 + if (inodev != NULL)
59155 + do_handle_delete(inodev, ino, dev);
59156 + write_unlock(&gr_inode_lock);
59157 +
59158 + return;
59159 +}
59160 +
59161 +static void
59162 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59163 + const ino_t newinode, const dev_t newdevice,
59164 + struct acl_subject_label *subj)
59165 +{
59166 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59167 + struct acl_object_label *match;
59168 +
59169 + match = subj->obj_hash[index];
59170 +
59171 + while (match && (match->inode != oldinode ||
59172 + match->device != olddevice ||
59173 + !(match->mode & GR_DELETED)))
59174 + match = match->next;
59175 +
59176 + if (match && (match->inode == oldinode)
59177 + && (match->device == olddevice)
59178 + && (match->mode & GR_DELETED)) {
59179 + if (match->prev == NULL) {
59180 + subj->obj_hash[index] = match->next;
59181 + if (match->next != NULL)
59182 + match->next->prev = NULL;
59183 + } else {
59184 + match->prev->next = match->next;
59185 + if (match->next != NULL)
59186 + match->next->prev = match->prev;
59187 + }
59188 + match->prev = NULL;
59189 + match->next = NULL;
59190 + match->inode = newinode;
59191 + match->device = newdevice;
59192 + match->mode &= ~GR_DELETED;
59193 +
59194 + insert_acl_obj_label(match, subj);
59195 + }
59196 +
59197 + return;
59198 +}
59199 +
59200 +static void
59201 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59202 + const ino_t newinode, const dev_t newdevice,
59203 + struct acl_role_label *role)
59204 +{
59205 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59206 + struct acl_subject_label *match;
59207 +
59208 + match = role->subj_hash[index];
59209 +
59210 + while (match && (match->inode != oldinode ||
59211 + match->device != olddevice ||
59212 + !(match->mode & GR_DELETED)))
59213 + match = match->next;
59214 +
59215 + if (match && (match->inode == oldinode)
59216 + && (match->device == olddevice)
59217 + && (match->mode & GR_DELETED)) {
59218 + if (match->prev == NULL) {
59219 + role->subj_hash[index] = match->next;
59220 + if (match->next != NULL)
59221 + match->next->prev = NULL;
59222 + } else {
59223 + match->prev->next = match->next;
59224 + if (match->next != NULL)
59225 + match->next->prev = match->prev;
59226 + }
59227 + match->prev = NULL;
59228 + match->next = NULL;
59229 + match->inode = newinode;
59230 + match->device = newdevice;
59231 + match->mode &= ~GR_DELETED;
59232 +
59233 + insert_acl_subj_label(match, role);
59234 + }
59235 +
59236 + return;
59237 +}
59238 +
59239 +static void
59240 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59241 + const ino_t newinode, const dev_t newdevice)
59242 +{
59243 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59244 + struct inodev_entry *match;
59245 +
59246 + match = inodev_set.i_hash[index];
59247 +
59248 + while (match && (match->nentry->inode != oldinode ||
59249 + match->nentry->device != olddevice || !match->nentry->deleted))
59250 + match = match->next;
59251 +
59252 + if (match && (match->nentry->inode == oldinode)
59253 + && (match->nentry->device == olddevice) &&
59254 + match->nentry->deleted) {
59255 + if (match->prev == NULL) {
59256 + inodev_set.i_hash[index] = match->next;
59257 + if (match->next != NULL)
59258 + match->next->prev = NULL;
59259 + } else {
59260 + match->prev->next = match->next;
59261 + if (match->next != NULL)
59262 + match->next->prev = match->prev;
59263 + }
59264 + match->prev = NULL;
59265 + match->next = NULL;
59266 + match->nentry->inode = newinode;
59267 + match->nentry->device = newdevice;
59268 + match->nentry->deleted = 0;
59269 +
59270 + insert_inodev_entry(match);
59271 + }
59272 +
59273 + return;
59274 +}
59275 +
59276 +static void
59277 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59278 +{
59279 + struct acl_subject_label *subj;
59280 + struct acl_role_label *role;
59281 + unsigned int x;
59282 +
59283 + FOR_EACH_ROLE_START(role)
59284 + update_acl_subj_label(matchn->inode, matchn->device,
59285 + inode, dev, role);
59286 +
59287 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59288 + if ((subj->inode == inode) && (subj->device == dev)) {
59289 + subj->inode = inode;
59290 + subj->device = dev;
59291 + }
59292 + FOR_EACH_NESTED_SUBJECT_END(subj)
59293 + FOR_EACH_SUBJECT_START(role, subj, x)
59294 + update_acl_obj_label(matchn->inode, matchn->device,
59295 + inode, dev, subj);
59296 + FOR_EACH_SUBJECT_END(subj,x)
59297 + FOR_EACH_ROLE_END(role)
59298 +
59299 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59300 +
59301 + return;
59302 +}
59303 +
59304 +static void
59305 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59306 + const struct vfsmount *mnt)
59307 +{
59308 + ino_t ino = dentry->d_inode->i_ino;
59309 + dev_t dev = __get_dev(dentry);
59310 +
59311 + __do_handle_create(matchn, ino, dev);
59312 +
59313 + return;
59314 +}
59315 +
59316 +void
59317 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59318 +{
59319 + struct name_entry *matchn;
59320 +
59321 + if (unlikely(!(gr_status & GR_READY)))
59322 + return;
59323 +
59324 + preempt_disable();
59325 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59326 +
59327 + if (unlikely((unsigned long)matchn)) {
59328 + write_lock(&gr_inode_lock);
59329 + do_handle_create(matchn, dentry, mnt);
59330 + write_unlock(&gr_inode_lock);
59331 + }
59332 + preempt_enable();
59333 +
59334 + return;
59335 +}
59336 +
59337 +void
59338 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59339 +{
59340 + struct name_entry *matchn;
59341 +
59342 + if (unlikely(!(gr_status & GR_READY)))
59343 + return;
59344 +
59345 + preempt_disable();
59346 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59347 +
59348 + if (unlikely((unsigned long)matchn)) {
59349 + write_lock(&gr_inode_lock);
59350 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59351 + write_unlock(&gr_inode_lock);
59352 + }
59353 + preempt_enable();
59354 +
59355 + return;
59356 +}
59357 +
59358 +void
59359 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59360 + struct dentry *old_dentry,
59361 + struct dentry *new_dentry,
59362 + struct vfsmount *mnt, const __u8 replace)
59363 +{
59364 + struct name_entry *matchn;
59365 + struct inodev_entry *inodev;
59366 + struct inode *inode = new_dentry->d_inode;
59367 + ino_t oldinode = old_dentry->d_inode->i_ino;
59368 + dev_t olddev = __get_dev(old_dentry);
59369 +
59370 + /* vfs_rename swaps the name and parent link for old_dentry and
59371 + new_dentry
59372 + at this point, old_dentry has the new name, parent link, and inode
59373 + for the renamed file
59374 + if a file is being replaced by a rename, new_dentry has the inode
59375 + and name for the replaced file
59376 + */
59377 +
59378 + if (unlikely(!(gr_status & GR_READY)))
59379 + return;
59380 +
59381 + preempt_disable();
59382 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59383 +
59384 + /* we wouldn't have to check d_inode if it weren't for
59385 + NFS silly-renaming
59386 + */
59387 +
59388 + write_lock(&gr_inode_lock);
59389 + if (unlikely(replace && inode)) {
59390 + ino_t newinode = inode->i_ino;
59391 + dev_t newdev = __get_dev(new_dentry);
59392 + inodev = lookup_inodev_entry(newinode, newdev);
59393 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59394 + do_handle_delete(inodev, newinode, newdev);
59395 + }
59396 +
59397 + inodev = lookup_inodev_entry(oldinode, olddev);
59398 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59399 + do_handle_delete(inodev, oldinode, olddev);
59400 +
59401 + if (unlikely((unsigned long)matchn))
59402 + do_handle_create(matchn, old_dentry, mnt);
59403 +
59404 + write_unlock(&gr_inode_lock);
59405 + preempt_enable();
59406 +
59407 + return;
59408 +}
59409 +
59410 +static int
59411 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59412 + unsigned char **sum)
59413 +{
59414 + struct acl_role_label *r;
59415 + struct role_allowed_ip *ipp;
59416 + struct role_transition *trans;
59417 + unsigned int i;
59418 + int found = 0;
59419 + u32 curr_ip = current->signal->curr_ip;
59420 +
59421 + current->signal->saved_ip = curr_ip;
59422 +
59423 + /* check transition table */
59424 +
59425 + for (trans = current->role->transitions; trans; trans = trans->next) {
59426 + if (!strcmp(rolename, trans->rolename)) {
59427 + found = 1;
59428 + break;
59429 + }
59430 + }
59431 +
59432 + if (!found)
59433 + return 0;
59434 +
59435 + /* handle special roles that do not require authentication
59436 + and check ip */
59437 +
59438 + FOR_EACH_ROLE_START(r)
59439 + if (!strcmp(rolename, r->rolename) &&
59440 + (r->roletype & GR_ROLE_SPECIAL)) {
59441 + found = 0;
59442 + if (r->allowed_ips != NULL) {
59443 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59444 + if ((ntohl(curr_ip) & ipp->netmask) ==
59445 + (ntohl(ipp->addr) & ipp->netmask))
59446 + found = 1;
59447 + }
59448 + } else
59449 + found = 2;
59450 + if (!found)
59451 + return 0;
59452 +
59453 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59454 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59455 + *salt = NULL;
59456 + *sum = NULL;
59457 + return 1;
59458 + }
59459 + }
59460 + FOR_EACH_ROLE_END(r)
59461 +
59462 + for (i = 0; i < num_sprole_pws; i++) {
59463 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59464 + *salt = acl_special_roles[i]->salt;
59465 + *sum = acl_special_roles[i]->sum;
59466 + return 1;
59467 + }
59468 + }
59469 +
59470 + return 0;
59471 +}
59472 +
59473 +static void
59474 +assign_special_role(char *rolename)
59475 +{
59476 + struct acl_object_label *obj;
59477 + struct acl_role_label *r;
59478 + struct acl_role_label *assigned = NULL;
59479 + struct task_struct *tsk;
59480 + struct file *filp;
59481 +
59482 + FOR_EACH_ROLE_START(r)
59483 + if (!strcmp(rolename, r->rolename) &&
59484 + (r->roletype & GR_ROLE_SPECIAL)) {
59485 + assigned = r;
59486 + break;
59487 + }
59488 + FOR_EACH_ROLE_END(r)
59489 +
59490 + if (!assigned)
59491 + return;
59492 +
59493 + read_lock(&tasklist_lock);
59494 + read_lock(&grsec_exec_file_lock);
59495 +
59496 + tsk = current->real_parent;
59497 + if (tsk == NULL)
59498 + goto out_unlock;
59499 +
59500 + filp = tsk->exec_file;
59501 + if (filp == NULL)
59502 + goto out_unlock;
59503 +
59504 + tsk->is_writable = 0;
59505 +
59506 + tsk->acl_sp_role = 1;
59507 + tsk->acl_role_id = ++acl_sp_role_value;
59508 + tsk->role = assigned;
59509 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59510 +
59511 + /* ignore additional mmap checks for processes that are writable
59512 + by the default ACL */
59513 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59514 + if (unlikely(obj->mode & GR_WRITE))
59515 + tsk->is_writable = 1;
59516 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59517 + if (unlikely(obj->mode & GR_WRITE))
59518 + tsk->is_writable = 1;
59519 +
59520 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59521 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59522 +#endif
59523 +
59524 +out_unlock:
59525 + read_unlock(&grsec_exec_file_lock);
59526 + read_unlock(&tasklist_lock);
59527 + return;
59528 +}
59529 +
59530 +int gr_check_secure_terminal(struct task_struct *task)
59531 +{
59532 + struct task_struct *p, *p2, *p3;
59533 + struct files_struct *files;
59534 + struct fdtable *fdt;
59535 + struct file *our_file = NULL, *file;
59536 + int i;
59537 +
59538 + if (task->signal->tty == NULL)
59539 + return 1;
59540 +
59541 + files = get_files_struct(task);
59542 + if (files != NULL) {
59543 + rcu_read_lock();
59544 + fdt = files_fdtable(files);
59545 + for (i=0; i < fdt->max_fds; i++) {
59546 + file = fcheck_files(files, i);
59547 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59548 + get_file(file);
59549 + our_file = file;
59550 + }
59551 + }
59552 + rcu_read_unlock();
59553 + put_files_struct(files);
59554 + }
59555 +
59556 + if (our_file == NULL)
59557 + return 1;
59558 +
59559 + read_lock(&tasklist_lock);
59560 + do_each_thread(p2, p) {
59561 + files = get_files_struct(p);
59562 + if (files == NULL ||
59563 + (p->signal && p->signal->tty == task->signal->tty)) {
59564 + if (files != NULL)
59565 + put_files_struct(files);
59566 + continue;
59567 + }
59568 + rcu_read_lock();
59569 + fdt = files_fdtable(files);
59570 + for (i=0; i < fdt->max_fds; i++) {
59571 + file = fcheck_files(files, i);
59572 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59573 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59574 + p3 = task;
59575 + while (p3->pid > 0) {
59576 + if (p3 == p)
59577 + break;
59578 + p3 = p3->real_parent;
59579 + }
59580 + if (p3 == p)
59581 + break;
59582 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59583 + gr_handle_alertkill(p);
59584 + rcu_read_unlock();
59585 + put_files_struct(files);
59586 + read_unlock(&tasklist_lock);
59587 + fput(our_file);
59588 + return 0;
59589 + }
59590 + }
59591 + rcu_read_unlock();
59592 + put_files_struct(files);
59593 + } while_each_thread(p2, p);
59594 + read_unlock(&tasklist_lock);
59595 +
59596 + fput(our_file);
59597 + return 1;
59598 +}
59599 +
59600 +ssize_t
59601 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59602 +{
59603 + struct gr_arg_wrapper uwrap;
59604 + unsigned char *sprole_salt = NULL;
59605 + unsigned char *sprole_sum = NULL;
59606 + int error = sizeof (struct gr_arg_wrapper);
59607 + int error2 = 0;
59608 +
59609 + mutex_lock(&gr_dev_mutex);
59610 +
59611 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59612 + error = -EPERM;
59613 + goto out;
59614 + }
59615 +
59616 + if (count != sizeof (struct gr_arg_wrapper)) {
59617 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59618 + error = -EINVAL;
59619 + goto out;
59620 + }
59621 +
59622 +
59623 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59624 + gr_auth_expires = 0;
59625 + gr_auth_attempts = 0;
59626 + }
59627 +
59628 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59629 + error = -EFAULT;
59630 + goto out;
59631 + }
59632 +
59633 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59634 + error = -EINVAL;
59635 + goto out;
59636 + }
59637 +
59638 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59639 + error = -EFAULT;
59640 + goto out;
59641 + }
59642 +
59643 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59644 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59645 + time_after(gr_auth_expires, get_seconds())) {
59646 + error = -EBUSY;
59647 + goto out;
59648 + }
59649 +
59650 + /* if non-root trying to do anything other than use a special role,
59651 + do not attempt authentication, do not count towards authentication
59652 + locking
59653 + */
59654 +
59655 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59656 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59657 + current_uid()) {
59658 + error = -EPERM;
59659 + goto out;
59660 + }
59661 +
59662 + /* ensure pw and special role name are null terminated */
59663 +
59664 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59665 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59666 +
59667 + /* Okay.
59668 + * We have our enough of the argument structure..(we have yet
59669 + * to copy_from_user the tables themselves) . Copy the tables
59670 + * only if we need them, i.e. for loading operations. */
59671 +
59672 + switch (gr_usermode->mode) {
59673 + case GR_STATUS:
59674 + if (gr_status & GR_READY) {
59675 + error = 1;
59676 + if (!gr_check_secure_terminal(current))
59677 + error = 3;
59678 + } else
59679 + error = 2;
59680 + goto out;
59681 + case GR_SHUTDOWN:
59682 + if ((gr_status & GR_READY)
59683 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59684 + pax_open_kernel();
59685 + gr_status &= ~GR_READY;
59686 + pax_close_kernel();
59687 +
59688 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59689 + free_variables();
59690 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59691 + memset(gr_system_salt, 0, GR_SALT_LEN);
59692 + memset(gr_system_sum, 0, GR_SHA_LEN);
59693 + } else if (gr_status & GR_READY) {
59694 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59695 + error = -EPERM;
59696 + } else {
59697 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59698 + error = -EAGAIN;
59699 + }
59700 + break;
59701 + case GR_ENABLE:
59702 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59703 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59704 + else {
59705 + if (gr_status & GR_READY)
59706 + error = -EAGAIN;
59707 + else
59708 + error = error2;
59709 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59710 + }
59711 + break;
59712 + case GR_RELOAD:
59713 + if (!(gr_status & GR_READY)) {
59714 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59715 + error = -EAGAIN;
59716 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59717 + lock_kernel();
59718 +
59719 + pax_open_kernel();
59720 + gr_status &= ~GR_READY;
59721 + pax_close_kernel();
59722 +
59723 + free_variables();
59724 + if (!(error2 = gracl_init(gr_usermode))) {
59725 + unlock_kernel();
59726 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59727 + } else {
59728 + unlock_kernel();
59729 + error = error2;
59730 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59731 + }
59732 + } else {
59733 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59734 + error = -EPERM;
59735 + }
59736 + break;
59737 + case GR_SEGVMOD:
59738 + if (unlikely(!(gr_status & GR_READY))) {
59739 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59740 + error = -EAGAIN;
59741 + break;
59742 + }
59743 +
59744 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59745 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59746 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59747 + struct acl_subject_label *segvacl;
59748 + segvacl =
59749 + lookup_acl_subj_label(gr_usermode->segv_inode,
59750 + gr_usermode->segv_device,
59751 + current->role);
59752 + if (segvacl) {
59753 + segvacl->crashes = 0;
59754 + segvacl->expires = 0;
59755 + }
59756 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59757 + gr_remove_uid(gr_usermode->segv_uid);
59758 + }
59759 + } else {
59760 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59761 + error = -EPERM;
59762 + }
59763 + break;
59764 + case GR_SPROLE:
59765 + case GR_SPROLEPAM:
59766 + if (unlikely(!(gr_status & GR_READY))) {
59767 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59768 + error = -EAGAIN;
59769 + break;
59770 + }
59771 +
59772 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59773 + current->role->expires = 0;
59774 + current->role->auth_attempts = 0;
59775 + }
59776 +
59777 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59778 + time_after(current->role->expires, get_seconds())) {
59779 + error = -EBUSY;
59780 + goto out;
59781 + }
59782 +
59783 + if (lookup_special_role_auth
59784 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59785 + && ((!sprole_salt && !sprole_sum)
59786 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59787 + char *p = "";
59788 + assign_special_role(gr_usermode->sp_role);
59789 + read_lock(&tasklist_lock);
59790 + if (current->real_parent)
59791 + p = current->real_parent->role->rolename;
59792 + read_unlock(&tasklist_lock);
59793 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59794 + p, acl_sp_role_value);
59795 + } else {
59796 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59797 + error = -EPERM;
59798 + if(!(current->role->auth_attempts++))
59799 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59800 +
59801 + goto out;
59802 + }
59803 + break;
59804 + case GR_UNSPROLE:
59805 + if (unlikely(!(gr_status & GR_READY))) {
59806 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59807 + error = -EAGAIN;
59808 + break;
59809 + }
59810 +
59811 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59812 + char *p = "";
59813 + int i = 0;
59814 +
59815 + read_lock(&tasklist_lock);
59816 + if (current->real_parent) {
59817 + p = current->real_parent->role->rolename;
59818 + i = current->real_parent->acl_role_id;
59819 + }
59820 + read_unlock(&tasklist_lock);
59821 +
59822 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59823 + gr_set_acls(1);
59824 + } else {
59825 + error = -EPERM;
59826 + goto out;
59827 + }
59828 + break;
59829 + default:
59830 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59831 + error = -EINVAL;
59832 + break;
59833 + }
59834 +
59835 + if (error != -EPERM)
59836 + goto out;
59837 +
59838 + if(!(gr_auth_attempts++))
59839 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59840 +
59841 + out:
59842 + mutex_unlock(&gr_dev_mutex);
59843 + return error;
59844 +}
59845 +
59846 +/* must be called with
59847 + rcu_read_lock();
59848 + read_lock(&tasklist_lock);
59849 + read_lock(&grsec_exec_file_lock);
59850 +*/
59851 +int gr_apply_subject_to_task(struct task_struct *task)
59852 +{
59853 + struct acl_object_label *obj;
59854 + char *tmpname;
59855 + struct acl_subject_label *tmpsubj;
59856 + struct file *filp;
59857 + struct name_entry *nmatch;
59858 +
59859 + filp = task->exec_file;
59860 + if (filp == NULL)
59861 + return 0;
59862 +
59863 + /* the following is to apply the correct subject
59864 + on binaries running when the RBAC system
59865 + is enabled, when the binaries have been
59866 + replaced or deleted since their execution
59867 + -----
59868 + when the RBAC system starts, the inode/dev
59869 + from exec_file will be one the RBAC system
59870 + is unaware of. It only knows the inode/dev
59871 + of the present file on disk, or the absence
59872 + of it.
59873 + */
59874 + preempt_disable();
59875 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59876 +
59877 + nmatch = lookup_name_entry(tmpname);
59878 + preempt_enable();
59879 + tmpsubj = NULL;
59880 + if (nmatch) {
59881 + if (nmatch->deleted)
59882 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59883 + else
59884 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59885 + if (tmpsubj != NULL)
59886 + task->acl = tmpsubj;
59887 + }
59888 + if (tmpsubj == NULL)
59889 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59890 + task->role);
59891 + if (task->acl) {
59892 + task->is_writable = 0;
59893 + /* ignore additional mmap checks for processes that are writable
59894 + by the default ACL */
59895 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59896 + if (unlikely(obj->mode & GR_WRITE))
59897 + task->is_writable = 1;
59898 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59899 + if (unlikely(obj->mode & GR_WRITE))
59900 + task->is_writable = 1;
59901 +
59902 + gr_set_proc_res(task);
59903 +
59904 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59905 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59906 +#endif
59907 + } else {
59908 + return 1;
59909 + }
59910 +
59911 + return 0;
59912 +}
59913 +
59914 +int
59915 +gr_set_acls(const int type)
59916 +{
59917 + struct task_struct *task, *task2;
59918 + struct acl_role_label *role = current->role;
59919 + __u16 acl_role_id = current->acl_role_id;
59920 + const struct cred *cred;
59921 + int ret;
59922 +
59923 + rcu_read_lock();
59924 + read_lock(&tasklist_lock);
59925 + read_lock(&grsec_exec_file_lock);
59926 + do_each_thread(task2, task) {
59927 + /* check to see if we're called from the exit handler,
59928 + if so, only replace ACLs that have inherited the admin
59929 + ACL */
59930 +
59931 + if (type && (task->role != role ||
59932 + task->acl_role_id != acl_role_id))
59933 + continue;
59934 +
59935 + task->acl_role_id = 0;
59936 + task->acl_sp_role = 0;
59937 +
59938 + if (task->exec_file) {
59939 + cred = __task_cred(task);
59940 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59941 +
59942 + ret = gr_apply_subject_to_task(task);
59943 + if (ret) {
59944 + read_unlock(&grsec_exec_file_lock);
59945 + read_unlock(&tasklist_lock);
59946 + rcu_read_unlock();
59947 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59948 + return ret;
59949 + }
59950 + } else {
59951 + // it's a kernel process
59952 + task->role = kernel_role;
59953 + task->acl = kernel_role->root_label;
59954 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59955 + task->acl->mode &= ~GR_PROCFIND;
59956 +#endif
59957 + }
59958 + } while_each_thread(task2, task);
59959 + read_unlock(&grsec_exec_file_lock);
59960 + read_unlock(&tasklist_lock);
59961 + rcu_read_unlock();
59962 +
59963 + return 0;
59964 +}
59965 +
59966 +void
59967 +gr_learn_resource(const struct task_struct *task,
59968 + const int res, const unsigned long wanted, const int gt)
59969 +{
59970 + struct acl_subject_label *acl;
59971 + const struct cred *cred;
59972 +
59973 + if (unlikely((gr_status & GR_READY) &&
59974 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59975 + goto skip_reslog;
59976 +
59977 +#ifdef CONFIG_GRKERNSEC_RESLOG
59978 + gr_log_resource(task, res, wanted, gt);
59979 +#endif
59980 + skip_reslog:
59981 +
59982 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59983 + return;
59984 +
59985 + acl = task->acl;
59986 +
59987 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59988 + !(acl->resmask & (1 << (unsigned short) res))))
59989 + return;
59990 +
59991 + if (wanted >= acl->res[res].rlim_cur) {
59992 + unsigned long res_add;
59993 +
59994 + res_add = wanted;
59995 + switch (res) {
59996 + case RLIMIT_CPU:
59997 + res_add += GR_RLIM_CPU_BUMP;
59998 + break;
59999 + case RLIMIT_FSIZE:
60000 + res_add += GR_RLIM_FSIZE_BUMP;
60001 + break;
60002 + case RLIMIT_DATA:
60003 + res_add += GR_RLIM_DATA_BUMP;
60004 + break;
60005 + case RLIMIT_STACK:
60006 + res_add += GR_RLIM_STACK_BUMP;
60007 + break;
60008 + case RLIMIT_CORE:
60009 + res_add += GR_RLIM_CORE_BUMP;
60010 + break;
60011 + case RLIMIT_RSS:
60012 + res_add += GR_RLIM_RSS_BUMP;
60013 + break;
60014 + case RLIMIT_NPROC:
60015 + res_add += GR_RLIM_NPROC_BUMP;
60016 + break;
60017 + case RLIMIT_NOFILE:
60018 + res_add += GR_RLIM_NOFILE_BUMP;
60019 + break;
60020 + case RLIMIT_MEMLOCK:
60021 + res_add += GR_RLIM_MEMLOCK_BUMP;
60022 + break;
60023 + case RLIMIT_AS:
60024 + res_add += GR_RLIM_AS_BUMP;
60025 + break;
60026 + case RLIMIT_LOCKS:
60027 + res_add += GR_RLIM_LOCKS_BUMP;
60028 + break;
60029 + case RLIMIT_SIGPENDING:
60030 + res_add += GR_RLIM_SIGPENDING_BUMP;
60031 + break;
60032 + case RLIMIT_MSGQUEUE:
60033 + res_add += GR_RLIM_MSGQUEUE_BUMP;
60034 + break;
60035 + case RLIMIT_NICE:
60036 + res_add += GR_RLIM_NICE_BUMP;
60037 + break;
60038 + case RLIMIT_RTPRIO:
60039 + res_add += GR_RLIM_RTPRIO_BUMP;
60040 + break;
60041 + case RLIMIT_RTTIME:
60042 + res_add += GR_RLIM_RTTIME_BUMP;
60043 + break;
60044 + }
60045 +
60046 + acl->res[res].rlim_cur = res_add;
60047 +
60048 + if (wanted > acl->res[res].rlim_max)
60049 + acl->res[res].rlim_max = res_add;
60050 +
60051 + /* only log the subject filename, since resource logging is supported for
60052 + single-subject learning only */
60053 + rcu_read_lock();
60054 + cred = __task_cred(task);
60055 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60056 + task->role->roletype, cred->uid, cred->gid, acl->filename,
60057 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60058 + "", (unsigned long) res, &task->signal->saved_ip);
60059 + rcu_read_unlock();
60060 + }
60061 +
60062 + return;
60063 +}
60064 +
60065 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60066 +void
60067 +pax_set_initial_flags(struct linux_binprm *bprm)
60068 +{
60069 + struct task_struct *task = current;
60070 + struct acl_subject_label *proc;
60071 + unsigned long flags;
60072 +
60073 + if (unlikely(!(gr_status & GR_READY)))
60074 + return;
60075 +
60076 + flags = pax_get_flags(task);
60077 +
60078 + proc = task->acl;
60079 +
60080 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60081 + flags &= ~MF_PAX_PAGEEXEC;
60082 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60083 + flags &= ~MF_PAX_SEGMEXEC;
60084 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60085 + flags &= ~MF_PAX_RANDMMAP;
60086 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60087 + flags &= ~MF_PAX_EMUTRAMP;
60088 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60089 + flags &= ~MF_PAX_MPROTECT;
60090 +
60091 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60092 + flags |= MF_PAX_PAGEEXEC;
60093 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60094 + flags |= MF_PAX_SEGMEXEC;
60095 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60096 + flags |= MF_PAX_RANDMMAP;
60097 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60098 + flags |= MF_PAX_EMUTRAMP;
60099 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60100 + flags |= MF_PAX_MPROTECT;
60101 +
60102 + pax_set_flags(task, flags);
60103 +
60104 + return;
60105 +}
60106 +#endif
60107 +
60108 +#ifdef CONFIG_SYSCTL
60109 +/* Eric Biederman likes breaking userland ABI and every inode-based security
60110 + system to save 35kb of memory */
60111 +
60112 +/* we modify the passed in filename, but adjust it back before returning */
60113 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
60114 +{
60115 + struct name_entry *nmatch;
60116 + char *p, *lastp = NULL;
60117 + struct acl_object_label *obj = NULL, *tmp;
60118 + struct acl_subject_label *tmpsubj;
60119 + char c = '\0';
60120 +
60121 + read_lock(&gr_inode_lock);
60122 +
60123 + p = name + len - 1;
60124 + do {
60125 + nmatch = lookup_name_entry(name);
60126 + if (lastp != NULL)
60127 + *lastp = c;
60128 +
60129 + if (nmatch == NULL)
60130 + goto next_component;
60131 + tmpsubj = current->acl;
60132 + do {
60133 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
60134 + if (obj != NULL) {
60135 + tmp = obj->globbed;
60136 + while (tmp) {
60137 + if (!glob_match(tmp->filename, name)) {
60138 + obj = tmp;
60139 + goto found_obj;
60140 + }
60141 + tmp = tmp->next;
60142 + }
60143 + goto found_obj;
60144 + }
60145 + } while ((tmpsubj = tmpsubj->parent_subject));
60146 +next_component:
60147 + /* end case */
60148 + if (p == name)
60149 + break;
60150 +
60151 + while (*p != '/')
60152 + p--;
60153 + if (p == name)
60154 + lastp = p + 1;
60155 + else {
60156 + lastp = p;
60157 + p--;
60158 + }
60159 + c = *lastp;
60160 + *lastp = '\0';
60161 + } while (1);
60162 +found_obj:
60163 + read_unlock(&gr_inode_lock);
60164 + /* obj returned will always be non-null */
60165 + return obj;
60166 +}
60167 +
60168 +/* returns 0 when allowing, non-zero on error
60169 + op of 0 is used for readdir, so we don't log the names of hidden files
60170 +*/
60171 +__u32
60172 +gr_handle_sysctl(const struct ctl_table *table, const int op)
60173 +{
60174 + ctl_table *tmp;
60175 + const char *proc_sys = "/proc/sys";
60176 + char *path;
60177 + struct acl_object_label *obj;
60178 + unsigned short len = 0, pos = 0, depth = 0, i;
60179 + __u32 err = 0;
60180 + __u32 mode = 0;
60181 +
60182 + if (unlikely(!(gr_status & GR_READY)))
60183 + return 0;
60184 +
60185 + /* for now, ignore operations on non-sysctl entries if it's not a
60186 + readdir*/
60187 + if (table->child != NULL && op != 0)
60188 + return 0;
60189 +
60190 + mode |= GR_FIND;
60191 + /* it's only a read if it's an entry, read on dirs is for readdir */
60192 + if (op & MAY_READ)
60193 + mode |= GR_READ;
60194 + if (op & MAY_WRITE)
60195 + mode |= GR_WRITE;
60196 +
60197 + preempt_disable();
60198 +
60199 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60200 +
60201 + /* it's only a read/write if it's an actual entry, not a dir
60202 + (which are opened for readdir)
60203 + */
60204 +
60205 + /* convert the requested sysctl entry into a pathname */
60206 +
60207 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60208 + len += strlen(tmp->procname);
60209 + len++;
60210 + depth++;
60211 + }
60212 +
60213 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60214 + /* deny */
60215 + goto out;
60216 + }
60217 +
60218 + memset(path, 0, PAGE_SIZE);
60219 +
60220 + memcpy(path, proc_sys, strlen(proc_sys));
60221 +
60222 + pos += strlen(proc_sys);
60223 +
60224 + for (; depth > 0; depth--) {
60225 + path[pos] = '/';
60226 + pos++;
60227 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60228 + if (depth == i) {
60229 + memcpy(path + pos, tmp->procname,
60230 + strlen(tmp->procname));
60231 + pos += strlen(tmp->procname);
60232 + }
60233 + i++;
60234 + }
60235 + }
60236 +
60237 + obj = gr_lookup_by_name(path, pos);
60238 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60239 +
60240 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60241 + ((err & mode) != mode))) {
60242 + __u32 new_mode = mode;
60243 +
60244 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60245 +
60246 + err = 0;
60247 + gr_log_learn_sysctl(path, new_mode);
60248 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60249 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60250 + err = -ENOENT;
60251 + } else if (!(err & GR_FIND)) {
60252 + err = -ENOENT;
60253 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60254 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60255 + path, (mode & GR_READ) ? " reading" : "",
60256 + (mode & GR_WRITE) ? " writing" : "");
60257 + err = -EACCES;
60258 + } else if ((err & mode) != mode) {
60259 + err = -EACCES;
60260 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60261 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60262 + path, (mode & GR_READ) ? " reading" : "",
60263 + (mode & GR_WRITE) ? " writing" : "");
60264 + err = 0;
60265 + } else
60266 + err = 0;
60267 +
60268 + out:
60269 + preempt_enable();
60270 +
60271 + return err;
60272 +}
60273 +#endif
60274 +
60275 +int
60276 +gr_handle_proc_ptrace(struct task_struct *task)
60277 +{
60278 + struct file *filp;
60279 + struct task_struct *tmp = task;
60280 + struct task_struct *curtemp = current;
60281 + __u32 retmode;
60282 +
60283 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60284 + if (unlikely(!(gr_status & GR_READY)))
60285 + return 0;
60286 +#endif
60287 +
60288 + read_lock(&tasklist_lock);
60289 + read_lock(&grsec_exec_file_lock);
60290 + filp = task->exec_file;
60291 +
60292 + while (tmp->pid > 0) {
60293 + if (tmp == curtemp)
60294 + break;
60295 + tmp = tmp->real_parent;
60296 + }
60297 +
60298 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60299 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60300 + read_unlock(&grsec_exec_file_lock);
60301 + read_unlock(&tasklist_lock);
60302 + return 1;
60303 + }
60304 +
60305 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60306 + if (!(gr_status & GR_READY)) {
60307 + read_unlock(&grsec_exec_file_lock);
60308 + read_unlock(&tasklist_lock);
60309 + return 0;
60310 + }
60311 +#endif
60312 +
60313 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60314 + read_unlock(&grsec_exec_file_lock);
60315 + read_unlock(&tasklist_lock);
60316 +
60317 + if (retmode & GR_NOPTRACE)
60318 + return 1;
60319 +
60320 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60321 + && (current->acl != task->acl || (current->acl != current->role->root_label
60322 + && current->pid != task->pid)))
60323 + return 1;
60324 +
60325 + return 0;
60326 +}
60327 +
60328 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60329 +{
60330 + if (unlikely(!(gr_status & GR_READY)))
60331 + return;
60332 +
60333 + if (!(current->role->roletype & GR_ROLE_GOD))
60334 + return;
60335 +
60336 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60337 + p->role->rolename, gr_task_roletype_to_char(p),
60338 + p->acl->filename);
60339 +}
60340 +
60341 +int
60342 +gr_handle_ptrace(struct task_struct *task, const long request)
60343 +{
60344 + struct task_struct *tmp = task;
60345 + struct task_struct *curtemp = current;
60346 + __u32 retmode;
60347 +
60348 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60349 + if (unlikely(!(gr_status & GR_READY)))
60350 + return 0;
60351 +#endif
60352 +
60353 + read_lock(&tasklist_lock);
60354 + while (tmp->pid > 0) {
60355 + if (tmp == curtemp)
60356 + break;
60357 + tmp = tmp->real_parent;
60358 + }
60359 +
60360 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60361 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60362 + read_unlock(&tasklist_lock);
60363 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60364 + return 1;
60365 + }
60366 + read_unlock(&tasklist_lock);
60367 +
60368 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60369 + if (!(gr_status & GR_READY))
60370 + return 0;
60371 +#endif
60372 +
60373 + read_lock(&grsec_exec_file_lock);
60374 + if (unlikely(!task->exec_file)) {
60375 + read_unlock(&grsec_exec_file_lock);
60376 + return 0;
60377 + }
60378 +
60379 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60380 + read_unlock(&grsec_exec_file_lock);
60381 +
60382 + if (retmode & GR_NOPTRACE) {
60383 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60384 + return 1;
60385 + }
60386 +
60387 + if (retmode & GR_PTRACERD) {
60388 + switch (request) {
60389 + case PTRACE_POKETEXT:
60390 + case PTRACE_POKEDATA:
60391 + case PTRACE_POKEUSR:
60392 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60393 + case PTRACE_SETREGS:
60394 + case PTRACE_SETFPREGS:
60395 +#endif
60396 +#ifdef CONFIG_X86
60397 + case PTRACE_SETFPXREGS:
60398 +#endif
60399 +#ifdef CONFIG_ALTIVEC
60400 + case PTRACE_SETVRREGS:
60401 +#endif
60402 + return 1;
60403 + default:
60404 + return 0;
60405 + }
60406 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60407 + !(current->role->roletype & GR_ROLE_GOD) &&
60408 + (current->acl != task->acl)) {
60409 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60410 + return 1;
60411 + }
60412 +
60413 + return 0;
60414 +}
60415 +
60416 +static int is_writable_mmap(const struct file *filp)
60417 +{
60418 + struct task_struct *task = current;
60419 + struct acl_object_label *obj, *obj2;
60420 +
60421 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60422 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60423 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60424 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60425 + task->role->root_label);
60426 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60427 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60428 + return 1;
60429 + }
60430 + }
60431 + return 0;
60432 +}
60433 +
60434 +int
60435 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60436 +{
60437 + __u32 mode;
60438 +
60439 + if (unlikely(!file || !(prot & PROT_EXEC)))
60440 + return 1;
60441 +
60442 + if (is_writable_mmap(file))
60443 + return 0;
60444 +
60445 + mode =
60446 + gr_search_file(file->f_path.dentry,
60447 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60448 + file->f_path.mnt);
60449 +
60450 + if (!gr_tpe_allow(file))
60451 + return 0;
60452 +
60453 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60454 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60455 + return 0;
60456 + } else if (unlikely(!(mode & GR_EXEC))) {
60457 + return 0;
60458 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60459 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60460 + return 1;
60461 + }
60462 +
60463 + return 1;
60464 +}
60465 +
60466 +int
60467 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60468 +{
60469 + __u32 mode;
60470 +
60471 + if (unlikely(!file || !(prot & PROT_EXEC)))
60472 + return 1;
60473 +
60474 + if (is_writable_mmap(file))
60475 + return 0;
60476 +
60477 + mode =
60478 + gr_search_file(file->f_path.dentry,
60479 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60480 + file->f_path.mnt);
60481 +
60482 + if (!gr_tpe_allow(file))
60483 + return 0;
60484 +
60485 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60486 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60487 + return 0;
60488 + } else if (unlikely(!(mode & GR_EXEC))) {
60489 + return 0;
60490 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60491 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60492 + return 1;
60493 + }
60494 +
60495 + return 1;
60496 +}
60497 +
60498 +void
60499 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60500 +{
60501 + unsigned long runtime;
60502 + unsigned long cputime;
60503 + unsigned int wday, cday;
60504 + __u8 whr, chr;
60505 + __u8 wmin, cmin;
60506 + __u8 wsec, csec;
60507 + struct timespec timeval;
60508 +
60509 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60510 + !(task->acl->mode & GR_PROCACCT)))
60511 + return;
60512 +
60513 + do_posix_clock_monotonic_gettime(&timeval);
60514 + runtime = timeval.tv_sec - task->start_time.tv_sec;
60515 + wday = runtime / (3600 * 24);
60516 + runtime -= wday * (3600 * 24);
60517 + whr = runtime / 3600;
60518 + runtime -= whr * 3600;
60519 + wmin = runtime / 60;
60520 + runtime -= wmin * 60;
60521 + wsec = runtime;
60522 +
60523 + cputime = (task->utime + task->stime) / HZ;
60524 + cday = cputime / (3600 * 24);
60525 + cputime -= cday * (3600 * 24);
60526 + chr = cputime / 3600;
60527 + cputime -= chr * 3600;
60528 + cmin = cputime / 60;
60529 + cputime -= cmin * 60;
60530 + csec = cputime;
60531 +
60532 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60533 +
60534 + return;
60535 +}
60536 +
60537 +void gr_set_kernel_label(struct task_struct *task)
60538 +{
60539 + if (gr_status & GR_READY) {
60540 + task->role = kernel_role;
60541 + task->acl = kernel_role->root_label;
60542 + }
60543 + return;
60544 +}
60545 +
60546 +#ifdef CONFIG_TASKSTATS
60547 +int gr_is_taskstats_denied(int pid)
60548 +{
60549 + struct task_struct *task;
60550 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60551 + const struct cred *cred;
60552 +#endif
60553 + int ret = 0;
60554 +
60555 + /* restrict taskstats viewing to un-chrooted root users
60556 + who have the 'view' subject flag if the RBAC system is enabled
60557 + */
60558 +
60559 + rcu_read_lock();
60560 + read_lock(&tasklist_lock);
60561 + task = find_task_by_vpid(pid);
60562 + if (task) {
60563 +#ifdef CONFIG_GRKERNSEC_CHROOT
60564 + if (proc_is_chrooted(task))
60565 + ret = -EACCES;
60566 +#endif
60567 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60568 + cred = __task_cred(task);
60569 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60570 + if (cred->uid != 0)
60571 + ret = -EACCES;
60572 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60573 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60574 + ret = -EACCES;
60575 +#endif
60576 +#endif
60577 + if (gr_status & GR_READY) {
60578 + if (!(task->acl->mode & GR_VIEW))
60579 + ret = -EACCES;
60580 + }
60581 + } else
60582 + ret = -ENOENT;
60583 +
60584 + read_unlock(&tasklist_lock);
60585 + rcu_read_unlock();
60586 +
60587 + return ret;
60588 +}
60589 +#endif
60590 +
60591 +/* AUXV entries are filled via a descendant of search_binary_handler
60592 + after we've already applied the subject for the target
60593 +*/
60594 +int gr_acl_enable_at_secure(void)
60595 +{
60596 + if (unlikely(!(gr_status & GR_READY)))
60597 + return 0;
60598 +
60599 + if (current->acl->mode & GR_ATSECURE)
60600 + return 1;
60601 +
60602 + return 0;
60603 +}
60604 +
60605 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60606 +{
60607 + struct task_struct *task = current;
60608 + struct dentry *dentry = file->f_path.dentry;
60609 + struct vfsmount *mnt = file->f_path.mnt;
60610 + struct acl_object_label *obj, *tmp;
60611 + struct acl_subject_label *subj;
60612 + unsigned int bufsize;
60613 + int is_not_root;
60614 + char *path;
60615 + dev_t dev = __get_dev(dentry);
60616 +
60617 + if (unlikely(!(gr_status & GR_READY)))
60618 + return 1;
60619 +
60620 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60621 + return 1;
60622 +
60623 + /* ignore Eric Biederman */
60624 + if (IS_PRIVATE(dentry->d_inode))
60625 + return 1;
60626 +
60627 + subj = task->acl;
60628 + do {
60629 + obj = lookup_acl_obj_label(ino, dev, subj);
60630 + if (obj != NULL)
60631 + return (obj->mode & GR_FIND) ? 1 : 0;
60632 + } while ((subj = subj->parent_subject));
60633 +
60634 + /* this is purely an optimization since we're looking for an object
60635 + for the directory we're doing a readdir on
60636 + if it's possible for any globbed object to match the entry we're
60637 + filling into the directory, then the object we find here will be
60638 + an anchor point with attached globbed objects
60639 + */
60640 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60641 + if (obj->globbed == NULL)
60642 + return (obj->mode & GR_FIND) ? 1 : 0;
60643 +
60644 + is_not_root = ((obj->filename[0] == '/') &&
60645 + (obj->filename[1] == '\0')) ? 0 : 1;
60646 + bufsize = PAGE_SIZE - namelen - is_not_root;
60647 +
60648 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
60649 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60650 + return 1;
60651 +
60652 + preempt_disable();
60653 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60654 + bufsize);
60655 +
60656 + bufsize = strlen(path);
60657 +
60658 + /* if base is "/", don't append an additional slash */
60659 + if (is_not_root)
60660 + *(path + bufsize) = '/';
60661 + memcpy(path + bufsize + is_not_root, name, namelen);
60662 + *(path + bufsize + namelen + is_not_root) = '\0';
60663 +
60664 + tmp = obj->globbed;
60665 + while (tmp) {
60666 + if (!glob_match(tmp->filename, path)) {
60667 + preempt_enable();
60668 + return (tmp->mode & GR_FIND) ? 1 : 0;
60669 + }
60670 + tmp = tmp->next;
60671 + }
60672 + preempt_enable();
60673 + return (obj->mode & GR_FIND) ? 1 : 0;
60674 +}
60675 +
60676 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60677 +EXPORT_SYMBOL(gr_acl_is_enabled);
60678 +#endif
60679 +EXPORT_SYMBOL(gr_learn_resource);
60680 +EXPORT_SYMBOL(gr_set_kernel_label);
60681 +#ifdef CONFIG_SECURITY
60682 +EXPORT_SYMBOL(gr_check_user_change);
60683 +EXPORT_SYMBOL(gr_check_group_change);
60684 +#endif
60685 +
60686 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60687 new file mode 100644
60688 index 0000000..34fefda
60689 --- /dev/null
60690 +++ b/grsecurity/gracl_alloc.c
60691 @@ -0,0 +1,105 @@
60692 +#include <linux/kernel.h>
60693 +#include <linux/mm.h>
60694 +#include <linux/slab.h>
60695 +#include <linux/vmalloc.h>
60696 +#include <linux/gracl.h>
60697 +#include <linux/grsecurity.h>
60698 +
60699 +static unsigned long alloc_stack_next = 1;
60700 +static unsigned long alloc_stack_size = 1;
60701 +static void **alloc_stack;
60702 +
60703 +static __inline__ int
60704 +alloc_pop(void)
60705 +{
60706 + if (alloc_stack_next == 1)
60707 + return 0;
60708 +
60709 + kfree(alloc_stack[alloc_stack_next - 2]);
60710 +
60711 + alloc_stack_next--;
60712 +
60713 + return 1;
60714 +}
60715 +
60716 +static __inline__ int
60717 +alloc_push(void *buf)
60718 +{
60719 + if (alloc_stack_next >= alloc_stack_size)
60720 + return 1;
60721 +
60722 + alloc_stack[alloc_stack_next - 1] = buf;
60723 +
60724 + alloc_stack_next++;
60725 +
60726 + return 0;
60727 +}
60728 +
60729 +void *
60730 +acl_alloc(unsigned long len)
60731 +{
60732 + void *ret = NULL;
60733 +
60734 + if (!len || len > PAGE_SIZE)
60735 + goto out;
60736 +
60737 + ret = kmalloc(len, GFP_KERNEL);
60738 +
60739 + if (ret) {
60740 + if (alloc_push(ret)) {
60741 + kfree(ret);
60742 + ret = NULL;
60743 + }
60744 + }
60745 +
60746 +out:
60747 + return ret;
60748 +}
60749 +
60750 +void *
60751 +acl_alloc_num(unsigned long num, unsigned long len)
60752 +{
60753 + if (!len || (num > (PAGE_SIZE / len)))
60754 + return NULL;
60755 +
60756 + return acl_alloc(num * len);
60757 +}
60758 +
60759 +void
60760 +acl_free_all(void)
60761 +{
60762 + if (gr_acl_is_enabled() || !alloc_stack)
60763 + return;
60764 +
60765 + while (alloc_pop()) ;
60766 +
60767 + if (alloc_stack) {
60768 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60769 + kfree(alloc_stack);
60770 + else
60771 + vfree(alloc_stack);
60772 + }
60773 +
60774 + alloc_stack = NULL;
60775 + alloc_stack_size = 1;
60776 + alloc_stack_next = 1;
60777 +
60778 + return;
60779 +}
60780 +
60781 +int
60782 +acl_alloc_stack_init(unsigned long size)
60783 +{
60784 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60785 + alloc_stack =
60786 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60787 + else
60788 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60789 +
60790 + alloc_stack_size = size;
60791 +
60792 + if (!alloc_stack)
60793 + return 0;
60794 + else
60795 + return 1;
60796 +}
60797 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60798 new file mode 100644
60799 index 0000000..955ddfb
60800 --- /dev/null
60801 +++ b/grsecurity/gracl_cap.c
60802 @@ -0,0 +1,101 @@
60803 +#include <linux/kernel.h>
60804 +#include <linux/module.h>
60805 +#include <linux/sched.h>
60806 +#include <linux/gracl.h>
60807 +#include <linux/grsecurity.h>
60808 +#include <linux/grinternal.h>
60809 +
60810 +extern const char *captab_log[];
60811 +extern int captab_log_entries;
60812 +
60813 +int
60814 +gr_acl_is_capable(const int cap)
60815 +{
60816 + struct task_struct *task = current;
60817 + const struct cred *cred = current_cred();
60818 + struct acl_subject_label *curracl;
60819 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60820 + kernel_cap_t cap_audit = __cap_empty_set;
60821 +
60822 + if (!gr_acl_is_enabled())
60823 + return 1;
60824 +
60825 + curracl = task->acl;
60826 +
60827 + cap_drop = curracl->cap_lower;
60828 + cap_mask = curracl->cap_mask;
60829 + cap_audit = curracl->cap_invert_audit;
60830 +
60831 + while ((curracl = curracl->parent_subject)) {
60832 + /* if the cap isn't specified in the current computed mask but is specified in the
60833 + current level subject, and is lowered in the current level subject, then add
60834 + it to the set of dropped capabilities
60835 + otherwise, add the current level subject's mask to the current computed mask
60836 + */
60837 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60838 + cap_raise(cap_mask, cap);
60839 + if (cap_raised(curracl->cap_lower, cap))
60840 + cap_raise(cap_drop, cap);
60841 + if (cap_raised(curracl->cap_invert_audit, cap))
60842 + cap_raise(cap_audit, cap);
60843 + }
60844 + }
60845 +
60846 + if (!cap_raised(cap_drop, cap)) {
60847 + if (cap_raised(cap_audit, cap))
60848 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60849 + return 1;
60850 + }
60851 +
60852 + curracl = task->acl;
60853 +
60854 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60855 + && cap_raised(cred->cap_effective, cap)) {
60856 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60857 + task->role->roletype, cred->uid,
60858 + cred->gid, task->exec_file ?
60859 + gr_to_filename(task->exec_file->f_path.dentry,
60860 + task->exec_file->f_path.mnt) : curracl->filename,
60861 + curracl->filename, 0UL,
60862 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60863 + return 1;
60864 + }
60865 +
60866 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60867 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60868 + return 0;
60869 +}
60870 +
60871 +int
60872 +gr_acl_is_capable_nolog(const int cap)
60873 +{
60874 + struct acl_subject_label *curracl;
60875 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60876 +
60877 + if (!gr_acl_is_enabled())
60878 + return 1;
60879 +
60880 + curracl = current->acl;
60881 +
60882 + cap_drop = curracl->cap_lower;
60883 + cap_mask = curracl->cap_mask;
60884 +
60885 + while ((curracl = curracl->parent_subject)) {
60886 + /* if the cap isn't specified in the current computed mask but is specified in the
60887 + current level subject, and is lowered in the current level subject, then add
60888 + it to the set of dropped capabilities
60889 + otherwise, add the current level subject's mask to the current computed mask
60890 + */
60891 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60892 + cap_raise(cap_mask, cap);
60893 + if (cap_raised(curracl->cap_lower, cap))
60894 + cap_raise(cap_drop, cap);
60895 + }
60896 + }
60897 +
60898 + if (!cap_raised(cap_drop, cap))
60899 + return 1;
60900 +
60901 + return 0;
60902 +}
60903 +
60904 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60905 new file mode 100644
60906 index 0000000..523e7e8
60907 --- /dev/null
60908 +++ b/grsecurity/gracl_fs.c
60909 @@ -0,0 +1,435 @@
60910 +#include <linux/kernel.h>
60911 +#include <linux/sched.h>
60912 +#include <linux/types.h>
60913 +#include <linux/fs.h>
60914 +#include <linux/file.h>
60915 +#include <linux/stat.h>
60916 +#include <linux/grsecurity.h>
60917 +#include <linux/grinternal.h>
60918 +#include <linux/gracl.h>
60919 +
60920 +umode_t
60921 +gr_acl_umask(void)
60922 +{
60923 + if (unlikely(!gr_acl_is_enabled()))
60924 + return 0;
60925 +
60926 + return current->role->umask;
60927 +}
60928 +
60929 +__u32
60930 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60931 + const struct vfsmount * mnt)
60932 +{
60933 + __u32 mode;
60934 +
60935 + if (unlikely(!dentry->d_inode))
60936 + return GR_FIND;
60937 +
60938 + mode =
60939 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60940 +
60941 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60942 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60943 + return mode;
60944 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60945 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60946 + return 0;
60947 + } else if (unlikely(!(mode & GR_FIND)))
60948 + return 0;
60949 +
60950 + return GR_FIND;
60951 +}
60952 +
60953 +__u32
60954 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60955 + int acc_mode)
60956 +{
60957 + __u32 reqmode = GR_FIND;
60958 + __u32 mode;
60959 +
60960 + if (unlikely(!dentry->d_inode))
60961 + return reqmode;
60962 +
60963 + if (acc_mode & MAY_APPEND)
60964 + reqmode |= GR_APPEND;
60965 + else if (acc_mode & MAY_WRITE)
60966 + reqmode |= GR_WRITE;
60967 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60968 + reqmode |= GR_READ;
60969 +
60970 + mode =
60971 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60972 + mnt);
60973 +
60974 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60975 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60976 + reqmode & GR_READ ? " reading" : "",
60977 + reqmode & GR_WRITE ? " writing" : reqmode &
60978 + GR_APPEND ? " appending" : "");
60979 + return reqmode;
60980 + } else
60981 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60982 + {
60983 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60984 + reqmode & GR_READ ? " reading" : "",
60985 + reqmode & GR_WRITE ? " writing" : reqmode &
60986 + GR_APPEND ? " appending" : "");
60987 + return 0;
60988 + } else if (unlikely((mode & reqmode) != reqmode))
60989 + return 0;
60990 +
60991 + return reqmode;
60992 +}
60993 +
60994 +__u32
60995 +gr_acl_handle_creat(const struct dentry * dentry,
60996 + const struct dentry * p_dentry,
60997 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60998 + const int imode)
60999 +{
61000 + __u32 reqmode = GR_WRITE | GR_CREATE;
61001 + __u32 mode;
61002 +
61003 + if (acc_mode & MAY_APPEND)
61004 + reqmode |= GR_APPEND;
61005 + // if a directory was required or the directory already exists, then
61006 + // don't count this open as a read
61007 + if ((acc_mode & MAY_READ) &&
61008 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61009 + reqmode |= GR_READ;
61010 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
61011 + reqmode |= GR_SETID;
61012 +
61013 + mode =
61014 + gr_check_create(dentry, p_dentry, p_mnt,
61015 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61016 +
61017 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61018 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61019 + reqmode & GR_READ ? " reading" : "",
61020 + reqmode & GR_WRITE ? " writing" : reqmode &
61021 + GR_APPEND ? " appending" : "");
61022 + return reqmode;
61023 + } else
61024 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61025 + {
61026 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61027 + reqmode & GR_READ ? " reading" : "",
61028 + reqmode & GR_WRITE ? " writing" : reqmode &
61029 + GR_APPEND ? " appending" : "");
61030 + return 0;
61031 + } else if (unlikely((mode & reqmode) != reqmode))
61032 + return 0;
61033 +
61034 + return reqmode;
61035 +}
61036 +
61037 +__u32
61038 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61039 + const int fmode)
61040 +{
61041 + __u32 mode, reqmode = GR_FIND;
61042 +
61043 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61044 + reqmode |= GR_EXEC;
61045 + if (fmode & S_IWOTH)
61046 + reqmode |= GR_WRITE;
61047 + if (fmode & S_IROTH)
61048 + reqmode |= GR_READ;
61049 +
61050 + mode =
61051 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61052 + mnt);
61053 +
61054 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61055 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61056 + reqmode & GR_READ ? " reading" : "",
61057 + reqmode & GR_WRITE ? " writing" : "",
61058 + reqmode & GR_EXEC ? " executing" : "");
61059 + return reqmode;
61060 + } else
61061 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61062 + {
61063 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61064 + reqmode & GR_READ ? " reading" : "",
61065 + reqmode & GR_WRITE ? " writing" : "",
61066 + reqmode & GR_EXEC ? " executing" : "");
61067 + return 0;
61068 + } else if (unlikely((mode & reqmode) != reqmode))
61069 + return 0;
61070 +
61071 + return reqmode;
61072 +}
61073 +
61074 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61075 +{
61076 + __u32 mode;
61077 +
61078 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61079 +
61080 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61081 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61082 + return mode;
61083 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61084 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61085 + return 0;
61086 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61087 + return 0;
61088 +
61089 + return (reqmode);
61090 +}
61091 +
61092 +__u32
61093 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61094 +{
61095 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61096 +}
61097 +
61098 +__u32
61099 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61100 +{
61101 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61102 +}
61103 +
61104 +__u32
61105 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61106 +{
61107 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61108 +}
61109 +
61110 +__u32
61111 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61112 +{
61113 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61114 +}
61115 +
61116 +__u32
61117 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61118 + umode_t *modeptr)
61119 +{
61120 + mode_t mode;
61121 +
61122 + *modeptr &= ~(mode_t)gr_acl_umask();
61123 + mode = *modeptr;
61124 +
61125 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61126 + return 1;
61127 +
61128 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
61129 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61130 + GR_CHMOD_ACL_MSG);
61131 + } else {
61132 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61133 + }
61134 +}
61135 +
61136 +__u32
61137 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61138 +{
61139 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61140 +}
61141 +
61142 +__u32
61143 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61144 +{
61145 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61146 +}
61147 +
61148 +__u32
61149 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61150 +{
61151 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61152 +}
61153 +
61154 +__u32
61155 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61156 +{
61157 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61158 + GR_UNIXCONNECT_ACL_MSG);
61159 +}
61160 +
61161 +/* hardlinks require at minimum create and link permission,
61162 + any additional privilege required is based on the
61163 + privilege of the file being linked to
61164 +*/
61165 +__u32
61166 +gr_acl_handle_link(const struct dentry * new_dentry,
61167 + const struct dentry * parent_dentry,
61168 + const struct vfsmount * parent_mnt,
61169 + const struct dentry * old_dentry,
61170 + const struct vfsmount * old_mnt, const char *to)
61171 +{
61172 + __u32 mode;
61173 + __u32 needmode = GR_CREATE | GR_LINK;
61174 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61175 +
61176 + mode =
61177 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61178 + old_mnt);
61179 +
61180 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61181 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61182 + return mode;
61183 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61184 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61185 + return 0;
61186 + } else if (unlikely((mode & needmode) != needmode))
61187 + return 0;
61188 +
61189 + return 1;
61190 +}
61191 +
61192 +__u32
61193 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61194 + const struct dentry * parent_dentry,
61195 + const struct vfsmount * parent_mnt, const char *from)
61196 +{
61197 + __u32 needmode = GR_WRITE | GR_CREATE;
61198 + __u32 mode;
61199 +
61200 + mode =
61201 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61202 + GR_CREATE | GR_AUDIT_CREATE |
61203 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61204 +
61205 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61206 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61207 + return mode;
61208 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61209 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61210 + return 0;
61211 + } else if (unlikely((mode & needmode) != needmode))
61212 + return 0;
61213 +
61214 + return (GR_WRITE | GR_CREATE);
61215 +}
61216 +
61217 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61218 +{
61219 + __u32 mode;
61220 +
61221 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61222 +
61223 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61224 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61225 + return mode;
61226 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61227 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61228 + return 0;
61229 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61230 + return 0;
61231 +
61232 + return (reqmode);
61233 +}
61234 +
61235 +__u32
61236 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61237 + const struct dentry * parent_dentry,
61238 + const struct vfsmount * parent_mnt,
61239 + const int mode)
61240 +{
61241 + __u32 reqmode = GR_WRITE | GR_CREATE;
61242 + if (unlikely(mode & (S_ISUID | S_ISGID)))
61243 + reqmode |= GR_SETID;
61244 +
61245 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61246 + reqmode, GR_MKNOD_ACL_MSG);
61247 +}
61248 +
61249 +__u32
61250 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61251 + const struct dentry *parent_dentry,
61252 + const struct vfsmount *parent_mnt)
61253 +{
61254 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61255 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61256 +}
61257 +
61258 +#define RENAME_CHECK_SUCCESS(old, new) \
61259 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61260 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61261 +
61262 +int
61263 +gr_acl_handle_rename(struct dentry *new_dentry,
61264 + struct dentry *parent_dentry,
61265 + const struct vfsmount *parent_mnt,
61266 + struct dentry *old_dentry,
61267 + struct inode *old_parent_inode,
61268 + struct vfsmount *old_mnt, const char *newname)
61269 +{
61270 + __u32 comp1, comp2;
61271 + int error = 0;
61272 +
61273 + if (unlikely(!gr_acl_is_enabled()))
61274 + return 0;
61275 +
61276 + if (!new_dentry->d_inode) {
61277 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61278 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61279 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61280 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61281 + GR_DELETE | GR_AUDIT_DELETE |
61282 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61283 + GR_SUPPRESS, old_mnt);
61284 + } else {
61285 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61286 + GR_CREATE | GR_DELETE |
61287 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61288 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61289 + GR_SUPPRESS, parent_mnt);
61290 + comp2 =
61291 + gr_search_file(old_dentry,
61292 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61293 + GR_DELETE | GR_AUDIT_DELETE |
61294 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61295 + }
61296 +
61297 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61298 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61299 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61300 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61301 + && !(comp2 & GR_SUPPRESS)) {
61302 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61303 + error = -EACCES;
61304 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61305 + error = -EACCES;
61306 +
61307 + return error;
61308 +}
61309 +
61310 +void
61311 +gr_acl_handle_exit(void)
61312 +{
61313 + u16 id;
61314 + char *rolename;
61315 + struct file *exec_file;
61316 +
61317 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61318 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61319 + id = current->acl_role_id;
61320 + rolename = current->role->rolename;
61321 + gr_set_acls(1);
61322 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61323 + }
61324 +
61325 + write_lock(&grsec_exec_file_lock);
61326 + exec_file = current->exec_file;
61327 + current->exec_file = NULL;
61328 + write_unlock(&grsec_exec_file_lock);
61329 +
61330 + if (exec_file)
61331 + fput(exec_file);
61332 +}
61333 +
61334 +int
61335 +gr_acl_handle_procpidmem(const struct task_struct *task)
61336 +{
61337 + if (unlikely(!gr_acl_is_enabled()))
61338 + return 0;
61339 +
61340 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61341 + return -EACCES;
61342 +
61343 + return 0;
61344 +}
61345 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61346 new file mode 100644
61347 index 0000000..cd07b96
61348 --- /dev/null
61349 +++ b/grsecurity/gracl_ip.c
61350 @@ -0,0 +1,382 @@
61351 +#include <linux/kernel.h>
61352 +#include <asm/uaccess.h>
61353 +#include <asm/errno.h>
61354 +#include <net/sock.h>
61355 +#include <linux/file.h>
61356 +#include <linux/fs.h>
61357 +#include <linux/net.h>
61358 +#include <linux/in.h>
61359 +#include <linux/skbuff.h>
61360 +#include <linux/ip.h>
61361 +#include <linux/udp.h>
61362 +#include <linux/smp_lock.h>
61363 +#include <linux/types.h>
61364 +#include <linux/sched.h>
61365 +#include <linux/netdevice.h>
61366 +#include <linux/inetdevice.h>
61367 +#include <linux/gracl.h>
61368 +#include <linux/grsecurity.h>
61369 +#include <linux/grinternal.h>
61370 +
61371 +#define GR_BIND 0x01
61372 +#define GR_CONNECT 0x02
61373 +#define GR_INVERT 0x04
61374 +#define GR_BINDOVERRIDE 0x08
61375 +#define GR_CONNECTOVERRIDE 0x10
61376 +#define GR_SOCK_FAMILY 0x20
61377 +
61378 +static const char * gr_protocols[IPPROTO_MAX] = {
61379 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61380 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61381 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61382 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61383 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61384 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61385 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61386 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61387 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61388 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61389 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61390 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61391 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61392 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61393 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61394 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61395 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61396 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61397 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61398 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61399 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61400 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61401 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61402 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61403 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61404 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61405 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61406 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61407 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61408 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61409 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61410 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61411 + };
61412 +
61413 +static const char * gr_socktypes[SOCK_MAX] = {
61414 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61415 + "unknown:7", "unknown:8", "unknown:9", "packet"
61416 + };
61417 +
61418 +static const char * gr_sockfamilies[AF_MAX+1] = {
61419 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61420 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61421 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61422 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61423 + };
61424 +
61425 +const char *
61426 +gr_proto_to_name(unsigned char proto)
61427 +{
61428 + return gr_protocols[proto];
61429 +}
61430 +
61431 +const char *
61432 +gr_socktype_to_name(unsigned char type)
61433 +{
61434 + return gr_socktypes[type];
61435 +}
61436 +
61437 +const char *
61438 +gr_sockfamily_to_name(unsigned char family)
61439 +{
61440 + return gr_sockfamilies[family];
61441 +}
61442 +
61443 +int
61444 +gr_search_socket(const int domain, const int type, const int protocol)
61445 +{
61446 + struct acl_subject_label *curr;
61447 + const struct cred *cred = current_cred();
61448 +
61449 + if (unlikely(!gr_acl_is_enabled()))
61450 + goto exit;
61451 +
61452 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
61453 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61454 + goto exit; // let the kernel handle it
61455 +
61456 + curr = current->acl;
61457 +
61458 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61459 + /* the family is allowed, if this is PF_INET allow it only if
61460 + the extra sock type/protocol checks pass */
61461 + if (domain == PF_INET)
61462 + goto inet_check;
61463 + goto exit;
61464 + } else {
61465 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61466 + __u32 fakeip = 0;
61467 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61468 + current->role->roletype, cred->uid,
61469 + cred->gid, current->exec_file ?
61470 + gr_to_filename(current->exec_file->f_path.dentry,
61471 + current->exec_file->f_path.mnt) :
61472 + curr->filename, curr->filename,
61473 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61474 + &current->signal->saved_ip);
61475 + goto exit;
61476 + }
61477 + goto exit_fail;
61478 + }
61479 +
61480 +inet_check:
61481 + /* the rest of this checking is for IPv4 only */
61482 + if (!curr->ips)
61483 + goto exit;
61484 +
61485 + if ((curr->ip_type & (1 << type)) &&
61486 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61487 + goto exit;
61488 +
61489 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61490 + /* we don't place acls on raw sockets , and sometimes
61491 + dgram/ip sockets are opened for ioctl and not
61492 + bind/connect, so we'll fake a bind learn log */
61493 + if (type == SOCK_RAW || type == SOCK_PACKET) {
61494 + __u32 fakeip = 0;
61495 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61496 + current->role->roletype, cred->uid,
61497 + cred->gid, current->exec_file ?
61498 + gr_to_filename(current->exec_file->f_path.dentry,
61499 + current->exec_file->f_path.mnt) :
61500 + curr->filename, curr->filename,
61501 + &fakeip, 0, type,
61502 + protocol, GR_CONNECT, &current->signal->saved_ip);
61503 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61504 + __u32 fakeip = 0;
61505 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61506 + current->role->roletype, cred->uid,
61507 + cred->gid, current->exec_file ?
61508 + gr_to_filename(current->exec_file->f_path.dentry,
61509 + current->exec_file->f_path.mnt) :
61510 + curr->filename, curr->filename,
61511 + &fakeip, 0, type,
61512 + protocol, GR_BIND, &current->signal->saved_ip);
61513 + }
61514 + /* we'll log when they use connect or bind */
61515 + goto exit;
61516 + }
61517 +
61518 +exit_fail:
61519 + if (domain == PF_INET)
61520 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61521 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
61522 + else
61523 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61524 + gr_socktype_to_name(type), protocol);
61525 +
61526 + return 0;
61527 +exit:
61528 + return 1;
61529 +}
61530 +
61531 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61532 +{
61533 + if ((ip->mode & mode) &&
61534 + (ip_port >= ip->low) &&
61535 + (ip_port <= ip->high) &&
61536 + ((ntohl(ip_addr) & our_netmask) ==
61537 + (ntohl(our_addr) & our_netmask))
61538 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61539 + && (ip->type & (1 << type))) {
61540 + if (ip->mode & GR_INVERT)
61541 + return 2; // specifically denied
61542 + else
61543 + return 1; // allowed
61544 + }
61545 +
61546 + return 0; // not specifically allowed, may continue parsing
61547 +}
61548 +
61549 +static int
61550 +gr_search_connectbind(const int full_mode, struct sock *sk,
61551 + struct sockaddr_in *addr, const int type)
61552 +{
61553 + char iface[IFNAMSIZ] = {0};
61554 + struct acl_subject_label *curr;
61555 + struct acl_ip_label *ip;
61556 + struct inet_sock *isk;
61557 + struct net_device *dev;
61558 + struct in_device *idev;
61559 + unsigned long i;
61560 + int ret;
61561 + int mode = full_mode & (GR_BIND | GR_CONNECT);
61562 + __u32 ip_addr = 0;
61563 + __u32 our_addr;
61564 + __u32 our_netmask;
61565 + char *p;
61566 + __u16 ip_port = 0;
61567 + const struct cred *cred = current_cred();
61568 +
61569 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61570 + return 0;
61571 +
61572 + curr = current->acl;
61573 + isk = inet_sk(sk);
61574 +
61575 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61576 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61577 + addr->sin_addr.s_addr = curr->inaddr_any_override;
61578 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61579 + struct sockaddr_in saddr;
61580 + int err;
61581 +
61582 + saddr.sin_family = AF_INET;
61583 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
61584 + saddr.sin_port = isk->sport;
61585 +
61586 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61587 + if (err)
61588 + return err;
61589 +
61590 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61591 + if (err)
61592 + return err;
61593 + }
61594 +
61595 + if (!curr->ips)
61596 + return 0;
61597 +
61598 + ip_addr = addr->sin_addr.s_addr;
61599 + ip_port = ntohs(addr->sin_port);
61600 +
61601 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61602 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61603 + current->role->roletype, cred->uid,
61604 + cred->gid, current->exec_file ?
61605 + gr_to_filename(current->exec_file->f_path.dentry,
61606 + current->exec_file->f_path.mnt) :
61607 + curr->filename, curr->filename,
61608 + &ip_addr, ip_port, type,
61609 + sk->sk_protocol, mode, &current->signal->saved_ip);
61610 + return 0;
61611 + }
61612 +
61613 + for (i = 0; i < curr->ip_num; i++) {
61614 + ip = *(curr->ips + i);
61615 + if (ip->iface != NULL) {
61616 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
61617 + p = strchr(iface, ':');
61618 + if (p != NULL)
61619 + *p = '\0';
61620 + dev = dev_get_by_name(sock_net(sk), iface);
61621 + if (dev == NULL)
61622 + continue;
61623 + idev = in_dev_get(dev);
61624 + if (idev == NULL) {
61625 + dev_put(dev);
61626 + continue;
61627 + }
61628 + rcu_read_lock();
61629 + for_ifa(idev) {
61630 + if (!strcmp(ip->iface, ifa->ifa_label)) {
61631 + our_addr = ifa->ifa_address;
61632 + our_netmask = 0xffffffff;
61633 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61634 + if (ret == 1) {
61635 + rcu_read_unlock();
61636 + in_dev_put(idev);
61637 + dev_put(dev);
61638 + return 0;
61639 + } else if (ret == 2) {
61640 + rcu_read_unlock();
61641 + in_dev_put(idev);
61642 + dev_put(dev);
61643 + goto denied;
61644 + }
61645 + }
61646 + } endfor_ifa(idev);
61647 + rcu_read_unlock();
61648 + in_dev_put(idev);
61649 + dev_put(dev);
61650 + } else {
61651 + our_addr = ip->addr;
61652 + our_netmask = ip->netmask;
61653 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61654 + if (ret == 1)
61655 + return 0;
61656 + else if (ret == 2)
61657 + goto denied;
61658 + }
61659 + }
61660 +
61661 +denied:
61662 + if (mode == GR_BIND)
61663 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61664 + else if (mode == GR_CONNECT)
61665 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61666 +
61667 + return -EACCES;
61668 +}
61669 +
61670 +int
61671 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61672 +{
61673 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61674 +}
61675 +
61676 +int
61677 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61678 +{
61679 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61680 +}
61681 +
61682 +int gr_search_listen(struct socket *sock)
61683 +{
61684 + struct sock *sk = sock->sk;
61685 + struct sockaddr_in addr;
61686 +
61687 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61688 + addr.sin_port = inet_sk(sk)->sport;
61689 +
61690 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61691 +}
61692 +
61693 +int gr_search_accept(struct socket *sock)
61694 +{
61695 + struct sock *sk = sock->sk;
61696 + struct sockaddr_in addr;
61697 +
61698 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61699 + addr.sin_port = inet_sk(sk)->sport;
61700 +
61701 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61702 +}
61703 +
61704 +int
61705 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61706 +{
61707 + if (addr)
61708 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61709 + else {
61710 + struct sockaddr_in sin;
61711 + const struct inet_sock *inet = inet_sk(sk);
61712 +
61713 + sin.sin_addr.s_addr = inet->daddr;
61714 + sin.sin_port = inet->dport;
61715 +
61716 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61717 + }
61718 +}
61719 +
61720 +int
61721 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61722 +{
61723 + struct sockaddr_in sin;
61724 +
61725 + if (unlikely(skb->len < sizeof (struct udphdr)))
61726 + return 0; // skip this packet
61727 +
61728 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61729 + sin.sin_port = udp_hdr(skb)->source;
61730 +
61731 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61732 +}
61733 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61734 new file mode 100644
61735 index 0000000..34bdd46
61736 --- /dev/null
61737 +++ b/grsecurity/gracl_learn.c
61738 @@ -0,0 +1,208 @@
61739 +#include <linux/kernel.h>
61740 +#include <linux/mm.h>
61741 +#include <linux/sched.h>
61742 +#include <linux/poll.h>
61743 +#include <linux/smp_lock.h>
61744 +#include <linux/string.h>
61745 +#include <linux/file.h>
61746 +#include <linux/types.h>
61747 +#include <linux/vmalloc.h>
61748 +#include <linux/grinternal.h>
61749 +
61750 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61751 + size_t count, loff_t *ppos);
61752 +extern int gr_acl_is_enabled(void);
61753 +
61754 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61755 +static int gr_learn_attached;
61756 +
61757 +/* use a 512k buffer */
61758 +#define LEARN_BUFFER_SIZE (512 * 1024)
61759 +
61760 +static DEFINE_SPINLOCK(gr_learn_lock);
61761 +static DEFINE_MUTEX(gr_learn_user_mutex);
61762 +
61763 +/* we need to maintain two buffers, so that the kernel context of grlearn
61764 + uses a semaphore around the userspace copying, and the other kernel contexts
61765 + use a spinlock when copying into the buffer, since they cannot sleep
61766 +*/
61767 +static char *learn_buffer;
61768 +static char *learn_buffer_user;
61769 +static int learn_buffer_len;
61770 +static int learn_buffer_user_len;
61771 +
61772 +static ssize_t
61773 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61774 +{
61775 + DECLARE_WAITQUEUE(wait, current);
61776 + ssize_t retval = 0;
61777 +
61778 + add_wait_queue(&learn_wait, &wait);
61779 + set_current_state(TASK_INTERRUPTIBLE);
61780 + do {
61781 + mutex_lock(&gr_learn_user_mutex);
61782 + spin_lock(&gr_learn_lock);
61783 + if (learn_buffer_len)
61784 + break;
61785 + spin_unlock(&gr_learn_lock);
61786 + mutex_unlock(&gr_learn_user_mutex);
61787 + if (file->f_flags & O_NONBLOCK) {
61788 + retval = -EAGAIN;
61789 + goto out;
61790 + }
61791 + if (signal_pending(current)) {
61792 + retval = -ERESTARTSYS;
61793 + goto out;
61794 + }
61795 +
61796 + schedule();
61797 + } while (1);
61798 +
61799 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61800 + learn_buffer_user_len = learn_buffer_len;
61801 + retval = learn_buffer_len;
61802 + learn_buffer_len = 0;
61803 +
61804 + spin_unlock(&gr_learn_lock);
61805 +
61806 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61807 + retval = -EFAULT;
61808 +
61809 + mutex_unlock(&gr_learn_user_mutex);
61810 +out:
61811 + set_current_state(TASK_RUNNING);
61812 + remove_wait_queue(&learn_wait, &wait);
61813 + return retval;
61814 +}
61815 +
61816 +static unsigned int
61817 +poll_learn(struct file * file, poll_table * wait)
61818 +{
61819 + poll_wait(file, &learn_wait, wait);
61820 +
61821 + if (learn_buffer_len)
61822 + return (POLLIN | POLLRDNORM);
61823 +
61824 + return 0;
61825 +}
61826 +
61827 +void
61828 +gr_clear_learn_entries(void)
61829 +{
61830 + char *tmp;
61831 +
61832 + mutex_lock(&gr_learn_user_mutex);
61833 + spin_lock(&gr_learn_lock);
61834 + tmp = learn_buffer;
61835 + learn_buffer = NULL;
61836 + spin_unlock(&gr_learn_lock);
61837 + if (tmp)
61838 + vfree(tmp);
61839 + if (learn_buffer_user != NULL) {
61840 + vfree(learn_buffer_user);
61841 + learn_buffer_user = NULL;
61842 + }
61843 + learn_buffer_len = 0;
61844 + mutex_unlock(&gr_learn_user_mutex);
61845 +
61846 + return;
61847 +}
61848 +
61849 +void
61850 +gr_add_learn_entry(const char *fmt, ...)
61851 +{
61852 + va_list args;
61853 + unsigned int len;
61854 +
61855 + if (!gr_learn_attached)
61856 + return;
61857 +
61858 + spin_lock(&gr_learn_lock);
61859 +
61860 + /* leave a gap at the end so we know when it's "full" but don't have to
61861 + compute the exact length of the string we're trying to append
61862 + */
61863 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61864 + spin_unlock(&gr_learn_lock);
61865 + wake_up_interruptible(&learn_wait);
61866 + return;
61867 + }
61868 + if (learn_buffer == NULL) {
61869 + spin_unlock(&gr_learn_lock);
61870 + return;
61871 + }
61872 +
61873 + va_start(args, fmt);
61874 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61875 + va_end(args);
61876 +
61877 + learn_buffer_len += len + 1;
61878 +
61879 + spin_unlock(&gr_learn_lock);
61880 + wake_up_interruptible(&learn_wait);
61881 +
61882 + return;
61883 +}
61884 +
61885 +static int
61886 +open_learn(struct inode *inode, struct file *file)
61887 +{
61888 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61889 + return -EBUSY;
61890 + if (file->f_mode & FMODE_READ) {
61891 + int retval = 0;
61892 + mutex_lock(&gr_learn_user_mutex);
61893 + if (learn_buffer == NULL)
61894 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61895 + if (learn_buffer_user == NULL)
61896 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61897 + if (learn_buffer == NULL) {
61898 + retval = -ENOMEM;
61899 + goto out_error;
61900 + }
61901 + if (learn_buffer_user == NULL) {
61902 + retval = -ENOMEM;
61903 + goto out_error;
61904 + }
61905 + learn_buffer_len = 0;
61906 + learn_buffer_user_len = 0;
61907 + gr_learn_attached = 1;
61908 +out_error:
61909 + mutex_unlock(&gr_learn_user_mutex);
61910 + return retval;
61911 + }
61912 + return 0;
61913 +}
61914 +
61915 +static int
61916 +close_learn(struct inode *inode, struct file *file)
61917 +{
61918 + if (file->f_mode & FMODE_READ) {
61919 + char *tmp = NULL;
61920 + mutex_lock(&gr_learn_user_mutex);
61921 + spin_lock(&gr_learn_lock);
61922 + tmp = learn_buffer;
61923 + learn_buffer = NULL;
61924 + spin_unlock(&gr_learn_lock);
61925 + if (tmp)
61926 + vfree(tmp);
61927 + if (learn_buffer_user != NULL) {
61928 + vfree(learn_buffer_user);
61929 + learn_buffer_user = NULL;
61930 + }
61931 + learn_buffer_len = 0;
61932 + learn_buffer_user_len = 0;
61933 + gr_learn_attached = 0;
61934 + mutex_unlock(&gr_learn_user_mutex);
61935 + }
61936 +
61937 + return 0;
61938 +}
61939 +
61940 +const struct file_operations grsec_fops = {
61941 + .read = read_learn,
61942 + .write = write_grsec_handler,
61943 + .open = open_learn,
61944 + .release = close_learn,
61945 + .poll = poll_learn,
61946 +};
61947 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61948 new file mode 100644
61949 index 0000000..70b2179
61950 --- /dev/null
61951 +++ b/grsecurity/gracl_res.c
61952 @@ -0,0 +1,67 @@
61953 +#include <linux/kernel.h>
61954 +#include <linux/sched.h>
61955 +#include <linux/gracl.h>
61956 +#include <linux/grinternal.h>
61957 +
61958 +static const char *restab_log[] = {
61959 + [RLIMIT_CPU] = "RLIMIT_CPU",
61960 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61961 + [RLIMIT_DATA] = "RLIMIT_DATA",
61962 + [RLIMIT_STACK] = "RLIMIT_STACK",
61963 + [RLIMIT_CORE] = "RLIMIT_CORE",
61964 + [RLIMIT_RSS] = "RLIMIT_RSS",
61965 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61966 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61967 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61968 + [RLIMIT_AS] = "RLIMIT_AS",
61969 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61970 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61971 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61972 + [RLIMIT_NICE] = "RLIMIT_NICE",
61973 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61974 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61975 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61976 +};
61977 +
61978 +void
61979 +gr_log_resource(const struct task_struct *task,
61980 + const int res, const unsigned long wanted, const int gt)
61981 +{
61982 + const struct cred *cred;
61983 + unsigned long rlim;
61984 +
61985 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61986 + return;
61987 +
61988 + // not yet supported resource
61989 + if (unlikely(!restab_log[res]))
61990 + return;
61991 +
61992 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61993 + rlim = task->signal->rlim[res].rlim_max;
61994 + else
61995 + rlim = task->signal->rlim[res].rlim_cur;
61996 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61997 + return;
61998 +
61999 + rcu_read_lock();
62000 + cred = __task_cred(task);
62001 +
62002 + if (res == RLIMIT_NPROC &&
62003 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62004 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62005 + goto out_rcu_unlock;
62006 + else if (res == RLIMIT_MEMLOCK &&
62007 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62008 + goto out_rcu_unlock;
62009 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62010 + goto out_rcu_unlock;
62011 + rcu_read_unlock();
62012 +
62013 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62014 +
62015 + return;
62016 +out_rcu_unlock:
62017 + rcu_read_unlock();
62018 + return;
62019 +}
62020 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62021 new file mode 100644
62022 index 0000000..1d1b734
62023 --- /dev/null
62024 +++ b/grsecurity/gracl_segv.c
62025 @@ -0,0 +1,284 @@
62026 +#include <linux/kernel.h>
62027 +#include <linux/mm.h>
62028 +#include <asm/uaccess.h>
62029 +#include <asm/errno.h>
62030 +#include <asm/mman.h>
62031 +#include <net/sock.h>
62032 +#include <linux/file.h>
62033 +#include <linux/fs.h>
62034 +#include <linux/net.h>
62035 +#include <linux/in.h>
62036 +#include <linux/smp_lock.h>
62037 +#include <linux/slab.h>
62038 +#include <linux/types.h>
62039 +#include <linux/sched.h>
62040 +#include <linux/timer.h>
62041 +#include <linux/gracl.h>
62042 +#include <linux/grsecurity.h>
62043 +#include <linux/grinternal.h>
62044 +
62045 +static struct crash_uid *uid_set;
62046 +static unsigned short uid_used;
62047 +static DEFINE_SPINLOCK(gr_uid_lock);
62048 +extern rwlock_t gr_inode_lock;
62049 +extern struct acl_subject_label *
62050 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62051 + struct acl_role_label *role);
62052 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
62053 +
62054 +int
62055 +gr_init_uidset(void)
62056 +{
62057 + uid_set =
62058 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62059 + uid_used = 0;
62060 +
62061 + return uid_set ? 1 : 0;
62062 +}
62063 +
62064 +void
62065 +gr_free_uidset(void)
62066 +{
62067 + if (uid_set)
62068 + kfree(uid_set);
62069 +
62070 + return;
62071 +}
62072 +
62073 +int
62074 +gr_find_uid(const uid_t uid)
62075 +{
62076 + struct crash_uid *tmp = uid_set;
62077 + uid_t buid;
62078 + int low = 0, high = uid_used - 1, mid;
62079 +
62080 + while (high >= low) {
62081 + mid = (low + high) >> 1;
62082 + buid = tmp[mid].uid;
62083 + if (buid == uid)
62084 + return mid;
62085 + if (buid > uid)
62086 + high = mid - 1;
62087 + if (buid < uid)
62088 + low = mid + 1;
62089 + }
62090 +
62091 + return -1;
62092 +}
62093 +
62094 +static __inline__ void
62095 +gr_insertsort(void)
62096 +{
62097 + unsigned short i, j;
62098 + struct crash_uid index;
62099 +
62100 + for (i = 1; i < uid_used; i++) {
62101 + index = uid_set[i];
62102 + j = i;
62103 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62104 + uid_set[j] = uid_set[j - 1];
62105 + j--;
62106 + }
62107 + uid_set[j] = index;
62108 + }
62109 +
62110 + return;
62111 +}
62112 +
62113 +static __inline__ void
62114 +gr_insert_uid(const uid_t uid, const unsigned long expires)
62115 +{
62116 + int loc;
62117 +
62118 + if (uid_used == GR_UIDTABLE_MAX)
62119 + return;
62120 +
62121 + loc = gr_find_uid(uid);
62122 +
62123 + if (loc >= 0) {
62124 + uid_set[loc].expires = expires;
62125 + return;
62126 + }
62127 +
62128 + uid_set[uid_used].uid = uid;
62129 + uid_set[uid_used].expires = expires;
62130 + uid_used++;
62131 +
62132 + gr_insertsort();
62133 +
62134 + return;
62135 +}
62136 +
62137 +void
62138 +gr_remove_uid(const unsigned short loc)
62139 +{
62140 + unsigned short i;
62141 +
62142 + for (i = loc + 1; i < uid_used; i++)
62143 + uid_set[i - 1] = uid_set[i];
62144 +
62145 + uid_used--;
62146 +
62147 + return;
62148 +}
62149 +
62150 +int
62151 +gr_check_crash_uid(const uid_t uid)
62152 +{
62153 + int loc;
62154 + int ret = 0;
62155 +
62156 + if (unlikely(!gr_acl_is_enabled()))
62157 + return 0;
62158 +
62159 + spin_lock(&gr_uid_lock);
62160 + loc = gr_find_uid(uid);
62161 +
62162 + if (loc < 0)
62163 + goto out_unlock;
62164 +
62165 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62166 + gr_remove_uid(loc);
62167 + else
62168 + ret = 1;
62169 +
62170 +out_unlock:
62171 + spin_unlock(&gr_uid_lock);
62172 + return ret;
62173 +}
62174 +
62175 +static __inline__ int
62176 +proc_is_setxid(const struct cred *cred)
62177 +{
62178 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
62179 + cred->uid != cred->fsuid)
62180 + return 1;
62181 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62182 + cred->gid != cred->fsgid)
62183 + return 1;
62184 +
62185 + return 0;
62186 +}
62187 +
62188 +void
62189 +gr_handle_crash(struct task_struct *task, const int sig)
62190 +{
62191 + struct acl_subject_label *curr;
62192 + struct task_struct *tsk, *tsk2;
62193 + const struct cred *cred;
62194 + const struct cred *cred2;
62195 +
62196 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62197 + return;
62198 +
62199 + if (unlikely(!gr_acl_is_enabled()))
62200 + return;
62201 +
62202 + curr = task->acl;
62203 +
62204 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
62205 + return;
62206 +
62207 + if (time_before_eq(curr->expires, get_seconds())) {
62208 + curr->expires = 0;
62209 + curr->crashes = 0;
62210 + }
62211 +
62212 + curr->crashes++;
62213 +
62214 + if (!curr->expires)
62215 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62216 +
62217 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62218 + time_after(curr->expires, get_seconds())) {
62219 + rcu_read_lock();
62220 + cred = __task_cred(task);
62221 + if (cred->uid && proc_is_setxid(cred)) {
62222 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62223 + spin_lock(&gr_uid_lock);
62224 + gr_insert_uid(cred->uid, curr->expires);
62225 + spin_unlock(&gr_uid_lock);
62226 + curr->expires = 0;
62227 + curr->crashes = 0;
62228 + read_lock(&tasklist_lock);
62229 + do_each_thread(tsk2, tsk) {
62230 + cred2 = __task_cred(tsk);
62231 + if (tsk != task && cred2->uid == cred->uid)
62232 + gr_fake_force_sig(SIGKILL, tsk);
62233 + } while_each_thread(tsk2, tsk);
62234 + read_unlock(&tasklist_lock);
62235 + } else {
62236 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62237 + read_lock(&tasklist_lock);
62238 + read_lock(&grsec_exec_file_lock);
62239 + do_each_thread(tsk2, tsk) {
62240 + if (likely(tsk != task)) {
62241 + // if this thread has the same subject as the one that triggered
62242 + // RES_CRASH and it's the same binary, kill it
62243 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62244 + gr_fake_force_sig(SIGKILL, tsk);
62245 + }
62246 + } while_each_thread(tsk2, tsk);
62247 + read_unlock(&grsec_exec_file_lock);
62248 + read_unlock(&tasklist_lock);
62249 + }
62250 + rcu_read_unlock();
62251 + }
62252 +
62253 + return;
62254 +}
62255 +
62256 +int
62257 +gr_check_crash_exec(const struct file *filp)
62258 +{
62259 + struct acl_subject_label *curr;
62260 +
62261 + if (unlikely(!gr_acl_is_enabled()))
62262 + return 0;
62263 +
62264 + read_lock(&gr_inode_lock);
62265 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62266 + filp->f_path.dentry->d_inode->i_sb->s_dev,
62267 + current->role);
62268 + read_unlock(&gr_inode_lock);
62269 +
62270 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62271 + (!curr->crashes && !curr->expires))
62272 + return 0;
62273 +
62274 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62275 + time_after(curr->expires, get_seconds()))
62276 + return 1;
62277 + else if (time_before_eq(curr->expires, get_seconds())) {
62278 + curr->crashes = 0;
62279 + curr->expires = 0;
62280 + }
62281 +
62282 + return 0;
62283 +}
62284 +
62285 +void
62286 +gr_handle_alertkill(struct task_struct *task)
62287 +{
62288 + struct acl_subject_label *curracl;
62289 + __u32 curr_ip;
62290 + struct task_struct *p, *p2;
62291 +
62292 + if (unlikely(!gr_acl_is_enabled()))
62293 + return;
62294 +
62295 + curracl = task->acl;
62296 + curr_ip = task->signal->curr_ip;
62297 +
62298 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62299 + read_lock(&tasklist_lock);
62300 + do_each_thread(p2, p) {
62301 + if (p->signal->curr_ip == curr_ip)
62302 + gr_fake_force_sig(SIGKILL, p);
62303 + } while_each_thread(p2, p);
62304 + read_unlock(&tasklist_lock);
62305 + } else if (curracl->mode & GR_KILLPROC)
62306 + gr_fake_force_sig(SIGKILL, task);
62307 +
62308 + return;
62309 +}
62310 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62311 new file mode 100644
62312 index 0000000..9d83a69
62313 --- /dev/null
62314 +++ b/grsecurity/gracl_shm.c
62315 @@ -0,0 +1,40 @@
62316 +#include <linux/kernel.h>
62317 +#include <linux/mm.h>
62318 +#include <linux/sched.h>
62319 +#include <linux/file.h>
62320 +#include <linux/ipc.h>
62321 +#include <linux/gracl.h>
62322 +#include <linux/grsecurity.h>
62323 +#include <linux/grinternal.h>
62324 +
62325 +int
62326 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62327 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62328 +{
62329 + struct task_struct *task;
62330 +
62331 + if (!gr_acl_is_enabled())
62332 + return 1;
62333 +
62334 + rcu_read_lock();
62335 + read_lock(&tasklist_lock);
62336 +
62337 + task = find_task_by_vpid(shm_cprid);
62338 +
62339 + if (unlikely(!task))
62340 + task = find_task_by_vpid(shm_lapid);
62341 +
62342 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62343 + (task->pid == shm_lapid)) &&
62344 + (task->acl->mode & GR_PROTSHM) &&
62345 + (task->acl != current->acl))) {
62346 + read_unlock(&tasklist_lock);
62347 + rcu_read_unlock();
62348 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62349 + return 0;
62350 + }
62351 + read_unlock(&tasklist_lock);
62352 + rcu_read_unlock();
62353 +
62354 + return 1;
62355 +}
62356 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62357 new file mode 100644
62358 index 0000000..bc0be01
62359 --- /dev/null
62360 +++ b/grsecurity/grsec_chdir.c
62361 @@ -0,0 +1,19 @@
62362 +#include <linux/kernel.h>
62363 +#include <linux/sched.h>
62364 +#include <linux/fs.h>
62365 +#include <linux/file.h>
62366 +#include <linux/grsecurity.h>
62367 +#include <linux/grinternal.h>
62368 +
62369 +void
62370 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62371 +{
62372 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62373 + if ((grsec_enable_chdir && grsec_enable_group &&
62374 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62375 + !grsec_enable_group)) {
62376 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62377 + }
62378 +#endif
62379 + return;
62380 +}
62381 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62382 new file mode 100644
62383 index 0000000..197bdd5
62384 --- /dev/null
62385 +++ b/grsecurity/grsec_chroot.c
62386 @@ -0,0 +1,386 @@
62387 +#include <linux/kernel.h>
62388 +#include <linux/module.h>
62389 +#include <linux/sched.h>
62390 +#include <linux/file.h>
62391 +#include <linux/fs.h>
62392 +#include <linux/mount.h>
62393 +#include <linux/types.h>
62394 +#include <linux/pid_namespace.h>
62395 +#include <linux/grsecurity.h>
62396 +#include <linux/grinternal.h>
62397 +
62398 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62399 +{
62400 +#ifdef CONFIG_GRKERNSEC
62401 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62402 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62403 + task->gr_is_chrooted = 1;
62404 + else
62405 + task->gr_is_chrooted = 0;
62406 +
62407 + task->gr_chroot_dentry = path->dentry;
62408 +#endif
62409 + return;
62410 +}
62411 +
62412 +void gr_clear_chroot_entries(struct task_struct *task)
62413 +{
62414 +#ifdef CONFIG_GRKERNSEC
62415 + task->gr_is_chrooted = 0;
62416 + task->gr_chroot_dentry = NULL;
62417 +#endif
62418 + return;
62419 +}
62420 +
62421 +int
62422 +gr_handle_chroot_unix(const pid_t pid)
62423 +{
62424 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62425 + struct task_struct *p;
62426 +
62427 + if (unlikely(!grsec_enable_chroot_unix))
62428 + return 1;
62429 +
62430 + if (likely(!proc_is_chrooted(current)))
62431 + return 1;
62432 +
62433 + rcu_read_lock();
62434 + read_lock(&tasklist_lock);
62435 +
62436 + p = find_task_by_vpid_unrestricted(pid);
62437 + if (unlikely(p && !have_same_root(current, p))) {
62438 + read_unlock(&tasklist_lock);
62439 + rcu_read_unlock();
62440 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62441 + return 0;
62442 + }
62443 + read_unlock(&tasklist_lock);
62444 + rcu_read_unlock();
62445 +#endif
62446 + return 1;
62447 +}
62448 +
62449 +int
62450 +gr_handle_chroot_nice(void)
62451 +{
62452 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62453 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62454 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62455 + return -EPERM;
62456 + }
62457 +#endif
62458 + return 0;
62459 +}
62460 +
62461 +int
62462 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62463 +{
62464 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62465 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62466 + && proc_is_chrooted(current)) {
62467 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62468 + return -EACCES;
62469 + }
62470 +#endif
62471 + return 0;
62472 +}
62473 +
62474 +int
62475 +gr_handle_chroot_rawio(const struct inode *inode)
62476 +{
62477 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62478 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62479 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62480 + return 1;
62481 +#endif
62482 + return 0;
62483 +}
62484 +
62485 +int
62486 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62487 +{
62488 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62489 + struct task_struct *p;
62490 + int ret = 0;
62491 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62492 + return ret;
62493 +
62494 + read_lock(&tasklist_lock);
62495 + do_each_pid_task(pid, type, p) {
62496 + if (!have_same_root(current, p)) {
62497 + ret = 1;
62498 + goto out;
62499 + }
62500 + } while_each_pid_task(pid, type, p);
62501 +out:
62502 + read_unlock(&tasklist_lock);
62503 + return ret;
62504 +#endif
62505 + return 0;
62506 +}
62507 +
62508 +int
62509 +gr_pid_is_chrooted(struct task_struct *p)
62510 +{
62511 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62512 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62513 + return 0;
62514 +
62515 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62516 + !have_same_root(current, p)) {
62517 + return 1;
62518 + }
62519 +#endif
62520 + return 0;
62521 +}
62522 +
62523 +EXPORT_SYMBOL(gr_pid_is_chrooted);
62524 +
62525 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62526 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62527 +{
62528 + struct dentry *dentry = (struct dentry *)u_dentry;
62529 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62530 + struct dentry *realroot;
62531 + struct vfsmount *realrootmnt;
62532 + struct dentry *currentroot;
62533 + struct vfsmount *currentmnt;
62534 + struct task_struct *reaper = &init_task;
62535 + int ret = 1;
62536 +
62537 + read_lock(&reaper->fs->lock);
62538 + realrootmnt = mntget(reaper->fs->root.mnt);
62539 + realroot = dget(reaper->fs->root.dentry);
62540 + read_unlock(&reaper->fs->lock);
62541 +
62542 + read_lock(&current->fs->lock);
62543 + currentmnt = mntget(current->fs->root.mnt);
62544 + currentroot = dget(current->fs->root.dentry);
62545 + read_unlock(&current->fs->lock);
62546 +
62547 + spin_lock(&dcache_lock);
62548 + for (;;) {
62549 + if (unlikely((dentry == realroot && mnt == realrootmnt)
62550 + || (dentry == currentroot && mnt == currentmnt)))
62551 + break;
62552 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62553 + if (mnt->mnt_parent == mnt)
62554 + break;
62555 + dentry = mnt->mnt_mountpoint;
62556 + mnt = mnt->mnt_parent;
62557 + continue;
62558 + }
62559 + dentry = dentry->d_parent;
62560 + }
62561 + spin_unlock(&dcache_lock);
62562 +
62563 + dput(currentroot);
62564 + mntput(currentmnt);
62565 +
62566 + /* access is outside of chroot */
62567 + if (dentry == realroot && mnt == realrootmnt)
62568 + ret = 0;
62569 +
62570 + dput(realroot);
62571 + mntput(realrootmnt);
62572 + return ret;
62573 +}
62574 +#endif
62575 +
62576 +int
62577 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62578 +{
62579 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62580 + if (!grsec_enable_chroot_fchdir)
62581 + return 1;
62582 +
62583 + if (!proc_is_chrooted(current))
62584 + return 1;
62585 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62586 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62587 + return 0;
62588 + }
62589 +#endif
62590 + return 1;
62591 +}
62592 +
62593 +int
62594 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62595 + const time_t shm_createtime)
62596 +{
62597 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62598 + struct task_struct *p;
62599 + time_t starttime;
62600 +
62601 + if (unlikely(!grsec_enable_chroot_shmat))
62602 + return 1;
62603 +
62604 + if (likely(!proc_is_chrooted(current)))
62605 + return 1;
62606 +
62607 + rcu_read_lock();
62608 + read_lock(&tasklist_lock);
62609 +
62610 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62611 + starttime = p->start_time.tv_sec;
62612 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62613 + if (have_same_root(current, p)) {
62614 + goto allow;
62615 + } else {
62616 + read_unlock(&tasklist_lock);
62617 + rcu_read_unlock();
62618 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62619 + return 0;
62620 + }
62621 + }
62622 + /* creator exited, pid reuse, fall through to next check */
62623 + }
62624 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62625 + if (unlikely(!have_same_root(current, p))) {
62626 + read_unlock(&tasklist_lock);
62627 + rcu_read_unlock();
62628 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62629 + return 0;
62630 + }
62631 + }
62632 +
62633 +allow:
62634 + read_unlock(&tasklist_lock);
62635 + rcu_read_unlock();
62636 +#endif
62637 + return 1;
62638 +}
62639 +
62640 +void
62641 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62642 +{
62643 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62644 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62645 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62646 +#endif
62647 + return;
62648 +}
62649 +
62650 +int
62651 +gr_handle_chroot_mknod(const struct dentry *dentry,
62652 + const struct vfsmount *mnt, const int mode)
62653 +{
62654 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62655 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62656 + proc_is_chrooted(current)) {
62657 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62658 + return -EPERM;
62659 + }
62660 +#endif
62661 + return 0;
62662 +}
62663 +
62664 +int
62665 +gr_handle_chroot_mount(const struct dentry *dentry,
62666 + const struct vfsmount *mnt, const char *dev_name)
62667 +{
62668 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62669 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62670 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62671 + return -EPERM;
62672 + }
62673 +#endif
62674 + return 0;
62675 +}
62676 +
62677 +int
62678 +gr_handle_chroot_pivot(void)
62679 +{
62680 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62681 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62682 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62683 + return -EPERM;
62684 + }
62685 +#endif
62686 + return 0;
62687 +}
62688 +
62689 +int
62690 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62691 +{
62692 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62693 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62694 + !gr_is_outside_chroot(dentry, mnt)) {
62695 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62696 + return -EPERM;
62697 + }
62698 +#endif
62699 + return 0;
62700 +}
62701 +
62702 +extern const char *captab_log[];
62703 +extern int captab_log_entries;
62704 +
62705 +int
62706 +gr_chroot_is_capable(const int cap)
62707 +{
62708 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62709 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62710 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62711 + if (cap_raised(chroot_caps, cap)) {
62712 + const struct cred *creds = current_cred();
62713 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62714 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62715 + }
62716 + return 0;
62717 + }
62718 + }
62719 +#endif
62720 + return 1;
62721 +}
62722 +
62723 +int
62724 +gr_chroot_is_capable_nolog(const int cap)
62725 +{
62726 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62727 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62728 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62729 + if (cap_raised(chroot_caps, cap)) {
62730 + return 0;
62731 + }
62732 + }
62733 +#endif
62734 + return 1;
62735 +}
62736 +
62737 +int
62738 +gr_handle_chroot_sysctl(const int op)
62739 +{
62740 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62741 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62742 + && (op & MAY_WRITE))
62743 + return -EACCES;
62744 +#endif
62745 + return 0;
62746 +}
62747 +
62748 +void
62749 +gr_handle_chroot_chdir(struct path *path)
62750 +{
62751 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62752 + if (grsec_enable_chroot_chdir)
62753 + set_fs_pwd(current->fs, path);
62754 +#endif
62755 + return;
62756 +}
62757 +
62758 +int
62759 +gr_handle_chroot_chmod(const struct dentry *dentry,
62760 + const struct vfsmount *mnt, const int mode)
62761 +{
62762 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62763 + /* allow chmod +s on directories, but not on files */
62764 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62765 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62766 + proc_is_chrooted(current)) {
62767 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62768 + return -EPERM;
62769 + }
62770 +#endif
62771 + return 0;
62772 +}
62773 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62774 new file mode 100644
62775 index 0000000..40545bf
62776 --- /dev/null
62777 +++ b/grsecurity/grsec_disabled.c
62778 @@ -0,0 +1,437 @@
62779 +#include <linux/kernel.h>
62780 +#include <linux/module.h>
62781 +#include <linux/sched.h>
62782 +#include <linux/file.h>
62783 +#include <linux/fs.h>
62784 +#include <linux/kdev_t.h>
62785 +#include <linux/net.h>
62786 +#include <linux/in.h>
62787 +#include <linux/ip.h>
62788 +#include <linux/skbuff.h>
62789 +#include <linux/sysctl.h>
62790 +
62791 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62792 +void
62793 +pax_set_initial_flags(struct linux_binprm *bprm)
62794 +{
62795 + return;
62796 +}
62797 +#endif
62798 +
62799 +#ifdef CONFIG_SYSCTL
62800 +__u32
62801 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62802 +{
62803 + return 0;
62804 +}
62805 +#endif
62806 +
62807 +#ifdef CONFIG_TASKSTATS
62808 +int gr_is_taskstats_denied(int pid)
62809 +{
62810 + return 0;
62811 +}
62812 +#endif
62813 +
62814 +int
62815 +gr_acl_is_enabled(void)
62816 +{
62817 + return 0;
62818 +}
62819 +
62820 +void
62821 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62822 +{
62823 + return;
62824 +}
62825 +
62826 +int
62827 +gr_handle_rawio(const struct inode *inode)
62828 +{
62829 + return 0;
62830 +}
62831 +
62832 +void
62833 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62834 +{
62835 + return;
62836 +}
62837 +
62838 +int
62839 +gr_handle_ptrace(struct task_struct *task, const long request)
62840 +{
62841 + return 0;
62842 +}
62843 +
62844 +int
62845 +gr_handle_proc_ptrace(struct task_struct *task)
62846 +{
62847 + return 0;
62848 +}
62849 +
62850 +void
62851 +gr_learn_resource(const struct task_struct *task,
62852 + const int res, const unsigned long wanted, const int gt)
62853 +{
62854 + return;
62855 +}
62856 +
62857 +int
62858 +gr_set_acls(const int type)
62859 +{
62860 + return 0;
62861 +}
62862 +
62863 +int
62864 +gr_check_hidden_task(const struct task_struct *tsk)
62865 +{
62866 + return 0;
62867 +}
62868 +
62869 +int
62870 +gr_check_protected_task(const struct task_struct *task)
62871 +{
62872 + return 0;
62873 +}
62874 +
62875 +int
62876 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62877 +{
62878 + return 0;
62879 +}
62880 +
62881 +void
62882 +gr_copy_label(struct task_struct *tsk)
62883 +{
62884 + return;
62885 +}
62886 +
62887 +void
62888 +gr_set_pax_flags(struct task_struct *task)
62889 +{
62890 + return;
62891 +}
62892 +
62893 +int
62894 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62895 + const int unsafe_share)
62896 +{
62897 + return 0;
62898 +}
62899 +
62900 +void
62901 +gr_handle_delete(const ino_t ino, const dev_t dev)
62902 +{
62903 + return;
62904 +}
62905 +
62906 +void
62907 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62908 +{
62909 + return;
62910 +}
62911 +
62912 +void
62913 +gr_handle_crash(struct task_struct *task, const int sig)
62914 +{
62915 + return;
62916 +}
62917 +
62918 +int
62919 +gr_check_crash_exec(const struct file *filp)
62920 +{
62921 + return 0;
62922 +}
62923 +
62924 +int
62925 +gr_check_crash_uid(const uid_t uid)
62926 +{
62927 + return 0;
62928 +}
62929 +
62930 +void
62931 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62932 + struct dentry *old_dentry,
62933 + struct dentry *new_dentry,
62934 + struct vfsmount *mnt, const __u8 replace)
62935 +{
62936 + return;
62937 +}
62938 +
62939 +int
62940 +gr_search_socket(const int family, const int type, const int protocol)
62941 +{
62942 + return 1;
62943 +}
62944 +
62945 +int
62946 +gr_search_connectbind(const int mode, const struct socket *sock,
62947 + const struct sockaddr_in *addr)
62948 +{
62949 + return 0;
62950 +}
62951 +
62952 +void
62953 +gr_handle_alertkill(struct task_struct *task)
62954 +{
62955 + return;
62956 +}
62957 +
62958 +__u32
62959 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62960 +{
62961 + return 1;
62962 +}
62963 +
62964 +__u32
62965 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62966 + const struct vfsmount * mnt)
62967 +{
62968 + return 1;
62969 +}
62970 +
62971 +__u32
62972 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62973 + int acc_mode)
62974 +{
62975 + return 1;
62976 +}
62977 +
62978 +__u32
62979 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62980 +{
62981 + return 1;
62982 +}
62983 +
62984 +__u32
62985 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62986 +{
62987 + return 1;
62988 +}
62989 +
62990 +int
62991 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62992 + unsigned int *vm_flags)
62993 +{
62994 + return 1;
62995 +}
62996 +
62997 +__u32
62998 +gr_acl_handle_truncate(const struct dentry * dentry,
62999 + const struct vfsmount * mnt)
63000 +{
63001 + return 1;
63002 +}
63003 +
63004 +__u32
63005 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63006 +{
63007 + return 1;
63008 +}
63009 +
63010 +__u32
63011 +gr_acl_handle_access(const struct dentry * dentry,
63012 + const struct vfsmount * mnt, const int fmode)
63013 +{
63014 + return 1;
63015 +}
63016 +
63017 +__u32
63018 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63019 + umode_t *mode)
63020 +{
63021 + return 1;
63022 +}
63023 +
63024 +__u32
63025 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63026 +{
63027 + return 1;
63028 +}
63029 +
63030 +__u32
63031 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63032 +{
63033 + return 1;
63034 +}
63035 +
63036 +void
63037 +grsecurity_init(void)
63038 +{
63039 + return;
63040 +}
63041 +
63042 +umode_t gr_acl_umask(void)
63043 +{
63044 + return 0;
63045 +}
63046 +
63047 +__u32
63048 +gr_acl_handle_mknod(const struct dentry * new_dentry,
63049 + const struct dentry * parent_dentry,
63050 + const struct vfsmount * parent_mnt,
63051 + const int mode)
63052 +{
63053 + return 1;
63054 +}
63055 +
63056 +__u32
63057 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
63058 + const struct dentry * parent_dentry,
63059 + const struct vfsmount * parent_mnt)
63060 +{
63061 + return 1;
63062 +}
63063 +
63064 +__u32
63065 +gr_acl_handle_symlink(const struct dentry * new_dentry,
63066 + const struct dentry * parent_dentry,
63067 + const struct vfsmount * parent_mnt, const char *from)
63068 +{
63069 + return 1;
63070 +}
63071 +
63072 +__u32
63073 +gr_acl_handle_link(const struct dentry * new_dentry,
63074 + const struct dentry * parent_dentry,
63075 + const struct vfsmount * parent_mnt,
63076 + const struct dentry * old_dentry,
63077 + const struct vfsmount * old_mnt, const char *to)
63078 +{
63079 + return 1;
63080 +}
63081 +
63082 +int
63083 +gr_acl_handle_rename(const struct dentry *new_dentry,
63084 + const struct dentry *parent_dentry,
63085 + const struct vfsmount *parent_mnt,
63086 + const struct dentry *old_dentry,
63087 + const struct inode *old_parent_inode,
63088 + const struct vfsmount *old_mnt, const char *newname)
63089 +{
63090 + return 0;
63091 +}
63092 +
63093 +int
63094 +gr_acl_handle_filldir(const struct file *file, const char *name,
63095 + const int namelen, const ino_t ino)
63096 +{
63097 + return 1;
63098 +}
63099 +
63100 +int
63101 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63102 + const time_t shm_createtime, const uid_t cuid, const int shmid)
63103 +{
63104 + return 1;
63105 +}
63106 +
63107 +int
63108 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63109 +{
63110 + return 0;
63111 +}
63112 +
63113 +int
63114 +gr_search_accept(const struct socket *sock)
63115 +{
63116 + return 0;
63117 +}
63118 +
63119 +int
63120 +gr_search_listen(const struct socket *sock)
63121 +{
63122 + return 0;
63123 +}
63124 +
63125 +int
63126 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63127 +{
63128 + return 0;
63129 +}
63130 +
63131 +__u32
63132 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63133 +{
63134 + return 1;
63135 +}
63136 +
63137 +__u32
63138 +gr_acl_handle_creat(const struct dentry * dentry,
63139 + const struct dentry * p_dentry,
63140 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63141 + const int imode)
63142 +{
63143 + return 1;
63144 +}
63145 +
63146 +void
63147 +gr_acl_handle_exit(void)
63148 +{
63149 + return;
63150 +}
63151 +
63152 +int
63153 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63154 +{
63155 + return 1;
63156 +}
63157 +
63158 +void
63159 +gr_set_role_label(const uid_t uid, const gid_t gid)
63160 +{
63161 + return;
63162 +}
63163 +
63164 +int
63165 +gr_acl_handle_procpidmem(const struct task_struct *task)
63166 +{
63167 + return 0;
63168 +}
63169 +
63170 +int
63171 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63172 +{
63173 + return 0;
63174 +}
63175 +
63176 +int
63177 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63178 +{
63179 + return 0;
63180 +}
63181 +
63182 +void
63183 +gr_set_kernel_label(struct task_struct *task)
63184 +{
63185 + return;
63186 +}
63187 +
63188 +int
63189 +gr_check_user_change(int real, int effective, int fs)
63190 +{
63191 + return 0;
63192 +}
63193 +
63194 +int
63195 +gr_check_group_change(int real, int effective, int fs)
63196 +{
63197 + return 0;
63198 +}
63199 +
63200 +int gr_acl_enable_at_secure(void)
63201 +{
63202 + return 0;
63203 +}
63204 +
63205 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63206 +{
63207 + return dentry->d_inode->i_sb->s_dev;
63208 +}
63209 +
63210 +EXPORT_SYMBOL(gr_learn_resource);
63211 +EXPORT_SYMBOL(gr_set_kernel_label);
63212 +#ifdef CONFIG_SECURITY
63213 +EXPORT_SYMBOL(gr_check_user_change);
63214 +EXPORT_SYMBOL(gr_check_group_change);
63215 +#endif
63216 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63217 new file mode 100644
63218 index 0000000..a96e155
63219 --- /dev/null
63220 +++ b/grsecurity/grsec_exec.c
63221 @@ -0,0 +1,204 @@
63222 +#include <linux/kernel.h>
63223 +#include <linux/sched.h>
63224 +#include <linux/file.h>
63225 +#include <linux/binfmts.h>
63226 +#include <linux/smp_lock.h>
63227 +#include <linux/fs.h>
63228 +#include <linux/types.h>
63229 +#include <linux/grdefs.h>
63230 +#include <linux/grinternal.h>
63231 +#include <linux/capability.h>
63232 +#include <linux/compat.h>
63233 +#include <linux/module.h>
63234 +
63235 +#include <asm/uaccess.h>
63236 +
63237 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63238 +static char gr_exec_arg_buf[132];
63239 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63240 +#endif
63241 +
63242 +void
63243 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63244 +{
63245 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63246 + char *grarg = gr_exec_arg_buf;
63247 + unsigned int i, x, execlen = 0;
63248 + char c;
63249 +
63250 + if (!((grsec_enable_execlog && grsec_enable_group &&
63251 + in_group_p(grsec_audit_gid))
63252 + || (grsec_enable_execlog && !grsec_enable_group)))
63253 + return;
63254 +
63255 + mutex_lock(&gr_exec_arg_mutex);
63256 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63257 +
63258 + if (unlikely(argv == NULL))
63259 + goto log;
63260 +
63261 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63262 + const char __user *p;
63263 + unsigned int len;
63264 +
63265 + if (copy_from_user(&p, argv + i, sizeof(p)))
63266 + goto log;
63267 + if (!p)
63268 + goto log;
63269 + len = strnlen_user(p, 128 - execlen);
63270 + if (len > 128 - execlen)
63271 + len = 128 - execlen;
63272 + else if (len > 0)
63273 + len--;
63274 + if (copy_from_user(grarg + execlen, p, len))
63275 + goto log;
63276 +
63277 + /* rewrite unprintable characters */
63278 + for (x = 0; x < len; x++) {
63279 + c = *(grarg + execlen + x);
63280 + if (c < 32 || c > 126)
63281 + *(grarg + execlen + x) = ' ';
63282 + }
63283 +
63284 + execlen += len;
63285 + *(grarg + execlen) = ' ';
63286 + *(grarg + execlen + 1) = '\0';
63287 + execlen++;
63288 + }
63289 +
63290 + log:
63291 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63292 + bprm->file->f_path.mnt, grarg);
63293 + mutex_unlock(&gr_exec_arg_mutex);
63294 +#endif
63295 + return;
63296 +}
63297 +
63298 +#ifdef CONFIG_COMPAT
63299 +void
63300 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63301 +{
63302 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63303 + char *grarg = gr_exec_arg_buf;
63304 + unsigned int i, x, execlen = 0;
63305 + char c;
63306 +
63307 + if (!((grsec_enable_execlog && grsec_enable_group &&
63308 + in_group_p(grsec_audit_gid))
63309 + || (grsec_enable_execlog && !grsec_enable_group)))
63310 + return;
63311 +
63312 + mutex_lock(&gr_exec_arg_mutex);
63313 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63314 +
63315 + if (unlikely(argv == NULL))
63316 + goto log;
63317 +
63318 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63319 + compat_uptr_t p;
63320 + unsigned int len;
63321 +
63322 + if (get_user(p, argv + i))
63323 + goto log;
63324 + len = strnlen_user(compat_ptr(p), 128 - execlen);
63325 + if (len > 128 - execlen)
63326 + len = 128 - execlen;
63327 + else if (len > 0)
63328 + len--;
63329 + else
63330 + goto log;
63331 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63332 + goto log;
63333 +
63334 + /* rewrite unprintable characters */
63335 + for (x = 0; x < len; x++) {
63336 + c = *(grarg + execlen + x);
63337 + if (c < 32 || c > 126)
63338 + *(grarg + execlen + x) = ' ';
63339 + }
63340 +
63341 + execlen += len;
63342 + *(grarg + execlen) = ' ';
63343 + *(grarg + execlen + 1) = '\0';
63344 + execlen++;
63345 + }
63346 +
63347 + log:
63348 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63349 + bprm->file->f_path.mnt, grarg);
63350 + mutex_unlock(&gr_exec_arg_mutex);
63351 +#endif
63352 + return;
63353 +}
63354 +#endif
63355 +
63356 +#ifdef CONFIG_GRKERNSEC
63357 +extern int gr_acl_is_capable(const int cap);
63358 +extern int gr_acl_is_capable_nolog(const int cap);
63359 +extern int gr_chroot_is_capable(const int cap);
63360 +extern int gr_chroot_is_capable_nolog(const int cap);
63361 +#endif
63362 +
63363 +const char *captab_log[] = {
63364 + "CAP_CHOWN",
63365 + "CAP_DAC_OVERRIDE",
63366 + "CAP_DAC_READ_SEARCH",
63367 + "CAP_FOWNER",
63368 + "CAP_FSETID",
63369 + "CAP_KILL",
63370 + "CAP_SETGID",
63371 + "CAP_SETUID",
63372 + "CAP_SETPCAP",
63373 + "CAP_LINUX_IMMUTABLE",
63374 + "CAP_NET_BIND_SERVICE",
63375 + "CAP_NET_BROADCAST",
63376 + "CAP_NET_ADMIN",
63377 + "CAP_NET_RAW",
63378 + "CAP_IPC_LOCK",
63379 + "CAP_IPC_OWNER",
63380 + "CAP_SYS_MODULE",
63381 + "CAP_SYS_RAWIO",
63382 + "CAP_SYS_CHROOT",
63383 + "CAP_SYS_PTRACE",
63384 + "CAP_SYS_PACCT",
63385 + "CAP_SYS_ADMIN",
63386 + "CAP_SYS_BOOT",
63387 + "CAP_SYS_NICE",
63388 + "CAP_SYS_RESOURCE",
63389 + "CAP_SYS_TIME",
63390 + "CAP_SYS_TTY_CONFIG",
63391 + "CAP_MKNOD",
63392 + "CAP_LEASE",
63393 + "CAP_AUDIT_WRITE",
63394 + "CAP_AUDIT_CONTROL",
63395 + "CAP_SETFCAP",
63396 + "CAP_MAC_OVERRIDE",
63397 + "CAP_MAC_ADMIN"
63398 +};
63399 +
63400 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63401 +
63402 +int gr_is_capable(const int cap)
63403 +{
63404 +#ifdef CONFIG_GRKERNSEC
63405 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63406 + return 1;
63407 + return 0;
63408 +#else
63409 + return 1;
63410 +#endif
63411 +}
63412 +
63413 +int gr_is_capable_nolog(const int cap)
63414 +{
63415 +#ifdef CONFIG_GRKERNSEC
63416 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63417 + return 1;
63418 + return 0;
63419 +#else
63420 + return 1;
63421 +#endif
63422 +}
63423 +
63424 +EXPORT_SYMBOL(gr_is_capable);
63425 +EXPORT_SYMBOL(gr_is_capable_nolog);
63426 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63427 new file mode 100644
63428 index 0000000..d3ee748
63429 --- /dev/null
63430 +++ b/grsecurity/grsec_fifo.c
63431 @@ -0,0 +1,24 @@
63432 +#include <linux/kernel.h>
63433 +#include <linux/sched.h>
63434 +#include <linux/fs.h>
63435 +#include <linux/file.h>
63436 +#include <linux/grinternal.h>
63437 +
63438 +int
63439 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63440 + const struct dentry *dir, const int flag, const int acc_mode)
63441 +{
63442 +#ifdef CONFIG_GRKERNSEC_FIFO
63443 + const struct cred *cred = current_cred();
63444 +
63445 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63446 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63447 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63448 + (cred->fsuid != dentry->d_inode->i_uid)) {
63449 + if (!inode_permission(dentry->d_inode, acc_mode))
63450 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63451 + return -EACCES;
63452 + }
63453 +#endif
63454 + return 0;
63455 +}
63456 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63457 new file mode 100644
63458 index 0000000..8ca18bf
63459 --- /dev/null
63460 +++ b/grsecurity/grsec_fork.c
63461 @@ -0,0 +1,23 @@
63462 +#include <linux/kernel.h>
63463 +#include <linux/sched.h>
63464 +#include <linux/grsecurity.h>
63465 +#include <linux/grinternal.h>
63466 +#include <linux/errno.h>
63467 +
63468 +void
63469 +gr_log_forkfail(const int retval)
63470 +{
63471 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63472 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63473 + switch (retval) {
63474 + case -EAGAIN:
63475 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63476 + break;
63477 + case -ENOMEM:
63478 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63479 + break;
63480 + }
63481 + }
63482 +#endif
63483 + return;
63484 +}
63485 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63486 new file mode 100644
63487 index 0000000..1e995d3
63488 --- /dev/null
63489 +++ b/grsecurity/grsec_init.c
63490 @@ -0,0 +1,278 @@
63491 +#include <linux/kernel.h>
63492 +#include <linux/sched.h>
63493 +#include <linux/mm.h>
63494 +#include <linux/smp_lock.h>
63495 +#include <linux/gracl.h>
63496 +#include <linux/slab.h>
63497 +#include <linux/vmalloc.h>
63498 +#include <linux/percpu.h>
63499 +#include <linux/module.h>
63500 +
63501 +int grsec_enable_ptrace_readexec;
63502 +int grsec_enable_setxid;
63503 +int grsec_enable_brute;
63504 +int grsec_enable_link;
63505 +int grsec_enable_dmesg;
63506 +int grsec_enable_harden_ptrace;
63507 +int grsec_enable_fifo;
63508 +int grsec_enable_execlog;
63509 +int grsec_enable_signal;
63510 +int grsec_enable_forkfail;
63511 +int grsec_enable_audit_ptrace;
63512 +int grsec_enable_time;
63513 +int grsec_enable_audit_textrel;
63514 +int grsec_enable_group;
63515 +int grsec_audit_gid;
63516 +int grsec_enable_chdir;
63517 +int grsec_enable_mount;
63518 +int grsec_enable_rofs;
63519 +int grsec_enable_chroot_findtask;
63520 +int grsec_enable_chroot_mount;
63521 +int grsec_enable_chroot_shmat;
63522 +int grsec_enable_chroot_fchdir;
63523 +int grsec_enable_chroot_double;
63524 +int grsec_enable_chroot_pivot;
63525 +int grsec_enable_chroot_chdir;
63526 +int grsec_enable_chroot_chmod;
63527 +int grsec_enable_chroot_mknod;
63528 +int grsec_enable_chroot_nice;
63529 +int grsec_enable_chroot_execlog;
63530 +int grsec_enable_chroot_caps;
63531 +int grsec_enable_chroot_sysctl;
63532 +int grsec_enable_chroot_unix;
63533 +int grsec_enable_tpe;
63534 +int grsec_tpe_gid;
63535 +int grsec_enable_blackhole;
63536 +#ifdef CONFIG_IPV6_MODULE
63537 +EXPORT_SYMBOL(grsec_enable_blackhole);
63538 +#endif
63539 +int grsec_lastack_retries;
63540 +int grsec_enable_tpe_all;
63541 +int grsec_enable_tpe_invert;
63542 +int grsec_enable_socket_all;
63543 +int grsec_socket_all_gid;
63544 +int grsec_enable_socket_client;
63545 +int grsec_socket_client_gid;
63546 +int grsec_enable_socket_server;
63547 +int grsec_socket_server_gid;
63548 +int grsec_resource_logging;
63549 +int grsec_disable_privio;
63550 +int grsec_enable_log_rwxmaps;
63551 +int grsec_lock;
63552 +
63553 +DEFINE_SPINLOCK(grsec_alert_lock);
63554 +unsigned long grsec_alert_wtime = 0;
63555 +unsigned long grsec_alert_fyet = 0;
63556 +
63557 +DEFINE_SPINLOCK(grsec_audit_lock);
63558 +
63559 +DEFINE_RWLOCK(grsec_exec_file_lock);
63560 +
63561 +char *gr_shared_page[4];
63562 +
63563 +char *gr_alert_log_fmt;
63564 +char *gr_audit_log_fmt;
63565 +char *gr_alert_log_buf;
63566 +char *gr_audit_log_buf;
63567 +
63568 +extern struct gr_arg *gr_usermode;
63569 +extern unsigned char *gr_system_salt;
63570 +extern unsigned char *gr_system_sum;
63571 +
63572 +void __init
63573 +grsecurity_init(void)
63574 +{
63575 + int j;
63576 + /* create the per-cpu shared pages */
63577 +
63578 +#ifdef CONFIG_X86
63579 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63580 +#endif
63581 +
63582 + for (j = 0; j < 4; j++) {
63583 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63584 + if (gr_shared_page[j] == NULL) {
63585 + panic("Unable to allocate grsecurity shared page");
63586 + return;
63587 + }
63588 + }
63589 +
63590 + /* allocate log buffers */
63591 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63592 + if (!gr_alert_log_fmt) {
63593 + panic("Unable to allocate grsecurity alert log format buffer");
63594 + return;
63595 + }
63596 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63597 + if (!gr_audit_log_fmt) {
63598 + panic("Unable to allocate grsecurity audit log format buffer");
63599 + return;
63600 + }
63601 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63602 + if (!gr_alert_log_buf) {
63603 + panic("Unable to allocate grsecurity alert log buffer");
63604 + return;
63605 + }
63606 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63607 + if (!gr_audit_log_buf) {
63608 + panic("Unable to allocate grsecurity audit log buffer");
63609 + return;
63610 + }
63611 +
63612 + /* allocate memory for authentication structure */
63613 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63614 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63615 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63616 +
63617 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63618 + panic("Unable to allocate grsecurity authentication structure");
63619 + return;
63620 + }
63621 +
63622 +
63623 +#ifdef CONFIG_GRKERNSEC_IO
63624 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63625 + grsec_disable_privio = 1;
63626 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63627 + grsec_disable_privio = 1;
63628 +#else
63629 + grsec_disable_privio = 0;
63630 +#endif
63631 +#endif
63632 +
63633 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63634 + /* for backward compatibility, tpe_invert always defaults to on if
63635 + enabled in the kernel
63636 + */
63637 + grsec_enable_tpe_invert = 1;
63638 +#endif
63639 +
63640 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63641 +#ifndef CONFIG_GRKERNSEC_SYSCTL
63642 + grsec_lock = 1;
63643 +#endif
63644 +
63645 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63646 + grsec_enable_audit_textrel = 1;
63647 +#endif
63648 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63649 + grsec_enable_log_rwxmaps = 1;
63650 +#endif
63651 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63652 + grsec_enable_group = 1;
63653 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63654 +#endif
63655 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63656 + grsec_enable_chdir = 1;
63657 +#endif
63658 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63659 + grsec_enable_harden_ptrace = 1;
63660 +#endif
63661 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63662 + grsec_enable_mount = 1;
63663 +#endif
63664 +#ifdef CONFIG_GRKERNSEC_LINK
63665 + grsec_enable_link = 1;
63666 +#endif
63667 +#ifdef CONFIG_GRKERNSEC_BRUTE
63668 + grsec_enable_brute = 1;
63669 +#endif
63670 +#ifdef CONFIG_GRKERNSEC_DMESG
63671 + grsec_enable_dmesg = 1;
63672 +#endif
63673 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63674 + grsec_enable_blackhole = 1;
63675 + grsec_lastack_retries = 4;
63676 +#endif
63677 +#ifdef CONFIG_GRKERNSEC_FIFO
63678 + grsec_enable_fifo = 1;
63679 +#endif
63680 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63681 + grsec_enable_execlog = 1;
63682 +#endif
63683 +#ifdef CONFIG_GRKERNSEC_SETXID
63684 + grsec_enable_setxid = 1;
63685 +#endif
63686 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63687 + grsec_enable_ptrace_readexec = 1;
63688 +#endif
63689 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63690 + grsec_enable_signal = 1;
63691 +#endif
63692 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63693 + grsec_enable_forkfail = 1;
63694 +#endif
63695 +#ifdef CONFIG_GRKERNSEC_TIME
63696 + grsec_enable_time = 1;
63697 +#endif
63698 +#ifdef CONFIG_GRKERNSEC_RESLOG
63699 + grsec_resource_logging = 1;
63700 +#endif
63701 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63702 + grsec_enable_chroot_findtask = 1;
63703 +#endif
63704 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63705 + grsec_enable_chroot_unix = 1;
63706 +#endif
63707 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63708 + grsec_enable_chroot_mount = 1;
63709 +#endif
63710 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63711 + grsec_enable_chroot_fchdir = 1;
63712 +#endif
63713 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63714 + grsec_enable_chroot_shmat = 1;
63715 +#endif
63716 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63717 + grsec_enable_audit_ptrace = 1;
63718 +#endif
63719 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63720 + grsec_enable_chroot_double = 1;
63721 +#endif
63722 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63723 + grsec_enable_chroot_pivot = 1;
63724 +#endif
63725 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63726 + grsec_enable_chroot_chdir = 1;
63727 +#endif
63728 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63729 + grsec_enable_chroot_chmod = 1;
63730 +#endif
63731 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63732 + grsec_enable_chroot_mknod = 1;
63733 +#endif
63734 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63735 + grsec_enable_chroot_nice = 1;
63736 +#endif
63737 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63738 + grsec_enable_chroot_execlog = 1;
63739 +#endif
63740 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63741 + grsec_enable_chroot_caps = 1;
63742 +#endif
63743 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63744 + grsec_enable_chroot_sysctl = 1;
63745 +#endif
63746 +#ifdef CONFIG_GRKERNSEC_TPE
63747 + grsec_enable_tpe = 1;
63748 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63749 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63750 + grsec_enable_tpe_all = 1;
63751 +#endif
63752 +#endif
63753 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63754 + grsec_enable_socket_all = 1;
63755 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63756 +#endif
63757 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63758 + grsec_enable_socket_client = 1;
63759 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63760 +#endif
63761 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63762 + grsec_enable_socket_server = 1;
63763 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63764 +#endif
63765 +#endif
63766 +
63767 + return;
63768 +}
63769 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63770 new file mode 100644
63771 index 0000000..3efe141
63772 --- /dev/null
63773 +++ b/grsecurity/grsec_link.c
63774 @@ -0,0 +1,43 @@
63775 +#include <linux/kernel.h>
63776 +#include <linux/sched.h>
63777 +#include <linux/fs.h>
63778 +#include <linux/file.h>
63779 +#include <linux/grinternal.h>
63780 +
63781 +int
63782 +gr_handle_follow_link(const struct inode *parent,
63783 + const struct inode *inode,
63784 + const struct dentry *dentry, const struct vfsmount *mnt)
63785 +{
63786 +#ifdef CONFIG_GRKERNSEC_LINK
63787 + const struct cred *cred = current_cred();
63788 +
63789 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63790 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63791 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63792 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63793 + return -EACCES;
63794 + }
63795 +#endif
63796 + return 0;
63797 +}
63798 +
63799 +int
63800 +gr_handle_hardlink(const struct dentry *dentry,
63801 + const struct vfsmount *mnt,
63802 + struct inode *inode, const int mode, const char *to)
63803 +{
63804 +#ifdef CONFIG_GRKERNSEC_LINK
63805 + const struct cred *cred = current_cred();
63806 +
63807 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63808 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63809 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63810 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63811 + !capable(CAP_FOWNER) && cred->uid) {
63812 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63813 + return -EPERM;
63814 + }
63815 +#endif
63816 + return 0;
63817 +}
63818 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63819 new file mode 100644
63820 index 0000000..a45d2e9
63821 --- /dev/null
63822 +++ b/grsecurity/grsec_log.c
63823 @@ -0,0 +1,322 @@
63824 +#include <linux/kernel.h>
63825 +#include <linux/sched.h>
63826 +#include <linux/file.h>
63827 +#include <linux/tty.h>
63828 +#include <linux/fs.h>
63829 +#include <linux/grinternal.h>
63830 +
63831 +#ifdef CONFIG_TREE_PREEMPT_RCU
63832 +#define DISABLE_PREEMPT() preempt_disable()
63833 +#define ENABLE_PREEMPT() preempt_enable()
63834 +#else
63835 +#define DISABLE_PREEMPT()
63836 +#define ENABLE_PREEMPT()
63837 +#endif
63838 +
63839 +#define BEGIN_LOCKS(x) \
63840 + DISABLE_PREEMPT(); \
63841 + rcu_read_lock(); \
63842 + read_lock(&tasklist_lock); \
63843 + read_lock(&grsec_exec_file_lock); \
63844 + if (x != GR_DO_AUDIT) \
63845 + spin_lock(&grsec_alert_lock); \
63846 + else \
63847 + spin_lock(&grsec_audit_lock)
63848 +
63849 +#define END_LOCKS(x) \
63850 + if (x != GR_DO_AUDIT) \
63851 + spin_unlock(&grsec_alert_lock); \
63852 + else \
63853 + spin_unlock(&grsec_audit_lock); \
63854 + read_unlock(&grsec_exec_file_lock); \
63855 + read_unlock(&tasklist_lock); \
63856 + rcu_read_unlock(); \
63857 + ENABLE_PREEMPT(); \
63858 + if (x == GR_DONT_AUDIT) \
63859 + gr_handle_alertkill(current)
63860 +
63861 +enum {
63862 + FLOODING,
63863 + NO_FLOODING
63864 +};
63865 +
63866 +extern char *gr_alert_log_fmt;
63867 +extern char *gr_audit_log_fmt;
63868 +extern char *gr_alert_log_buf;
63869 +extern char *gr_audit_log_buf;
63870 +
63871 +static int gr_log_start(int audit)
63872 +{
63873 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63874 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63875 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63876 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63877 + unsigned long curr_secs = get_seconds();
63878 +
63879 + if (audit == GR_DO_AUDIT)
63880 + goto set_fmt;
63881 +
63882 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63883 + grsec_alert_wtime = curr_secs;
63884 + grsec_alert_fyet = 0;
63885 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63886 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63887 + grsec_alert_fyet++;
63888 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63889 + grsec_alert_wtime = curr_secs;
63890 + grsec_alert_fyet++;
63891 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63892 + return FLOODING;
63893 + }
63894 + else return FLOODING;
63895 +
63896 +set_fmt:
63897 +#endif
63898 + memset(buf, 0, PAGE_SIZE);
63899 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63900 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63901 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63902 + } else if (current->signal->curr_ip) {
63903 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63904 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63905 + } else if (gr_acl_is_enabled()) {
63906 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63907 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63908 + } else {
63909 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63910 + strcpy(buf, fmt);
63911 + }
63912 +
63913 + return NO_FLOODING;
63914 +}
63915 +
63916 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63917 + __attribute__ ((format (printf, 2, 0)));
63918 +
63919 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63920 +{
63921 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63922 + unsigned int len = strlen(buf);
63923 +
63924 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63925 +
63926 + return;
63927 +}
63928 +
63929 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63930 + __attribute__ ((format (printf, 2, 3)));
63931 +
63932 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63933 +{
63934 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63935 + unsigned int len = strlen(buf);
63936 + va_list ap;
63937 +
63938 + va_start(ap, msg);
63939 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63940 + va_end(ap);
63941 +
63942 + return;
63943 +}
63944 +
63945 +static void gr_log_end(int audit, int append_default)
63946 +{
63947 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63948 +
63949 + if (append_default) {
63950 + unsigned int len = strlen(buf);
63951 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63952 + }
63953 +
63954 + printk("%s\n", buf);
63955 +
63956 + return;
63957 +}
63958 +
63959 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63960 +{
63961 + int logtype;
63962 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63963 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63964 + void *voidptr = NULL;
63965 + int num1 = 0, num2 = 0;
63966 + unsigned long ulong1 = 0, ulong2 = 0;
63967 + struct dentry *dentry = NULL;
63968 + struct vfsmount *mnt = NULL;
63969 + struct file *file = NULL;
63970 + struct task_struct *task = NULL;
63971 + const struct cred *cred, *pcred;
63972 + va_list ap;
63973 +
63974 + BEGIN_LOCKS(audit);
63975 + logtype = gr_log_start(audit);
63976 + if (logtype == FLOODING) {
63977 + END_LOCKS(audit);
63978 + return;
63979 + }
63980 + va_start(ap, argtypes);
63981 + switch (argtypes) {
63982 + case GR_TTYSNIFF:
63983 + task = va_arg(ap, struct task_struct *);
63984 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63985 + break;
63986 + case GR_SYSCTL_HIDDEN:
63987 + str1 = va_arg(ap, char *);
63988 + gr_log_middle_varargs(audit, msg, result, str1);
63989 + break;
63990 + case GR_RBAC:
63991 + dentry = va_arg(ap, struct dentry *);
63992 + mnt = va_arg(ap, struct vfsmount *);
63993 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63994 + break;
63995 + case GR_RBAC_STR:
63996 + dentry = va_arg(ap, struct dentry *);
63997 + mnt = va_arg(ap, struct vfsmount *);
63998 + str1 = va_arg(ap, char *);
63999 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64000 + break;
64001 + case GR_STR_RBAC:
64002 + str1 = va_arg(ap, char *);
64003 + dentry = va_arg(ap, struct dentry *);
64004 + mnt = va_arg(ap, struct vfsmount *);
64005 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64006 + break;
64007 + case GR_RBAC_MODE2:
64008 + dentry = va_arg(ap, struct dentry *);
64009 + mnt = va_arg(ap, struct vfsmount *);
64010 + str1 = va_arg(ap, char *);
64011 + str2 = va_arg(ap, char *);
64012 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64013 + break;
64014 + case GR_RBAC_MODE3:
64015 + dentry = va_arg(ap, struct dentry *);
64016 + mnt = va_arg(ap, struct vfsmount *);
64017 + str1 = va_arg(ap, char *);
64018 + str2 = va_arg(ap, char *);
64019 + str3 = va_arg(ap, char *);
64020 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64021 + break;
64022 + case GR_FILENAME:
64023 + dentry = va_arg(ap, struct dentry *);
64024 + mnt = va_arg(ap, struct vfsmount *);
64025 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64026 + break;
64027 + case GR_STR_FILENAME:
64028 + str1 = va_arg(ap, char *);
64029 + dentry = va_arg(ap, struct dentry *);
64030 + mnt = va_arg(ap, struct vfsmount *);
64031 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64032 + break;
64033 + case GR_FILENAME_STR:
64034 + dentry = va_arg(ap, struct dentry *);
64035 + mnt = va_arg(ap, struct vfsmount *);
64036 + str1 = va_arg(ap, char *);
64037 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64038 + break;
64039 + case GR_FILENAME_TWO_INT:
64040 + dentry = va_arg(ap, struct dentry *);
64041 + mnt = va_arg(ap, struct vfsmount *);
64042 + num1 = va_arg(ap, int);
64043 + num2 = va_arg(ap, int);
64044 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64045 + break;
64046 + case GR_FILENAME_TWO_INT_STR:
64047 + dentry = va_arg(ap, struct dentry *);
64048 + mnt = va_arg(ap, struct vfsmount *);
64049 + num1 = va_arg(ap, int);
64050 + num2 = va_arg(ap, int);
64051 + str1 = va_arg(ap, char *);
64052 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64053 + break;
64054 + case GR_TEXTREL:
64055 + file = va_arg(ap, struct file *);
64056 + ulong1 = va_arg(ap, unsigned long);
64057 + ulong2 = va_arg(ap, unsigned long);
64058 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64059 + break;
64060 + case GR_PTRACE:
64061 + task = va_arg(ap, struct task_struct *);
64062 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
64063 + break;
64064 + case GR_RESOURCE:
64065 + task = va_arg(ap, struct task_struct *);
64066 + cred = __task_cred(task);
64067 + pcred = __task_cred(task->real_parent);
64068 + ulong1 = va_arg(ap, unsigned long);
64069 + str1 = va_arg(ap, char *);
64070 + ulong2 = va_arg(ap, unsigned long);
64071 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64072 + break;
64073 + case GR_CAP:
64074 + task = va_arg(ap, struct task_struct *);
64075 + cred = __task_cred(task);
64076 + pcred = __task_cred(task->real_parent);
64077 + str1 = va_arg(ap, char *);
64078 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64079 + break;
64080 + case GR_SIG:
64081 + str1 = va_arg(ap, char *);
64082 + voidptr = va_arg(ap, void *);
64083 + gr_log_middle_varargs(audit, msg, str1, voidptr);
64084 + break;
64085 + case GR_SIG2:
64086 + task = va_arg(ap, struct task_struct *);
64087 + cred = __task_cred(task);
64088 + pcred = __task_cred(task->real_parent);
64089 + num1 = va_arg(ap, int);
64090 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64091 + break;
64092 + case GR_CRASH1:
64093 + task = va_arg(ap, struct task_struct *);
64094 + cred = __task_cred(task);
64095 + pcred = __task_cred(task->real_parent);
64096 + ulong1 = va_arg(ap, unsigned long);
64097 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
64098 + break;
64099 + case GR_CRASH2:
64100 + task = va_arg(ap, struct task_struct *);
64101 + cred = __task_cred(task);
64102 + pcred = __task_cred(task->real_parent);
64103 + ulong1 = va_arg(ap, unsigned long);
64104 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
64105 + break;
64106 + case GR_RWXMAP:
64107 + file = va_arg(ap, struct file *);
64108 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64109 + break;
64110 + case GR_PSACCT:
64111 + {
64112 + unsigned int wday, cday;
64113 + __u8 whr, chr;
64114 + __u8 wmin, cmin;
64115 + __u8 wsec, csec;
64116 + char cur_tty[64] = { 0 };
64117 + char parent_tty[64] = { 0 };
64118 +
64119 + task = va_arg(ap, struct task_struct *);
64120 + wday = va_arg(ap, unsigned int);
64121 + cday = va_arg(ap, unsigned int);
64122 + whr = va_arg(ap, int);
64123 + chr = va_arg(ap, int);
64124 + wmin = va_arg(ap, int);
64125 + cmin = va_arg(ap, int);
64126 + wsec = va_arg(ap, int);
64127 + csec = va_arg(ap, int);
64128 + ulong1 = va_arg(ap, unsigned long);
64129 + cred = __task_cred(task);
64130 + pcred = __task_cred(task->real_parent);
64131 +
64132 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64133 + }
64134 + break;
64135 + default:
64136 + gr_log_middle(audit, msg, ap);
64137 + }
64138 + va_end(ap);
64139 + // these don't need DEFAULTSECARGS printed on the end
64140 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64141 + gr_log_end(audit, 0);
64142 + else
64143 + gr_log_end(audit, 1);
64144 + END_LOCKS(audit);
64145 +}
64146 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64147 new file mode 100644
64148 index 0000000..f536303
64149 --- /dev/null
64150 +++ b/grsecurity/grsec_mem.c
64151 @@ -0,0 +1,40 @@
64152 +#include <linux/kernel.h>
64153 +#include <linux/sched.h>
64154 +#include <linux/mm.h>
64155 +#include <linux/mman.h>
64156 +#include <linux/grinternal.h>
64157 +
64158 +void
64159 +gr_handle_ioperm(void)
64160 +{
64161 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64162 + return;
64163 +}
64164 +
64165 +void
64166 +gr_handle_iopl(void)
64167 +{
64168 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64169 + return;
64170 +}
64171 +
64172 +void
64173 +gr_handle_mem_readwrite(u64 from, u64 to)
64174 +{
64175 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64176 + return;
64177 +}
64178 +
64179 +void
64180 +gr_handle_vm86(void)
64181 +{
64182 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64183 + return;
64184 +}
64185 +
64186 +void
64187 +gr_log_badprocpid(const char *entry)
64188 +{
64189 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64190 + return;
64191 +}
64192 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64193 new file mode 100644
64194 index 0000000..2131422
64195 --- /dev/null
64196 +++ b/grsecurity/grsec_mount.c
64197 @@ -0,0 +1,62 @@
64198 +#include <linux/kernel.h>
64199 +#include <linux/sched.h>
64200 +#include <linux/mount.h>
64201 +#include <linux/grsecurity.h>
64202 +#include <linux/grinternal.h>
64203 +
64204 +void
64205 +gr_log_remount(const char *devname, const int retval)
64206 +{
64207 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64208 + if (grsec_enable_mount && (retval >= 0))
64209 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64210 +#endif
64211 + return;
64212 +}
64213 +
64214 +void
64215 +gr_log_unmount(const char *devname, const int retval)
64216 +{
64217 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64218 + if (grsec_enable_mount && (retval >= 0))
64219 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64220 +#endif
64221 + return;
64222 +}
64223 +
64224 +void
64225 +gr_log_mount(const char *from, const char *to, const int retval)
64226 +{
64227 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64228 + if (grsec_enable_mount && (retval >= 0))
64229 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64230 +#endif
64231 + return;
64232 +}
64233 +
64234 +int
64235 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64236 +{
64237 +#ifdef CONFIG_GRKERNSEC_ROFS
64238 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64239 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64240 + return -EPERM;
64241 + } else
64242 + return 0;
64243 +#endif
64244 + return 0;
64245 +}
64246 +
64247 +int
64248 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64249 +{
64250 +#ifdef CONFIG_GRKERNSEC_ROFS
64251 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64252 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64253 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64254 + return -EPERM;
64255 + } else
64256 + return 0;
64257 +#endif
64258 + return 0;
64259 +}
64260 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64261 new file mode 100644
64262 index 0000000..a3b12a0
64263 --- /dev/null
64264 +++ b/grsecurity/grsec_pax.c
64265 @@ -0,0 +1,36 @@
64266 +#include <linux/kernel.h>
64267 +#include <linux/sched.h>
64268 +#include <linux/mm.h>
64269 +#include <linux/file.h>
64270 +#include <linux/grinternal.h>
64271 +#include <linux/grsecurity.h>
64272 +
64273 +void
64274 +gr_log_textrel(struct vm_area_struct * vma)
64275 +{
64276 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64277 + if (grsec_enable_audit_textrel)
64278 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64279 +#endif
64280 + return;
64281 +}
64282 +
64283 +void
64284 +gr_log_rwxmmap(struct file *file)
64285 +{
64286 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64287 + if (grsec_enable_log_rwxmaps)
64288 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64289 +#endif
64290 + return;
64291 +}
64292 +
64293 +void
64294 +gr_log_rwxmprotect(struct file *file)
64295 +{
64296 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64297 + if (grsec_enable_log_rwxmaps)
64298 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64299 +#endif
64300 + return;
64301 +}
64302 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64303 new file mode 100644
64304 index 0000000..78f8733
64305 --- /dev/null
64306 +++ b/grsecurity/grsec_ptrace.c
64307 @@ -0,0 +1,30 @@
64308 +#include <linux/kernel.h>
64309 +#include <linux/sched.h>
64310 +#include <linux/grinternal.h>
64311 +#include <linux/security.h>
64312 +
64313 +void
64314 +gr_audit_ptrace(struct task_struct *task)
64315 +{
64316 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64317 + if (grsec_enable_audit_ptrace)
64318 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64319 +#endif
64320 + return;
64321 +}
64322 +
64323 +int
64324 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64325 +{
64326 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64327 + const struct dentry *dentry = file->f_path.dentry;
64328 + const struct vfsmount *mnt = file->f_path.mnt;
64329 +
64330 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64331 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64332 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64333 + return -EACCES;
64334 + }
64335 +#endif
64336 + return 0;
64337 +}
64338 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64339 new file mode 100644
64340 index 0000000..c648492
64341 --- /dev/null
64342 +++ b/grsecurity/grsec_sig.c
64343 @@ -0,0 +1,206 @@
64344 +#include <linux/kernel.h>
64345 +#include <linux/sched.h>
64346 +#include <linux/delay.h>
64347 +#include <linux/grsecurity.h>
64348 +#include <linux/grinternal.h>
64349 +#include <linux/hardirq.h>
64350 +
64351 +char *signames[] = {
64352 + [SIGSEGV] = "Segmentation fault",
64353 + [SIGILL] = "Illegal instruction",
64354 + [SIGABRT] = "Abort",
64355 + [SIGBUS] = "Invalid alignment/Bus error"
64356 +};
64357 +
64358 +void
64359 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64360 +{
64361 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64362 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64363 + (sig == SIGABRT) || (sig == SIGBUS))) {
64364 + if (t->pid == current->pid) {
64365 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64366 + } else {
64367 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64368 + }
64369 + }
64370 +#endif
64371 + return;
64372 +}
64373 +
64374 +int
64375 +gr_handle_signal(const struct task_struct *p, const int sig)
64376 +{
64377 +#ifdef CONFIG_GRKERNSEC
64378 + /* ignore the 0 signal for protected task checks */
64379 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64380 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64381 + return -EPERM;
64382 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64383 + return -EPERM;
64384 + }
64385 +#endif
64386 + return 0;
64387 +}
64388 +
64389 +#ifdef CONFIG_GRKERNSEC
64390 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64391 +
64392 +int gr_fake_force_sig(int sig, struct task_struct *t)
64393 +{
64394 + unsigned long int flags;
64395 + int ret, blocked, ignored;
64396 + struct k_sigaction *action;
64397 +
64398 + spin_lock_irqsave(&t->sighand->siglock, flags);
64399 + action = &t->sighand->action[sig-1];
64400 + ignored = action->sa.sa_handler == SIG_IGN;
64401 + blocked = sigismember(&t->blocked, sig);
64402 + if (blocked || ignored) {
64403 + action->sa.sa_handler = SIG_DFL;
64404 + if (blocked) {
64405 + sigdelset(&t->blocked, sig);
64406 + recalc_sigpending_and_wake(t);
64407 + }
64408 + }
64409 + if (action->sa.sa_handler == SIG_DFL)
64410 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64411 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64412 +
64413 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64414 +
64415 + return ret;
64416 +}
64417 +#endif
64418 +
64419 +#ifdef CONFIG_GRKERNSEC_BRUTE
64420 +#define GR_USER_BAN_TIME (15 * 60)
64421 +
64422 +static int __get_dumpable(unsigned long mm_flags)
64423 +{
64424 + int ret;
64425 +
64426 + ret = mm_flags & MMF_DUMPABLE_MASK;
64427 + return (ret >= 2) ? 2 : ret;
64428 +}
64429 +#endif
64430 +
64431 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64432 +{
64433 +#ifdef CONFIG_GRKERNSEC_BRUTE
64434 + uid_t uid = 0;
64435 +
64436 + if (!grsec_enable_brute)
64437 + return;
64438 +
64439 + rcu_read_lock();
64440 + read_lock(&tasklist_lock);
64441 + read_lock(&grsec_exec_file_lock);
64442 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64443 + p->real_parent->brute = 1;
64444 + else {
64445 + const struct cred *cred = __task_cred(p), *cred2;
64446 + struct task_struct *tsk, *tsk2;
64447 +
64448 + if (!__get_dumpable(mm_flags) && cred->uid) {
64449 + struct user_struct *user;
64450 +
64451 + uid = cred->uid;
64452 +
64453 + /* this is put upon execution past expiration */
64454 + user = find_user(uid);
64455 + if (user == NULL)
64456 + goto unlock;
64457 + user->banned = 1;
64458 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64459 + if (user->ban_expires == ~0UL)
64460 + user->ban_expires--;
64461 +
64462 + do_each_thread(tsk2, tsk) {
64463 + cred2 = __task_cred(tsk);
64464 + if (tsk != p && cred2->uid == uid)
64465 + gr_fake_force_sig(SIGKILL, tsk);
64466 + } while_each_thread(tsk2, tsk);
64467 + }
64468 + }
64469 +unlock:
64470 + read_unlock(&grsec_exec_file_lock);
64471 + read_unlock(&tasklist_lock);
64472 + rcu_read_unlock();
64473 +
64474 + if (uid)
64475 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64476 +#endif
64477 + return;
64478 +}
64479 +
64480 +void gr_handle_brute_check(void)
64481 +{
64482 +#ifdef CONFIG_GRKERNSEC_BRUTE
64483 + if (current->brute)
64484 + msleep(30 * 1000);
64485 +#endif
64486 + return;
64487 +}
64488 +
64489 +void gr_handle_kernel_exploit(void)
64490 +{
64491 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64492 + const struct cred *cred;
64493 + struct task_struct *tsk, *tsk2;
64494 + struct user_struct *user;
64495 + uid_t uid;
64496 +
64497 + if (in_irq() || in_serving_softirq() || in_nmi())
64498 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64499 +
64500 + uid = current_uid();
64501 +
64502 + if (uid == 0)
64503 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
64504 + else {
64505 + /* kill all the processes of this user, hold a reference
64506 + to their creds struct, and prevent them from creating
64507 + another process until system reset
64508 + */
64509 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64510 + /* we intentionally leak this ref */
64511 + user = get_uid(current->cred->user);
64512 + if (user) {
64513 + user->banned = 1;
64514 + user->ban_expires = ~0UL;
64515 + }
64516 +
64517 + read_lock(&tasklist_lock);
64518 + do_each_thread(tsk2, tsk) {
64519 + cred = __task_cred(tsk);
64520 + if (cred->uid == uid)
64521 + gr_fake_force_sig(SIGKILL, tsk);
64522 + } while_each_thread(tsk2, tsk);
64523 + read_unlock(&tasklist_lock);
64524 + }
64525 +#endif
64526 +}
64527 +
64528 +int __gr_process_user_ban(struct user_struct *user)
64529 +{
64530 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64531 + if (unlikely(user->banned)) {
64532 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64533 + user->banned = 0;
64534 + user->ban_expires = 0;
64535 + free_uid(user);
64536 + } else
64537 + return -EPERM;
64538 + }
64539 +#endif
64540 + return 0;
64541 +}
64542 +
64543 +int gr_process_user_ban(void)
64544 +{
64545 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64546 + return __gr_process_user_ban(current->cred->user);
64547 +#endif
64548 + return 0;
64549 +}
64550 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64551 new file mode 100644
64552 index 0000000..7512ea9
64553 --- /dev/null
64554 +++ b/grsecurity/grsec_sock.c
64555 @@ -0,0 +1,275 @@
64556 +#include <linux/kernel.h>
64557 +#include <linux/module.h>
64558 +#include <linux/sched.h>
64559 +#include <linux/file.h>
64560 +#include <linux/net.h>
64561 +#include <linux/in.h>
64562 +#include <linux/ip.h>
64563 +#include <net/sock.h>
64564 +#include <net/inet_sock.h>
64565 +#include <linux/grsecurity.h>
64566 +#include <linux/grinternal.h>
64567 +#include <linux/gracl.h>
64568 +
64569 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64570 +EXPORT_SYMBOL(gr_cap_rtnetlink);
64571 +
64572 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64573 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64574 +
64575 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
64576 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
64577 +
64578 +#ifdef CONFIG_UNIX_MODULE
64579 +EXPORT_SYMBOL(gr_acl_handle_unix);
64580 +EXPORT_SYMBOL(gr_acl_handle_mknod);
64581 +EXPORT_SYMBOL(gr_handle_chroot_unix);
64582 +EXPORT_SYMBOL(gr_handle_create);
64583 +#endif
64584 +
64585 +#ifdef CONFIG_GRKERNSEC
64586 +#define gr_conn_table_size 32749
64587 +struct conn_table_entry {
64588 + struct conn_table_entry *next;
64589 + struct signal_struct *sig;
64590 +};
64591 +
64592 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64593 +DEFINE_SPINLOCK(gr_conn_table_lock);
64594 +
64595 +extern const char * gr_socktype_to_name(unsigned char type);
64596 +extern const char * gr_proto_to_name(unsigned char proto);
64597 +extern const char * gr_sockfamily_to_name(unsigned char family);
64598 +
64599 +static __inline__ int
64600 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64601 +{
64602 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64603 +}
64604 +
64605 +static __inline__ int
64606 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64607 + __u16 sport, __u16 dport)
64608 +{
64609 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64610 + sig->gr_sport == sport && sig->gr_dport == dport))
64611 + return 1;
64612 + else
64613 + return 0;
64614 +}
64615 +
64616 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64617 +{
64618 + struct conn_table_entry **match;
64619 + unsigned int index;
64620 +
64621 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64622 + sig->gr_sport, sig->gr_dport,
64623 + gr_conn_table_size);
64624 +
64625 + newent->sig = sig;
64626 +
64627 + match = &gr_conn_table[index];
64628 + newent->next = *match;
64629 + *match = newent;
64630 +
64631 + return;
64632 +}
64633 +
64634 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64635 +{
64636 + struct conn_table_entry *match, *last = NULL;
64637 + unsigned int index;
64638 +
64639 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64640 + sig->gr_sport, sig->gr_dport,
64641 + gr_conn_table_size);
64642 +
64643 + match = gr_conn_table[index];
64644 + while (match && !conn_match(match->sig,
64645 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64646 + sig->gr_dport)) {
64647 + last = match;
64648 + match = match->next;
64649 + }
64650 +
64651 + if (match) {
64652 + if (last)
64653 + last->next = match->next;
64654 + else
64655 + gr_conn_table[index] = NULL;
64656 + kfree(match);
64657 + }
64658 +
64659 + return;
64660 +}
64661 +
64662 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64663 + __u16 sport, __u16 dport)
64664 +{
64665 + struct conn_table_entry *match;
64666 + unsigned int index;
64667 +
64668 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64669 +
64670 + match = gr_conn_table[index];
64671 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64672 + match = match->next;
64673 +
64674 + if (match)
64675 + return match->sig;
64676 + else
64677 + return NULL;
64678 +}
64679 +
64680 +#endif
64681 +
64682 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64683 +{
64684 +#ifdef CONFIG_GRKERNSEC
64685 + struct signal_struct *sig = task->signal;
64686 + struct conn_table_entry *newent;
64687 +
64688 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64689 + if (newent == NULL)
64690 + return;
64691 + /* no bh lock needed since we are called with bh disabled */
64692 + spin_lock(&gr_conn_table_lock);
64693 + gr_del_task_from_ip_table_nolock(sig);
64694 + sig->gr_saddr = inet->rcv_saddr;
64695 + sig->gr_daddr = inet->daddr;
64696 + sig->gr_sport = inet->sport;
64697 + sig->gr_dport = inet->dport;
64698 + gr_add_to_task_ip_table_nolock(sig, newent);
64699 + spin_unlock(&gr_conn_table_lock);
64700 +#endif
64701 + return;
64702 +}
64703 +
64704 +void gr_del_task_from_ip_table(struct task_struct *task)
64705 +{
64706 +#ifdef CONFIG_GRKERNSEC
64707 + spin_lock_bh(&gr_conn_table_lock);
64708 + gr_del_task_from_ip_table_nolock(task->signal);
64709 + spin_unlock_bh(&gr_conn_table_lock);
64710 +#endif
64711 + return;
64712 +}
64713 +
64714 +void
64715 +gr_attach_curr_ip(const struct sock *sk)
64716 +{
64717 +#ifdef CONFIG_GRKERNSEC
64718 + struct signal_struct *p, *set;
64719 + const struct inet_sock *inet = inet_sk(sk);
64720 +
64721 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64722 + return;
64723 +
64724 + set = current->signal;
64725 +
64726 + spin_lock_bh(&gr_conn_table_lock);
64727 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64728 + inet->dport, inet->sport);
64729 + if (unlikely(p != NULL)) {
64730 + set->curr_ip = p->curr_ip;
64731 + set->used_accept = 1;
64732 + gr_del_task_from_ip_table_nolock(p);
64733 + spin_unlock_bh(&gr_conn_table_lock);
64734 + return;
64735 + }
64736 + spin_unlock_bh(&gr_conn_table_lock);
64737 +
64738 + set->curr_ip = inet->daddr;
64739 + set->used_accept = 1;
64740 +#endif
64741 + return;
64742 +}
64743 +
64744 +int
64745 +gr_handle_sock_all(const int family, const int type, const int protocol)
64746 +{
64747 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64748 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64749 + (family != AF_UNIX)) {
64750 + if (family == AF_INET)
64751 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64752 + else
64753 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64754 + return -EACCES;
64755 + }
64756 +#endif
64757 + return 0;
64758 +}
64759 +
64760 +int
64761 +gr_handle_sock_server(const struct sockaddr *sck)
64762 +{
64763 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64764 + if (grsec_enable_socket_server &&
64765 + in_group_p(grsec_socket_server_gid) &&
64766 + sck && (sck->sa_family != AF_UNIX) &&
64767 + (sck->sa_family != AF_LOCAL)) {
64768 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64769 + return -EACCES;
64770 + }
64771 +#endif
64772 + return 0;
64773 +}
64774 +
64775 +int
64776 +gr_handle_sock_server_other(const struct sock *sck)
64777 +{
64778 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64779 + if (grsec_enable_socket_server &&
64780 + in_group_p(grsec_socket_server_gid) &&
64781 + sck && (sck->sk_family != AF_UNIX) &&
64782 + (sck->sk_family != AF_LOCAL)) {
64783 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64784 + return -EACCES;
64785 + }
64786 +#endif
64787 + return 0;
64788 +}
64789 +
64790 +int
64791 +gr_handle_sock_client(const struct sockaddr *sck)
64792 +{
64793 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64794 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64795 + sck && (sck->sa_family != AF_UNIX) &&
64796 + (sck->sa_family != AF_LOCAL)) {
64797 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64798 + return -EACCES;
64799 + }
64800 +#endif
64801 + return 0;
64802 +}
64803 +
64804 +kernel_cap_t
64805 +gr_cap_rtnetlink(struct sock *sock)
64806 +{
64807 +#ifdef CONFIG_GRKERNSEC
64808 + if (!gr_acl_is_enabled())
64809 + return current_cap();
64810 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64811 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64812 + gr_is_capable(CAP_SYS_ADMIN))
64813 + return current_cap();
64814 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64815 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64816 + gr_is_capable(CAP_AUDIT_WRITE) &&
64817 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64818 + gr_is_capable(CAP_AUDIT_CONTROL))
64819 + return current_cap();
64820 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64821 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64822 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64823 + gr_is_capable(CAP_NET_ADMIN)))
64824 + return current_cap();
64825 + else
64826 + return __cap_empty_set;
64827 +#else
64828 + return current_cap();
64829 +#endif
64830 +}
64831 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64832 new file mode 100644
64833 index 0000000..31f3258
64834 --- /dev/null
64835 +++ b/grsecurity/grsec_sysctl.c
64836 @@ -0,0 +1,499 @@
64837 +#include <linux/kernel.h>
64838 +#include <linux/sched.h>
64839 +#include <linux/sysctl.h>
64840 +#include <linux/grsecurity.h>
64841 +#include <linux/grinternal.h>
64842 +
64843 +int
64844 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64845 +{
64846 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64847 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64848 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64849 + return -EACCES;
64850 + }
64851 +#endif
64852 + return 0;
64853 +}
64854 +
64855 +#ifdef CONFIG_GRKERNSEC_ROFS
64856 +static int __maybe_unused one = 1;
64857 +#endif
64858 +
64859 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64860 +ctl_table grsecurity_table[] = {
64861 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64862 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64863 +#ifdef CONFIG_GRKERNSEC_IO
64864 + {
64865 + .ctl_name = CTL_UNNUMBERED,
64866 + .procname = "disable_priv_io",
64867 + .data = &grsec_disable_privio,
64868 + .maxlen = sizeof(int),
64869 + .mode = 0600,
64870 + .proc_handler = &proc_dointvec,
64871 + },
64872 +#endif
64873 +#endif
64874 +#ifdef CONFIG_GRKERNSEC_LINK
64875 + {
64876 + .ctl_name = CTL_UNNUMBERED,
64877 + .procname = "linking_restrictions",
64878 + .data = &grsec_enable_link,
64879 + .maxlen = sizeof(int),
64880 + .mode = 0600,
64881 + .proc_handler = &proc_dointvec,
64882 + },
64883 +#endif
64884 +#ifdef CONFIG_GRKERNSEC_BRUTE
64885 + {
64886 + .ctl_name = CTL_UNNUMBERED,
64887 + .procname = "deter_bruteforce",
64888 + .data = &grsec_enable_brute,
64889 + .maxlen = sizeof(int),
64890 + .mode = 0600,
64891 + .proc_handler = &proc_dointvec,
64892 + },
64893 +#endif
64894 +#ifdef CONFIG_GRKERNSEC_FIFO
64895 + {
64896 + .ctl_name = CTL_UNNUMBERED,
64897 + .procname = "fifo_restrictions",
64898 + .data = &grsec_enable_fifo,
64899 + .maxlen = sizeof(int),
64900 + .mode = 0600,
64901 + .proc_handler = &proc_dointvec,
64902 + },
64903 +#endif
64904 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64905 + {
64906 + .ctl_name = CTL_UNNUMBERED,
64907 + .procname = "ptrace_readexec",
64908 + .data = &grsec_enable_ptrace_readexec,
64909 + .maxlen = sizeof(int),
64910 + .mode = 0600,
64911 + .proc_handler = &proc_dointvec,
64912 + },
64913 +#endif
64914 +#ifdef CONFIG_GRKERNSEC_SETXID
64915 + {
64916 + .ctl_name = CTL_UNNUMBERED,
64917 + .procname = "consistent_setxid",
64918 + .data = &grsec_enable_setxid,
64919 + .maxlen = sizeof(int),
64920 + .mode = 0600,
64921 + .proc_handler = &proc_dointvec,
64922 + },
64923 +#endif
64924 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64925 + {
64926 + .ctl_name = CTL_UNNUMBERED,
64927 + .procname = "ip_blackhole",
64928 + .data = &grsec_enable_blackhole,
64929 + .maxlen = sizeof(int),
64930 + .mode = 0600,
64931 + .proc_handler = &proc_dointvec,
64932 + },
64933 + {
64934 + .ctl_name = CTL_UNNUMBERED,
64935 + .procname = "lastack_retries",
64936 + .data = &grsec_lastack_retries,
64937 + .maxlen = sizeof(int),
64938 + .mode = 0600,
64939 + .proc_handler = &proc_dointvec,
64940 + },
64941 +#endif
64942 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64943 + {
64944 + .ctl_name = CTL_UNNUMBERED,
64945 + .procname = "exec_logging",
64946 + .data = &grsec_enable_execlog,
64947 + .maxlen = sizeof(int),
64948 + .mode = 0600,
64949 + .proc_handler = &proc_dointvec,
64950 + },
64951 +#endif
64952 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64953 + {
64954 + .ctl_name = CTL_UNNUMBERED,
64955 + .procname = "rwxmap_logging",
64956 + .data = &grsec_enable_log_rwxmaps,
64957 + .maxlen = sizeof(int),
64958 + .mode = 0600,
64959 + .proc_handler = &proc_dointvec,
64960 + },
64961 +#endif
64962 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64963 + {
64964 + .ctl_name = CTL_UNNUMBERED,
64965 + .procname = "signal_logging",
64966 + .data = &grsec_enable_signal,
64967 + .maxlen = sizeof(int),
64968 + .mode = 0600,
64969 + .proc_handler = &proc_dointvec,
64970 + },
64971 +#endif
64972 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64973 + {
64974 + .ctl_name = CTL_UNNUMBERED,
64975 + .procname = "forkfail_logging",
64976 + .data = &grsec_enable_forkfail,
64977 + .maxlen = sizeof(int),
64978 + .mode = 0600,
64979 + .proc_handler = &proc_dointvec,
64980 + },
64981 +#endif
64982 +#ifdef CONFIG_GRKERNSEC_TIME
64983 + {
64984 + .ctl_name = CTL_UNNUMBERED,
64985 + .procname = "timechange_logging",
64986 + .data = &grsec_enable_time,
64987 + .maxlen = sizeof(int),
64988 + .mode = 0600,
64989 + .proc_handler = &proc_dointvec,
64990 + },
64991 +#endif
64992 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64993 + {
64994 + .ctl_name = CTL_UNNUMBERED,
64995 + .procname = "chroot_deny_shmat",
64996 + .data = &grsec_enable_chroot_shmat,
64997 + .maxlen = sizeof(int),
64998 + .mode = 0600,
64999 + .proc_handler = &proc_dointvec,
65000 + },
65001 +#endif
65002 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65003 + {
65004 + .ctl_name = CTL_UNNUMBERED,
65005 + .procname = "chroot_deny_unix",
65006 + .data = &grsec_enable_chroot_unix,
65007 + .maxlen = sizeof(int),
65008 + .mode = 0600,
65009 + .proc_handler = &proc_dointvec,
65010 + },
65011 +#endif
65012 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65013 + {
65014 + .ctl_name = CTL_UNNUMBERED,
65015 + .procname = "chroot_deny_mount",
65016 + .data = &grsec_enable_chroot_mount,
65017 + .maxlen = sizeof(int),
65018 + .mode = 0600,
65019 + .proc_handler = &proc_dointvec,
65020 + },
65021 +#endif
65022 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65023 + {
65024 + .ctl_name = CTL_UNNUMBERED,
65025 + .procname = "chroot_deny_fchdir",
65026 + .data = &grsec_enable_chroot_fchdir,
65027 + .maxlen = sizeof(int),
65028 + .mode = 0600,
65029 + .proc_handler = &proc_dointvec,
65030 + },
65031 +#endif
65032 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65033 + {
65034 + .ctl_name = CTL_UNNUMBERED,
65035 + .procname = "chroot_deny_chroot",
65036 + .data = &grsec_enable_chroot_double,
65037 + .maxlen = sizeof(int),
65038 + .mode = 0600,
65039 + .proc_handler = &proc_dointvec,
65040 + },
65041 +#endif
65042 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65043 + {
65044 + .ctl_name = CTL_UNNUMBERED,
65045 + .procname = "chroot_deny_pivot",
65046 + .data = &grsec_enable_chroot_pivot,
65047 + .maxlen = sizeof(int),
65048 + .mode = 0600,
65049 + .proc_handler = &proc_dointvec,
65050 + },
65051 +#endif
65052 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65053 + {
65054 + .ctl_name = CTL_UNNUMBERED,
65055 + .procname = "chroot_enforce_chdir",
65056 + .data = &grsec_enable_chroot_chdir,
65057 + .maxlen = sizeof(int),
65058 + .mode = 0600,
65059 + .proc_handler = &proc_dointvec,
65060 + },
65061 +#endif
65062 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65063 + {
65064 + .ctl_name = CTL_UNNUMBERED,
65065 + .procname = "chroot_deny_chmod",
65066 + .data = &grsec_enable_chroot_chmod,
65067 + .maxlen = sizeof(int),
65068 + .mode = 0600,
65069 + .proc_handler = &proc_dointvec,
65070 + },
65071 +#endif
65072 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65073 + {
65074 + .ctl_name = CTL_UNNUMBERED,
65075 + .procname = "chroot_deny_mknod",
65076 + .data = &grsec_enable_chroot_mknod,
65077 + .maxlen = sizeof(int),
65078 + .mode = 0600,
65079 + .proc_handler = &proc_dointvec,
65080 + },
65081 +#endif
65082 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65083 + {
65084 + .ctl_name = CTL_UNNUMBERED,
65085 + .procname = "chroot_restrict_nice",
65086 + .data = &grsec_enable_chroot_nice,
65087 + .maxlen = sizeof(int),
65088 + .mode = 0600,
65089 + .proc_handler = &proc_dointvec,
65090 + },
65091 +#endif
65092 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65093 + {
65094 + .ctl_name = CTL_UNNUMBERED,
65095 + .procname = "chroot_execlog",
65096 + .data = &grsec_enable_chroot_execlog,
65097 + .maxlen = sizeof(int),
65098 + .mode = 0600,
65099 + .proc_handler = &proc_dointvec,
65100 + },
65101 +#endif
65102 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65103 + {
65104 + .ctl_name = CTL_UNNUMBERED,
65105 + .procname = "chroot_caps",
65106 + .data = &grsec_enable_chroot_caps,
65107 + .maxlen = sizeof(int),
65108 + .mode = 0600,
65109 + .proc_handler = &proc_dointvec,
65110 + },
65111 +#endif
65112 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65113 + {
65114 + .ctl_name = CTL_UNNUMBERED,
65115 + .procname = "chroot_deny_sysctl",
65116 + .data = &grsec_enable_chroot_sysctl,
65117 + .maxlen = sizeof(int),
65118 + .mode = 0600,
65119 + .proc_handler = &proc_dointvec,
65120 + },
65121 +#endif
65122 +#ifdef CONFIG_GRKERNSEC_TPE
65123 + {
65124 + .ctl_name = CTL_UNNUMBERED,
65125 + .procname = "tpe",
65126 + .data = &grsec_enable_tpe,
65127 + .maxlen = sizeof(int),
65128 + .mode = 0600,
65129 + .proc_handler = &proc_dointvec,
65130 + },
65131 + {
65132 + .ctl_name = CTL_UNNUMBERED,
65133 + .procname = "tpe_gid",
65134 + .data = &grsec_tpe_gid,
65135 + .maxlen = sizeof(int),
65136 + .mode = 0600,
65137 + .proc_handler = &proc_dointvec,
65138 + },
65139 +#endif
65140 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65141 + {
65142 + .ctl_name = CTL_UNNUMBERED,
65143 + .procname = "tpe_invert",
65144 + .data = &grsec_enable_tpe_invert,
65145 + .maxlen = sizeof(int),
65146 + .mode = 0600,
65147 + .proc_handler = &proc_dointvec,
65148 + },
65149 +#endif
65150 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65151 + {
65152 + .ctl_name = CTL_UNNUMBERED,
65153 + .procname = "tpe_restrict_all",
65154 + .data = &grsec_enable_tpe_all,
65155 + .maxlen = sizeof(int),
65156 + .mode = 0600,
65157 + .proc_handler = &proc_dointvec,
65158 + },
65159 +#endif
65160 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65161 + {
65162 + .ctl_name = CTL_UNNUMBERED,
65163 + .procname = "socket_all",
65164 + .data = &grsec_enable_socket_all,
65165 + .maxlen = sizeof(int),
65166 + .mode = 0600,
65167 + .proc_handler = &proc_dointvec,
65168 + },
65169 + {
65170 + .ctl_name = CTL_UNNUMBERED,
65171 + .procname = "socket_all_gid",
65172 + .data = &grsec_socket_all_gid,
65173 + .maxlen = sizeof(int),
65174 + .mode = 0600,
65175 + .proc_handler = &proc_dointvec,
65176 + },
65177 +#endif
65178 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65179 + {
65180 + .ctl_name = CTL_UNNUMBERED,
65181 + .procname = "socket_client",
65182 + .data = &grsec_enable_socket_client,
65183 + .maxlen = sizeof(int),
65184 + .mode = 0600,
65185 + .proc_handler = &proc_dointvec,
65186 + },
65187 + {
65188 + .ctl_name = CTL_UNNUMBERED,
65189 + .procname = "socket_client_gid",
65190 + .data = &grsec_socket_client_gid,
65191 + .maxlen = sizeof(int),
65192 + .mode = 0600,
65193 + .proc_handler = &proc_dointvec,
65194 + },
65195 +#endif
65196 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65197 + {
65198 + .ctl_name = CTL_UNNUMBERED,
65199 + .procname = "socket_server",
65200 + .data = &grsec_enable_socket_server,
65201 + .maxlen = sizeof(int),
65202 + .mode = 0600,
65203 + .proc_handler = &proc_dointvec,
65204 + },
65205 + {
65206 + .ctl_name = CTL_UNNUMBERED,
65207 + .procname = "socket_server_gid",
65208 + .data = &grsec_socket_server_gid,
65209 + .maxlen = sizeof(int),
65210 + .mode = 0600,
65211 + .proc_handler = &proc_dointvec,
65212 + },
65213 +#endif
65214 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65215 + {
65216 + .ctl_name = CTL_UNNUMBERED,
65217 + .procname = "audit_group",
65218 + .data = &grsec_enable_group,
65219 + .maxlen = sizeof(int),
65220 + .mode = 0600,
65221 + .proc_handler = &proc_dointvec,
65222 + },
65223 + {
65224 + .ctl_name = CTL_UNNUMBERED,
65225 + .procname = "audit_gid",
65226 + .data = &grsec_audit_gid,
65227 + .maxlen = sizeof(int),
65228 + .mode = 0600,
65229 + .proc_handler = &proc_dointvec,
65230 + },
65231 +#endif
65232 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65233 + {
65234 + .ctl_name = CTL_UNNUMBERED,
65235 + .procname = "audit_chdir",
65236 + .data = &grsec_enable_chdir,
65237 + .maxlen = sizeof(int),
65238 + .mode = 0600,
65239 + .proc_handler = &proc_dointvec,
65240 + },
65241 +#endif
65242 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65243 + {
65244 + .ctl_name = CTL_UNNUMBERED,
65245 + .procname = "audit_mount",
65246 + .data = &grsec_enable_mount,
65247 + .maxlen = sizeof(int),
65248 + .mode = 0600,
65249 + .proc_handler = &proc_dointvec,
65250 + },
65251 +#endif
65252 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65253 + {
65254 + .ctl_name = CTL_UNNUMBERED,
65255 + .procname = "audit_textrel",
65256 + .data = &grsec_enable_audit_textrel,
65257 + .maxlen = sizeof(int),
65258 + .mode = 0600,
65259 + .proc_handler = &proc_dointvec,
65260 + },
65261 +#endif
65262 +#ifdef CONFIG_GRKERNSEC_DMESG
65263 + {
65264 + .ctl_name = CTL_UNNUMBERED,
65265 + .procname = "dmesg",
65266 + .data = &grsec_enable_dmesg,
65267 + .maxlen = sizeof(int),
65268 + .mode = 0600,
65269 + .proc_handler = &proc_dointvec,
65270 + },
65271 +#endif
65272 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65273 + {
65274 + .ctl_name = CTL_UNNUMBERED,
65275 + .procname = "chroot_findtask",
65276 + .data = &grsec_enable_chroot_findtask,
65277 + .maxlen = sizeof(int),
65278 + .mode = 0600,
65279 + .proc_handler = &proc_dointvec,
65280 + },
65281 +#endif
65282 +#ifdef CONFIG_GRKERNSEC_RESLOG
65283 + {
65284 + .ctl_name = CTL_UNNUMBERED,
65285 + .procname = "resource_logging",
65286 + .data = &grsec_resource_logging,
65287 + .maxlen = sizeof(int),
65288 + .mode = 0600,
65289 + .proc_handler = &proc_dointvec,
65290 + },
65291 +#endif
65292 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65293 + {
65294 + .ctl_name = CTL_UNNUMBERED,
65295 + .procname = "audit_ptrace",
65296 + .data = &grsec_enable_audit_ptrace,
65297 + .maxlen = sizeof(int),
65298 + .mode = 0600,
65299 + .proc_handler = &proc_dointvec,
65300 + },
65301 +#endif
65302 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65303 + {
65304 + .ctl_name = CTL_UNNUMBERED,
65305 + .procname = "harden_ptrace",
65306 + .data = &grsec_enable_harden_ptrace,
65307 + .maxlen = sizeof(int),
65308 + .mode = 0600,
65309 + .proc_handler = &proc_dointvec,
65310 + },
65311 +#endif
65312 + {
65313 + .ctl_name = CTL_UNNUMBERED,
65314 + .procname = "grsec_lock",
65315 + .data = &grsec_lock,
65316 + .maxlen = sizeof(int),
65317 + .mode = 0600,
65318 + .proc_handler = &proc_dointvec,
65319 + },
65320 +#endif
65321 +#ifdef CONFIG_GRKERNSEC_ROFS
65322 + {
65323 + .ctl_name = CTL_UNNUMBERED,
65324 + .procname = "romount_protect",
65325 + .data = &grsec_enable_rofs,
65326 + .maxlen = sizeof(int),
65327 + .mode = 0600,
65328 + .proc_handler = &proc_dointvec_minmax,
65329 + .extra1 = &one,
65330 + .extra2 = &one,
65331 + },
65332 +#endif
65333 + { .ctl_name = 0 }
65334 +};
65335 +#endif
65336 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65337 new file mode 100644
65338 index 0000000..0dc13c3
65339 --- /dev/null
65340 +++ b/grsecurity/grsec_time.c
65341 @@ -0,0 +1,16 @@
65342 +#include <linux/kernel.h>
65343 +#include <linux/sched.h>
65344 +#include <linux/grinternal.h>
65345 +#include <linux/module.h>
65346 +
65347 +void
65348 +gr_log_timechange(void)
65349 +{
65350 +#ifdef CONFIG_GRKERNSEC_TIME
65351 + if (grsec_enable_time)
65352 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65353 +#endif
65354 + return;
65355 +}
65356 +
65357 +EXPORT_SYMBOL(gr_log_timechange);
65358 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65359 new file mode 100644
65360 index 0000000..07e0dc0
65361 --- /dev/null
65362 +++ b/grsecurity/grsec_tpe.c
65363 @@ -0,0 +1,73 @@
65364 +#include <linux/kernel.h>
65365 +#include <linux/sched.h>
65366 +#include <linux/file.h>
65367 +#include <linux/fs.h>
65368 +#include <linux/grinternal.h>
65369 +
65370 +extern int gr_acl_tpe_check(void);
65371 +
65372 +int
65373 +gr_tpe_allow(const struct file *file)
65374 +{
65375 +#ifdef CONFIG_GRKERNSEC
65376 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65377 + const struct cred *cred = current_cred();
65378 + char *msg = NULL;
65379 + char *msg2 = NULL;
65380 +
65381 + // never restrict root
65382 + if (!cred->uid)
65383 + return 1;
65384 +
65385 + if (grsec_enable_tpe) {
65386 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65387 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65388 + msg = "not being in trusted group";
65389 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65390 + msg = "being in untrusted group";
65391 +#else
65392 + if (in_group_p(grsec_tpe_gid))
65393 + msg = "being in untrusted group";
65394 +#endif
65395 + }
65396 + if (!msg && gr_acl_tpe_check())
65397 + msg = "being in untrusted role";
65398 +
65399 + // not in any affected group/role
65400 + if (!msg)
65401 + goto next_check;
65402 +
65403 + if (inode->i_uid)
65404 + msg2 = "file in non-root-owned directory";
65405 + else if (inode->i_mode & S_IWOTH)
65406 + msg2 = "file in world-writable directory";
65407 + else if (inode->i_mode & S_IWGRP)
65408 + msg2 = "file in group-writable directory";
65409 +
65410 + if (msg && msg2) {
65411 + char fullmsg[70] = {0};
65412 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65413 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65414 + return 0;
65415 + }
65416 + msg = NULL;
65417 +next_check:
65418 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65419 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65420 + return 1;
65421 +
65422 + if (inode->i_uid && (inode->i_uid != cred->uid))
65423 + msg = "directory not owned by user";
65424 + else if (inode->i_mode & S_IWOTH)
65425 + msg = "file in world-writable directory";
65426 + else if (inode->i_mode & S_IWGRP)
65427 + msg = "file in group-writable directory";
65428 +
65429 + if (msg) {
65430 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65431 + return 0;
65432 + }
65433 +#endif
65434 +#endif
65435 + return 1;
65436 +}
65437 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65438 new file mode 100644
65439 index 0000000..9f7b1ac
65440 --- /dev/null
65441 +++ b/grsecurity/grsum.c
65442 @@ -0,0 +1,61 @@
65443 +#include <linux/err.h>
65444 +#include <linux/kernel.h>
65445 +#include <linux/sched.h>
65446 +#include <linux/mm.h>
65447 +#include <linux/scatterlist.h>
65448 +#include <linux/crypto.h>
65449 +#include <linux/gracl.h>
65450 +
65451 +
65452 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65453 +#error "crypto and sha256 must be built into the kernel"
65454 +#endif
65455 +
65456 +int
65457 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65458 +{
65459 + char *p;
65460 + struct crypto_hash *tfm;
65461 + struct hash_desc desc;
65462 + struct scatterlist sg;
65463 + unsigned char temp_sum[GR_SHA_LEN];
65464 + volatile int retval = 0;
65465 + volatile int dummy = 0;
65466 + unsigned int i;
65467 +
65468 + sg_init_table(&sg, 1);
65469 +
65470 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65471 + if (IS_ERR(tfm)) {
65472 + /* should never happen, since sha256 should be built in */
65473 + return 1;
65474 + }
65475 +
65476 + desc.tfm = tfm;
65477 + desc.flags = 0;
65478 +
65479 + crypto_hash_init(&desc);
65480 +
65481 + p = salt;
65482 + sg_set_buf(&sg, p, GR_SALT_LEN);
65483 + crypto_hash_update(&desc, &sg, sg.length);
65484 +
65485 + p = entry->pw;
65486 + sg_set_buf(&sg, p, strlen(p));
65487 +
65488 + crypto_hash_update(&desc, &sg, sg.length);
65489 +
65490 + crypto_hash_final(&desc, temp_sum);
65491 +
65492 + memset(entry->pw, 0, GR_PW_LEN);
65493 +
65494 + for (i = 0; i < GR_SHA_LEN; i++)
65495 + if (sum[i] != temp_sum[i])
65496 + retval = 1;
65497 + else
65498 + dummy = 1; // waste a cycle
65499 +
65500 + crypto_free_hash(tfm);
65501 +
65502 + return retval;
65503 +}
65504 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65505 index 3cd9ccd..fe16d47 100644
65506 --- a/include/acpi/acpi_bus.h
65507 +++ b/include/acpi/acpi_bus.h
65508 @@ -107,7 +107,7 @@ struct acpi_device_ops {
65509 acpi_op_bind bind;
65510 acpi_op_unbind unbind;
65511 acpi_op_notify notify;
65512 -};
65513 +} __no_const;
65514
65515 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65516
65517 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65518 index f4906f6..71feb73 100644
65519 --- a/include/acpi/acpi_drivers.h
65520 +++ b/include/acpi/acpi_drivers.h
65521 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65522 Dock Station
65523 -------------------------------------------------------------------------- */
65524 struct acpi_dock_ops {
65525 - acpi_notify_handler handler;
65526 - acpi_notify_handler uevent;
65527 + const acpi_notify_handler handler;
65528 + const acpi_notify_handler uevent;
65529 };
65530
65531 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65532 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65533 extern int register_dock_notifier(struct notifier_block *nb);
65534 extern void unregister_dock_notifier(struct notifier_block *nb);
65535 extern int register_hotplug_dock_device(acpi_handle handle,
65536 - struct acpi_dock_ops *ops,
65537 + const struct acpi_dock_ops *ops,
65538 void *context);
65539 extern void unregister_hotplug_dock_device(acpi_handle handle);
65540 #else
65541 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65542 {
65543 }
65544 static inline int register_hotplug_dock_device(acpi_handle handle,
65545 - struct acpi_dock_ops *ops,
65546 + const struct acpi_dock_ops *ops,
65547 void *context)
65548 {
65549 return -ENODEV;
65550 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65551 index b7babf0..a9ac9fc 100644
65552 --- a/include/asm-generic/atomic-long.h
65553 +++ b/include/asm-generic/atomic-long.h
65554 @@ -22,6 +22,12 @@
65555
65556 typedef atomic64_t atomic_long_t;
65557
65558 +#ifdef CONFIG_PAX_REFCOUNT
65559 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
65560 +#else
65561 +typedef atomic64_t atomic_long_unchecked_t;
65562 +#endif
65563 +
65564 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65565
65566 static inline long atomic_long_read(atomic_long_t *l)
65567 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65568 return (long)atomic64_read(v);
65569 }
65570
65571 +#ifdef CONFIG_PAX_REFCOUNT
65572 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65573 +{
65574 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65575 +
65576 + return (long)atomic64_read_unchecked(v);
65577 +}
65578 +#endif
65579 +
65580 static inline void atomic_long_set(atomic_long_t *l, long i)
65581 {
65582 atomic64_t *v = (atomic64_t *)l;
65583 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65584 atomic64_set(v, i);
65585 }
65586
65587 +#ifdef CONFIG_PAX_REFCOUNT
65588 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65589 +{
65590 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65591 +
65592 + atomic64_set_unchecked(v, i);
65593 +}
65594 +#endif
65595 +
65596 static inline void atomic_long_inc(atomic_long_t *l)
65597 {
65598 atomic64_t *v = (atomic64_t *)l;
65599 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65600 atomic64_inc(v);
65601 }
65602
65603 +#ifdef CONFIG_PAX_REFCOUNT
65604 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65605 +{
65606 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65607 +
65608 + atomic64_inc_unchecked(v);
65609 +}
65610 +#endif
65611 +
65612 static inline void atomic_long_dec(atomic_long_t *l)
65613 {
65614 atomic64_t *v = (atomic64_t *)l;
65615 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65616 atomic64_dec(v);
65617 }
65618
65619 +#ifdef CONFIG_PAX_REFCOUNT
65620 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65621 +{
65622 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65623 +
65624 + atomic64_dec_unchecked(v);
65625 +}
65626 +#endif
65627 +
65628 static inline void atomic_long_add(long i, atomic_long_t *l)
65629 {
65630 atomic64_t *v = (atomic64_t *)l;
65631 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65632 atomic64_add(i, v);
65633 }
65634
65635 +#ifdef CONFIG_PAX_REFCOUNT
65636 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65637 +{
65638 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65639 +
65640 + atomic64_add_unchecked(i, v);
65641 +}
65642 +#endif
65643 +
65644 static inline void atomic_long_sub(long i, atomic_long_t *l)
65645 {
65646 atomic64_t *v = (atomic64_t *)l;
65647 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65648 return (long)atomic64_inc_return(v);
65649 }
65650
65651 +#ifdef CONFIG_PAX_REFCOUNT
65652 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65653 +{
65654 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65655 +
65656 + return (long)atomic64_inc_return_unchecked(v);
65657 +}
65658 +#endif
65659 +
65660 static inline long atomic_long_dec_return(atomic_long_t *l)
65661 {
65662 atomic64_t *v = (atomic64_t *)l;
65663 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65664
65665 typedef atomic_t atomic_long_t;
65666
65667 +#ifdef CONFIG_PAX_REFCOUNT
65668 +typedef atomic_unchecked_t atomic_long_unchecked_t;
65669 +#else
65670 +typedef atomic_t atomic_long_unchecked_t;
65671 +#endif
65672 +
65673 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65674 static inline long atomic_long_read(atomic_long_t *l)
65675 {
65676 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65677 return (long)atomic_read(v);
65678 }
65679
65680 +#ifdef CONFIG_PAX_REFCOUNT
65681 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65682 +{
65683 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65684 +
65685 + return (long)atomic_read_unchecked(v);
65686 +}
65687 +#endif
65688 +
65689 static inline void atomic_long_set(atomic_long_t *l, long i)
65690 {
65691 atomic_t *v = (atomic_t *)l;
65692 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65693 atomic_set(v, i);
65694 }
65695
65696 +#ifdef CONFIG_PAX_REFCOUNT
65697 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65698 +{
65699 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65700 +
65701 + atomic_set_unchecked(v, i);
65702 +}
65703 +#endif
65704 +
65705 static inline void atomic_long_inc(atomic_long_t *l)
65706 {
65707 atomic_t *v = (atomic_t *)l;
65708 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65709 atomic_inc(v);
65710 }
65711
65712 +#ifdef CONFIG_PAX_REFCOUNT
65713 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65714 +{
65715 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65716 +
65717 + atomic_inc_unchecked(v);
65718 +}
65719 +#endif
65720 +
65721 static inline void atomic_long_dec(atomic_long_t *l)
65722 {
65723 atomic_t *v = (atomic_t *)l;
65724 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65725 atomic_dec(v);
65726 }
65727
65728 +#ifdef CONFIG_PAX_REFCOUNT
65729 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65730 +{
65731 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65732 +
65733 + atomic_dec_unchecked(v);
65734 +}
65735 +#endif
65736 +
65737 static inline void atomic_long_add(long i, atomic_long_t *l)
65738 {
65739 atomic_t *v = (atomic_t *)l;
65740 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65741 atomic_add(i, v);
65742 }
65743
65744 +#ifdef CONFIG_PAX_REFCOUNT
65745 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65746 +{
65747 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65748 +
65749 + atomic_add_unchecked(i, v);
65750 +}
65751 +#endif
65752 +
65753 static inline void atomic_long_sub(long i, atomic_long_t *l)
65754 {
65755 atomic_t *v = (atomic_t *)l;
65756 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65757 return (long)atomic_inc_return(v);
65758 }
65759
65760 +#ifdef CONFIG_PAX_REFCOUNT
65761 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65762 +{
65763 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65764 +
65765 + return (long)atomic_inc_return_unchecked(v);
65766 +}
65767 +#endif
65768 +
65769 static inline long atomic_long_dec_return(atomic_long_t *l)
65770 {
65771 atomic_t *v = (atomic_t *)l;
65772 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65773
65774 #endif /* BITS_PER_LONG == 64 */
65775
65776 +#ifdef CONFIG_PAX_REFCOUNT
65777 +static inline void pax_refcount_needs_these_functions(void)
65778 +{
65779 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65780 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65781 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65782 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65783 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65784 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65785 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65786 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65787 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65788 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65789 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65790 +
65791 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65792 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65793 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65794 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65795 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65796 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65797 +}
65798 +#else
65799 +#define atomic_read_unchecked(v) atomic_read(v)
65800 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65801 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65802 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65803 +#define atomic_inc_unchecked(v) atomic_inc(v)
65804 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65805 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65806 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65807 +#define atomic_dec_unchecked(v) atomic_dec(v)
65808 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65809 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65810 +
65811 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65812 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65813 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65814 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65815 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65816 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65817 +#endif
65818 +
65819 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65820 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65821 index b18ce4f..2ee2843 100644
65822 --- a/include/asm-generic/atomic64.h
65823 +++ b/include/asm-generic/atomic64.h
65824 @@ -16,6 +16,8 @@ typedef struct {
65825 long long counter;
65826 } atomic64_t;
65827
65828 +typedef atomic64_t atomic64_unchecked_t;
65829 +
65830 #define ATOMIC64_INIT(i) { (i) }
65831
65832 extern long long atomic64_read(const atomic64_t *v);
65833 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65834 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65835 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65836
65837 +#define atomic64_read_unchecked(v) atomic64_read(v)
65838 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65839 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65840 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65841 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65842 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65843 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65844 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65845 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65846 +
65847 #endif /* _ASM_GENERIC_ATOMIC64_H */
65848 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65849 index d48ddf0..656a0ac 100644
65850 --- a/include/asm-generic/bug.h
65851 +++ b/include/asm-generic/bug.h
65852 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65853
65854 #else /* !CONFIG_BUG */
65855 #ifndef HAVE_ARCH_BUG
65856 -#define BUG() do {} while(0)
65857 +#define BUG() do { for (;;) ; } while(0)
65858 #endif
65859
65860 #ifndef HAVE_ARCH_BUG_ON
65861 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65862 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65863 #endif
65864
65865 #ifndef HAVE_ARCH_WARN_ON
65866 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65867 index 1bfcfe5..e04c5c9 100644
65868 --- a/include/asm-generic/cache.h
65869 +++ b/include/asm-generic/cache.h
65870 @@ -6,7 +6,7 @@
65871 * cache lines need to provide their own cache.h.
65872 */
65873
65874 -#define L1_CACHE_SHIFT 5
65875 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65876 +#define L1_CACHE_SHIFT 5UL
65877 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65878
65879 #endif /* __ASM_GENERIC_CACHE_H */
65880 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65881 index 6920695..41038bc 100644
65882 --- a/include/asm-generic/dma-mapping-common.h
65883 +++ b/include/asm-generic/dma-mapping-common.h
65884 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65885 enum dma_data_direction dir,
65886 struct dma_attrs *attrs)
65887 {
65888 - struct dma_map_ops *ops = get_dma_ops(dev);
65889 + const struct dma_map_ops *ops = get_dma_ops(dev);
65890 dma_addr_t addr;
65891
65892 kmemcheck_mark_initialized(ptr, size);
65893 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65894 enum dma_data_direction dir,
65895 struct dma_attrs *attrs)
65896 {
65897 - struct dma_map_ops *ops = get_dma_ops(dev);
65898 + const struct dma_map_ops *ops = get_dma_ops(dev);
65899
65900 BUG_ON(!valid_dma_direction(dir));
65901 if (ops->unmap_page)
65902 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65903 int nents, enum dma_data_direction dir,
65904 struct dma_attrs *attrs)
65905 {
65906 - struct dma_map_ops *ops = get_dma_ops(dev);
65907 + const struct dma_map_ops *ops = get_dma_ops(dev);
65908 int i, ents;
65909 struct scatterlist *s;
65910
65911 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65912 int nents, enum dma_data_direction dir,
65913 struct dma_attrs *attrs)
65914 {
65915 - struct dma_map_ops *ops = get_dma_ops(dev);
65916 + const struct dma_map_ops *ops = get_dma_ops(dev);
65917
65918 BUG_ON(!valid_dma_direction(dir));
65919 debug_dma_unmap_sg(dev, sg, nents, dir);
65920 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65921 size_t offset, size_t size,
65922 enum dma_data_direction dir)
65923 {
65924 - struct dma_map_ops *ops = get_dma_ops(dev);
65925 + const struct dma_map_ops *ops = get_dma_ops(dev);
65926 dma_addr_t addr;
65927
65928 kmemcheck_mark_initialized(page_address(page) + offset, size);
65929 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65930 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65931 size_t size, enum dma_data_direction dir)
65932 {
65933 - struct dma_map_ops *ops = get_dma_ops(dev);
65934 + const struct dma_map_ops *ops = get_dma_ops(dev);
65935
65936 BUG_ON(!valid_dma_direction(dir));
65937 if (ops->unmap_page)
65938 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65939 size_t size,
65940 enum dma_data_direction dir)
65941 {
65942 - struct dma_map_ops *ops = get_dma_ops(dev);
65943 + const struct dma_map_ops *ops = get_dma_ops(dev);
65944
65945 BUG_ON(!valid_dma_direction(dir));
65946 if (ops->sync_single_for_cpu)
65947 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65948 dma_addr_t addr, size_t size,
65949 enum dma_data_direction dir)
65950 {
65951 - struct dma_map_ops *ops = get_dma_ops(dev);
65952 + const struct dma_map_ops *ops = get_dma_ops(dev);
65953
65954 BUG_ON(!valid_dma_direction(dir));
65955 if (ops->sync_single_for_device)
65956 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65957 size_t size,
65958 enum dma_data_direction dir)
65959 {
65960 - struct dma_map_ops *ops = get_dma_ops(dev);
65961 + const struct dma_map_ops *ops = get_dma_ops(dev);
65962
65963 BUG_ON(!valid_dma_direction(dir));
65964 if (ops->sync_single_range_for_cpu) {
65965 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65966 size_t size,
65967 enum dma_data_direction dir)
65968 {
65969 - struct dma_map_ops *ops = get_dma_ops(dev);
65970 + const struct dma_map_ops *ops = get_dma_ops(dev);
65971
65972 BUG_ON(!valid_dma_direction(dir));
65973 if (ops->sync_single_range_for_device) {
65974 @@ -155,7 +155,7 @@ static inline void
65975 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65976 int nelems, enum dma_data_direction dir)
65977 {
65978 - struct dma_map_ops *ops = get_dma_ops(dev);
65979 + const struct dma_map_ops *ops = get_dma_ops(dev);
65980
65981 BUG_ON(!valid_dma_direction(dir));
65982 if (ops->sync_sg_for_cpu)
65983 @@ -167,7 +167,7 @@ static inline void
65984 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65985 int nelems, enum dma_data_direction dir)
65986 {
65987 - struct dma_map_ops *ops = get_dma_ops(dev);
65988 + const struct dma_map_ops *ops = get_dma_ops(dev);
65989
65990 BUG_ON(!valid_dma_direction(dir));
65991 if (ops->sync_sg_for_device)
65992 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65993 index 0d68a1e..b74a761 100644
65994 --- a/include/asm-generic/emergency-restart.h
65995 +++ b/include/asm-generic/emergency-restart.h
65996 @@ -1,7 +1,7 @@
65997 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65998 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65999
66000 -static inline void machine_emergency_restart(void)
66001 +static inline __noreturn void machine_emergency_restart(void)
66002 {
66003 machine_restart(NULL);
66004 }
66005 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
66006 index 3c2344f..4590a7d 100644
66007 --- a/include/asm-generic/futex.h
66008 +++ b/include/asm-generic/futex.h
66009 @@ -6,7 +6,7 @@
66010 #include <asm/errno.h>
66011
66012 static inline int
66013 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66014 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
66015 {
66016 int op = (encoded_op >> 28) & 7;
66017 int cmp = (encoded_op >> 24) & 15;
66018 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66019 }
66020
66021 static inline int
66022 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
66023 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
66024 {
66025 return -ENOSYS;
66026 }
66027 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
66028 index 1ca3efc..e3dc852 100644
66029 --- a/include/asm-generic/int-l64.h
66030 +++ b/include/asm-generic/int-l64.h
66031 @@ -46,6 +46,8 @@ typedef unsigned int u32;
66032 typedef signed long s64;
66033 typedef unsigned long u64;
66034
66035 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
66036 +
66037 #define S8_C(x) x
66038 #define U8_C(x) x ## U
66039 #define S16_C(x) x
66040 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
66041 index f394147..b6152b9 100644
66042 --- a/include/asm-generic/int-ll64.h
66043 +++ b/include/asm-generic/int-ll64.h
66044 @@ -51,6 +51,8 @@ typedef unsigned int u32;
66045 typedef signed long long s64;
66046 typedef unsigned long long u64;
66047
66048 +typedef unsigned long long intoverflow_t;
66049 +
66050 #define S8_C(x) x
66051 #define U8_C(x) x ## U
66052 #define S16_C(x) x
66053 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66054 index e5f234a..cdb16b3 100644
66055 --- a/include/asm-generic/kmap_types.h
66056 +++ b/include/asm-generic/kmap_types.h
66057 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
66058 KMAP_D(16) KM_IRQ_PTE,
66059 KMAP_D(17) KM_NMI,
66060 KMAP_D(18) KM_NMI_PTE,
66061 -KMAP_D(19) KM_TYPE_NR
66062 +KMAP_D(19) KM_CLEARPAGE,
66063 +KMAP_D(20) KM_TYPE_NR
66064 };
66065
66066 #undef KMAP_D
66067 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66068 index 725612b..9cc513a 100644
66069 --- a/include/asm-generic/pgtable-nopmd.h
66070 +++ b/include/asm-generic/pgtable-nopmd.h
66071 @@ -1,14 +1,19 @@
66072 #ifndef _PGTABLE_NOPMD_H
66073 #define _PGTABLE_NOPMD_H
66074
66075 -#ifndef __ASSEMBLY__
66076 -
66077 #include <asm-generic/pgtable-nopud.h>
66078
66079 -struct mm_struct;
66080 -
66081 #define __PAGETABLE_PMD_FOLDED
66082
66083 +#define PMD_SHIFT PUD_SHIFT
66084 +#define PTRS_PER_PMD 1
66085 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66086 +#define PMD_MASK (~(PMD_SIZE-1))
66087 +
66088 +#ifndef __ASSEMBLY__
66089 +
66090 +struct mm_struct;
66091 +
66092 /*
66093 * Having the pmd type consist of a pud gets the size right, and allows
66094 * us to conceptually access the pud entry that this pmd is folded into
66095 @@ -16,11 +21,6 @@ struct mm_struct;
66096 */
66097 typedef struct { pud_t pud; } pmd_t;
66098
66099 -#define PMD_SHIFT PUD_SHIFT
66100 -#define PTRS_PER_PMD 1
66101 -#define PMD_SIZE (1UL << PMD_SHIFT)
66102 -#define PMD_MASK (~(PMD_SIZE-1))
66103 -
66104 /*
66105 * The "pud_xxx()" functions here are trivial for a folded two-level
66106 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66107 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66108 index 810431d..ccc3638 100644
66109 --- a/include/asm-generic/pgtable-nopud.h
66110 +++ b/include/asm-generic/pgtable-nopud.h
66111 @@ -1,10 +1,15 @@
66112 #ifndef _PGTABLE_NOPUD_H
66113 #define _PGTABLE_NOPUD_H
66114
66115 -#ifndef __ASSEMBLY__
66116 -
66117 #define __PAGETABLE_PUD_FOLDED
66118
66119 +#define PUD_SHIFT PGDIR_SHIFT
66120 +#define PTRS_PER_PUD 1
66121 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66122 +#define PUD_MASK (~(PUD_SIZE-1))
66123 +
66124 +#ifndef __ASSEMBLY__
66125 +
66126 /*
66127 * Having the pud type consist of a pgd gets the size right, and allows
66128 * us to conceptually access the pgd entry that this pud is folded into
66129 @@ -12,11 +17,6 @@
66130 */
66131 typedef struct { pgd_t pgd; } pud_t;
66132
66133 -#define PUD_SHIFT PGDIR_SHIFT
66134 -#define PTRS_PER_PUD 1
66135 -#define PUD_SIZE (1UL << PUD_SHIFT)
66136 -#define PUD_MASK (~(PUD_SIZE-1))
66137 -
66138 /*
66139 * The "pgd_xxx()" functions here are trivial for a folded two-level
66140 * setup: the pud is never bad, and a pud always exists (as it's folded
66141 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66142 index e2bd73e..fea8ed3 100644
66143 --- a/include/asm-generic/pgtable.h
66144 +++ b/include/asm-generic/pgtable.h
66145 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
66146 unsigned long size);
66147 #endif
66148
66149 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66150 +static inline unsigned long pax_open_kernel(void) { return 0; }
66151 +#endif
66152 +
66153 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66154 +static inline unsigned long pax_close_kernel(void) { return 0; }
66155 +#endif
66156 +
66157 #endif /* !__ASSEMBLY__ */
66158
66159 #endif /* _ASM_GENERIC_PGTABLE_H */
66160 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66161 index b6e818f..21aa58a 100644
66162 --- a/include/asm-generic/vmlinux.lds.h
66163 +++ b/include/asm-generic/vmlinux.lds.h
66164 @@ -199,6 +199,7 @@
66165 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66166 VMLINUX_SYMBOL(__start_rodata) = .; \
66167 *(.rodata) *(.rodata.*) \
66168 + *(.data.read_only) \
66169 *(__vermagic) /* Kernel version magic */ \
66170 *(__markers_strings) /* Markers: strings */ \
66171 *(__tracepoints_strings)/* Tracepoints: strings */ \
66172 @@ -656,22 +657,24 @@
66173 * section in the linker script will go there too. @phdr should have
66174 * a leading colon.
66175 *
66176 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66177 + * Note that this macros defines per_cpu_load as an absolute symbol.
66178 * If there is no need to put the percpu section at a predetermined
66179 * address, use PERCPU().
66180 */
66181 #define PERCPU_VADDR(vaddr, phdr) \
66182 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66183 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66184 + per_cpu_load = .; \
66185 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66186 - LOAD_OFFSET) { \
66187 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66188 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66189 *(.data.percpu.first) \
66190 - *(.data.percpu.page_aligned) \
66191 *(.data.percpu) \
66192 + . = ALIGN(PAGE_SIZE); \
66193 + *(.data.percpu.page_aligned) \
66194 *(.data.percpu.shared_aligned) \
66195 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66196 } phdr \
66197 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66198 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66199
66200 /**
66201 * PERCPU - define output section for percpu area, simple version
66202 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66203 index ebab6a6..351dba1 100644
66204 --- a/include/drm/drmP.h
66205 +++ b/include/drm/drmP.h
66206 @@ -71,6 +71,7 @@
66207 #include <linux/workqueue.h>
66208 #include <linux/poll.h>
66209 #include <asm/pgalloc.h>
66210 +#include <asm/local.h>
66211 #include "drm.h"
66212
66213 #include <linux/idr.h>
66214 @@ -814,7 +815,7 @@ struct drm_driver {
66215 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66216
66217 /* Driver private ops for this object */
66218 - struct vm_operations_struct *gem_vm_ops;
66219 + const struct vm_operations_struct *gem_vm_ops;
66220
66221 int major;
66222 int minor;
66223 @@ -917,7 +918,7 @@ struct drm_device {
66224
66225 /** \name Usage Counters */
66226 /*@{ */
66227 - int open_count; /**< Outstanding files open */
66228 + local_t open_count; /**< Outstanding files open */
66229 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66230 atomic_t vma_count; /**< Outstanding vma areas open */
66231 int buf_use; /**< Buffers in use -- cannot alloc */
66232 @@ -928,7 +929,7 @@ struct drm_device {
66233 /*@{ */
66234 unsigned long counters;
66235 enum drm_stat_type types[15];
66236 - atomic_t counts[15];
66237 + atomic_unchecked_t counts[15];
66238 /*@} */
66239
66240 struct list_head filelist;
66241 @@ -1016,7 +1017,7 @@ struct drm_device {
66242 struct pci_controller *hose;
66243 #endif
66244 struct drm_sg_mem *sg; /**< Scatter gather memory */
66245 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
66246 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
66247 void *dev_private; /**< device private data */
66248 void *mm_private;
66249 struct address_space *dev_mapping;
66250 @@ -1042,11 +1043,11 @@ struct drm_device {
66251 spinlock_t object_name_lock;
66252 struct idr object_name_idr;
66253 atomic_t object_count;
66254 - atomic_t object_memory;
66255 + atomic_unchecked_t object_memory;
66256 atomic_t pin_count;
66257 - atomic_t pin_memory;
66258 + atomic_unchecked_t pin_memory;
66259 atomic_t gtt_count;
66260 - atomic_t gtt_memory;
66261 + atomic_unchecked_t gtt_memory;
66262 uint32_t gtt_total;
66263 uint32_t invalidate_domains; /* domains pending invalidation */
66264 uint32_t flush_domains; /* domains pending flush */
66265 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66266 index b29e201..3413cc9 100644
66267 --- a/include/drm/drm_crtc_helper.h
66268 +++ b/include/drm/drm_crtc_helper.h
66269 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66270
66271 /* reload the current crtc LUT */
66272 void (*load_lut)(struct drm_crtc *crtc);
66273 -};
66274 +} __no_const;
66275
66276 struct drm_encoder_helper_funcs {
66277 void (*dpms)(struct drm_encoder *encoder, int mode);
66278 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66279 struct drm_connector *connector);
66280 /* disable encoder when not in use - more explicit than dpms off */
66281 void (*disable)(struct drm_encoder *encoder);
66282 -};
66283 +} __no_const;
66284
66285 struct drm_connector_helper_funcs {
66286 int (*get_modes)(struct drm_connector *connector);
66287 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66288 index b199170..6f9e64c 100644
66289 --- a/include/drm/ttm/ttm_memory.h
66290 +++ b/include/drm/ttm/ttm_memory.h
66291 @@ -47,7 +47,7 @@
66292
66293 struct ttm_mem_shrink {
66294 int (*do_shrink) (struct ttm_mem_shrink *);
66295 -};
66296 +} __no_const;
66297
66298 /**
66299 * struct ttm_mem_global - Global memory accounting structure.
66300 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66301 index e86dfca..40cc55f 100644
66302 --- a/include/linux/a.out.h
66303 +++ b/include/linux/a.out.h
66304 @@ -39,6 +39,14 @@ enum machine_type {
66305 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66306 };
66307
66308 +/* Constants for the N_FLAGS field */
66309 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66310 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66311 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66312 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66313 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66314 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66315 +
66316 #if !defined (N_MAGIC)
66317 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66318 #endif
66319 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66320 index 817b237..62c10bc 100644
66321 --- a/include/linux/atmdev.h
66322 +++ b/include/linux/atmdev.h
66323 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66324 #endif
66325
66326 struct k_atm_aal_stats {
66327 -#define __HANDLE_ITEM(i) atomic_t i
66328 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66329 __AAL_STAT_ITEMS
66330 #undef __HANDLE_ITEM
66331 };
66332 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66333 index 0f5f578..8c4f884 100644
66334 --- a/include/linux/backlight.h
66335 +++ b/include/linux/backlight.h
66336 @@ -36,18 +36,18 @@ struct backlight_device;
66337 struct fb_info;
66338
66339 struct backlight_ops {
66340 - unsigned int options;
66341 + const unsigned int options;
66342
66343 #define BL_CORE_SUSPENDRESUME (1 << 0)
66344
66345 /* Notify the backlight driver some property has changed */
66346 - int (*update_status)(struct backlight_device *);
66347 + int (* const update_status)(struct backlight_device *);
66348 /* Return the current backlight brightness (accounting for power,
66349 fb_blank etc.) */
66350 - int (*get_brightness)(struct backlight_device *);
66351 + int (* const get_brightness)(struct backlight_device *);
66352 /* Check if given framebuffer device is the one bound to this backlight;
66353 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66354 - int (*check_fb)(struct fb_info *);
66355 + int (* const check_fb)(struct fb_info *);
66356 };
66357
66358 /* This structure defines all the properties of a backlight */
66359 @@ -86,7 +86,7 @@ struct backlight_device {
66360 registered this device has been unloaded, and if class_get_devdata()
66361 points to something in the body of that driver, it is also invalid. */
66362 struct mutex ops_lock;
66363 - struct backlight_ops *ops;
66364 + const struct backlight_ops *ops;
66365
66366 /* The framebuffer notifier block */
66367 struct notifier_block fb_notif;
66368 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66369 }
66370
66371 extern struct backlight_device *backlight_device_register(const char *name,
66372 - struct device *dev, void *devdata, struct backlight_ops *ops);
66373 + struct device *dev, void *devdata, const struct backlight_ops *ops);
66374 extern void backlight_device_unregister(struct backlight_device *bd);
66375 extern void backlight_force_update(struct backlight_device *bd,
66376 enum backlight_update_reason reason);
66377 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66378 index a3d802e..93a2ef4 100644
66379 --- a/include/linux/binfmts.h
66380 +++ b/include/linux/binfmts.h
66381 @@ -18,7 +18,7 @@ struct pt_regs;
66382 #define BINPRM_BUF_SIZE 128
66383
66384 #ifdef __KERNEL__
66385 -#include <linux/list.h>
66386 +#include <linux/sched.h>
66387
66388 #define CORENAME_MAX_SIZE 128
66389
66390 @@ -58,6 +58,7 @@ struct linux_binprm{
66391 unsigned interp_flags;
66392 unsigned interp_data;
66393 unsigned long loader, exec;
66394 + char tcomm[TASK_COMM_LEN];
66395 };
66396
66397 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66398 @@ -83,6 +84,7 @@ struct linux_binfmt {
66399 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66400 int (*load_shlib)(struct file *);
66401 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66402 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66403 unsigned long min_coredump; /* minimal dump size */
66404 int hasvdso;
66405 };
66406 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66407 index 5eb6cb0..a2906d2 100644
66408 --- a/include/linux/blkdev.h
66409 +++ b/include/linux/blkdev.h
66410 @@ -1281,7 +1281,7 @@ struct block_device_operations {
66411 int (*revalidate_disk) (struct gendisk *);
66412 int (*getgeo)(struct block_device *, struct hd_geometry *);
66413 struct module *owner;
66414 -};
66415 +} __do_const;
66416
66417 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66418 unsigned long);
66419 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66420 index 3b73b99..629d21b 100644
66421 --- a/include/linux/blktrace_api.h
66422 +++ b/include/linux/blktrace_api.h
66423 @@ -160,7 +160,7 @@ struct blk_trace {
66424 struct dentry *dir;
66425 struct dentry *dropped_file;
66426 struct dentry *msg_file;
66427 - atomic_t dropped;
66428 + atomic_unchecked_t dropped;
66429 };
66430
66431 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66432 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66433 index 83195fb..0b0f77d 100644
66434 --- a/include/linux/byteorder/little_endian.h
66435 +++ b/include/linux/byteorder/little_endian.h
66436 @@ -42,51 +42,51 @@
66437
66438 static inline __le64 __cpu_to_le64p(const __u64 *p)
66439 {
66440 - return (__force __le64)*p;
66441 + return (__force const __le64)*p;
66442 }
66443 static inline __u64 __le64_to_cpup(const __le64 *p)
66444 {
66445 - return (__force __u64)*p;
66446 + return (__force const __u64)*p;
66447 }
66448 static inline __le32 __cpu_to_le32p(const __u32 *p)
66449 {
66450 - return (__force __le32)*p;
66451 + return (__force const __le32)*p;
66452 }
66453 static inline __u32 __le32_to_cpup(const __le32 *p)
66454 {
66455 - return (__force __u32)*p;
66456 + return (__force const __u32)*p;
66457 }
66458 static inline __le16 __cpu_to_le16p(const __u16 *p)
66459 {
66460 - return (__force __le16)*p;
66461 + return (__force const __le16)*p;
66462 }
66463 static inline __u16 __le16_to_cpup(const __le16 *p)
66464 {
66465 - return (__force __u16)*p;
66466 + return (__force const __u16)*p;
66467 }
66468 static inline __be64 __cpu_to_be64p(const __u64 *p)
66469 {
66470 - return (__force __be64)__swab64p(p);
66471 + return (__force const __be64)__swab64p(p);
66472 }
66473 static inline __u64 __be64_to_cpup(const __be64 *p)
66474 {
66475 - return __swab64p((__u64 *)p);
66476 + return __swab64p((const __u64 *)p);
66477 }
66478 static inline __be32 __cpu_to_be32p(const __u32 *p)
66479 {
66480 - return (__force __be32)__swab32p(p);
66481 + return (__force const __be32)__swab32p(p);
66482 }
66483 static inline __u32 __be32_to_cpup(const __be32 *p)
66484 {
66485 - return __swab32p((__u32 *)p);
66486 + return __swab32p((const __u32 *)p);
66487 }
66488 static inline __be16 __cpu_to_be16p(const __u16 *p)
66489 {
66490 - return (__force __be16)__swab16p(p);
66491 + return (__force const __be16)__swab16p(p);
66492 }
66493 static inline __u16 __be16_to_cpup(const __be16 *p)
66494 {
66495 - return __swab16p((__u16 *)p);
66496 + return __swab16p((const __u16 *)p);
66497 }
66498 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66499 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66500 diff --git a/include/linux/cache.h b/include/linux/cache.h
66501 index 97e2488..e7576b9 100644
66502 --- a/include/linux/cache.h
66503 +++ b/include/linux/cache.h
66504 @@ -16,6 +16,10 @@
66505 #define __read_mostly
66506 #endif
66507
66508 +#ifndef __read_only
66509 +#define __read_only __read_mostly
66510 +#endif
66511 +
66512 #ifndef ____cacheline_aligned
66513 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66514 #endif
66515 diff --git a/include/linux/capability.h b/include/linux/capability.h
66516 index c8f2a5f7..1618a5c 100644
66517 --- a/include/linux/capability.h
66518 +++ b/include/linux/capability.h
66519 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66520 (security_real_capable_noaudit((t), (cap)) == 0)
66521
66522 extern int capable(int cap);
66523 +int capable_nolog(int cap);
66524
66525 /* audit system wants to get cap info from files as well */
66526 struct dentry;
66527 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66528 index 450fa59..86019fb 100644
66529 --- a/include/linux/compiler-gcc4.h
66530 +++ b/include/linux/compiler-gcc4.h
66531 @@ -36,4 +36,16 @@
66532 the kernel context */
66533 #define __cold __attribute__((__cold__))
66534
66535 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66536 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66537 +#define __bos0(ptr) __bos((ptr), 0)
66538 +#define __bos1(ptr) __bos((ptr), 1)
66539 +
66540 +#if __GNUC_MINOR__ >= 5
66541 +#ifdef CONSTIFY_PLUGIN
66542 +#define __no_const __attribute__((no_const))
66543 +#define __do_const __attribute__((do_const))
66544 +#endif
66545 +#endif
66546 +
66547 #endif
66548 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66549 index 04fb513..fd6477b 100644
66550 --- a/include/linux/compiler.h
66551 +++ b/include/linux/compiler.h
66552 @@ -5,11 +5,14 @@
66553
66554 #ifdef __CHECKER__
66555 # define __user __attribute__((noderef, address_space(1)))
66556 +# define __force_user __force __user
66557 # define __kernel /* default address space */
66558 +# define __force_kernel __force __kernel
66559 # define __safe __attribute__((safe))
66560 # define __force __attribute__((force))
66561 # define __nocast __attribute__((nocast))
66562 # define __iomem __attribute__((noderef, address_space(2)))
66563 +# define __force_iomem __force __iomem
66564 # define __acquires(x) __attribute__((context(x,0,1)))
66565 # define __releases(x) __attribute__((context(x,1,0)))
66566 # define __acquire(x) __context__(x,1)
66567 @@ -17,13 +20,34 @@
66568 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66569 extern void __chk_user_ptr(const volatile void __user *);
66570 extern void __chk_io_ptr(const volatile void __iomem *);
66571 +#elif defined(CHECKER_PLUGIN)
66572 +//# define __user
66573 +//# define __force_user
66574 +//# define __kernel
66575 +//# define __force_kernel
66576 +# define __safe
66577 +# define __force
66578 +# define __nocast
66579 +# define __iomem
66580 +# define __force_iomem
66581 +# define __chk_user_ptr(x) (void)0
66582 +# define __chk_io_ptr(x) (void)0
66583 +# define __builtin_warning(x, y...) (1)
66584 +# define __acquires(x)
66585 +# define __releases(x)
66586 +# define __acquire(x) (void)0
66587 +# define __release(x) (void)0
66588 +# define __cond_lock(x,c) (c)
66589 #else
66590 # define __user
66591 +# define __force_user
66592 # define __kernel
66593 +# define __force_kernel
66594 # define __safe
66595 # define __force
66596 # define __nocast
66597 # define __iomem
66598 +# define __force_iomem
66599 # define __chk_user_ptr(x) (void)0
66600 # define __chk_io_ptr(x) (void)0
66601 # define __builtin_warning(x, y...) (1)
66602 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66603 # define __attribute_const__ /* unimplemented */
66604 #endif
66605
66606 +#ifndef __no_const
66607 +# define __no_const
66608 +#endif
66609 +
66610 +#ifndef __do_const
66611 +# define __do_const
66612 +#endif
66613 +
66614 /*
66615 * Tell gcc if a function is cold. The compiler will assume any path
66616 * directly leading to the call is unlikely.
66617 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66618 #define __cold
66619 #endif
66620
66621 +#ifndef __alloc_size
66622 +#define __alloc_size(...)
66623 +#endif
66624 +
66625 +#ifndef __bos
66626 +#define __bos(ptr, arg)
66627 +#endif
66628 +
66629 +#ifndef __bos0
66630 +#define __bos0(ptr)
66631 +#endif
66632 +
66633 +#ifndef __bos1
66634 +#define __bos1(ptr)
66635 +#endif
66636 +
66637 /* Simple shorthand for a section definition */
66638 #ifndef __section
66639 # define __section(S) __attribute__ ((__section__(#S)))
66640 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66641 * use is to mediate communication between process-level code and irq/NMI
66642 * handlers, all running on the same CPU.
66643 */
66644 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66645 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66646 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66647
66648 #endif /* __LINUX_COMPILER_H */
66649 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66650 index fd92988..a3164bd 100644
66651 --- a/include/linux/crypto.h
66652 +++ b/include/linux/crypto.h
66653 @@ -394,7 +394,7 @@ struct cipher_tfm {
66654 const u8 *key, unsigned int keylen);
66655 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66656 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66657 -};
66658 +} __no_const;
66659
66660 struct hash_tfm {
66661 int (*init)(struct hash_desc *desc);
66662 @@ -415,13 +415,13 @@ struct compress_tfm {
66663 int (*cot_decompress)(struct crypto_tfm *tfm,
66664 const u8 *src, unsigned int slen,
66665 u8 *dst, unsigned int *dlen);
66666 -};
66667 +} __no_const;
66668
66669 struct rng_tfm {
66670 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66671 unsigned int dlen);
66672 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66673 -};
66674 +} __no_const;
66675
66676 #define crt_ablkcipher crt_u.ablkcipher
66677 #define crt_aead crt_u.aead
66678 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66679 index 30b93b2..cd7a8db 100644
66680 --- a/include/linux/dcache.h
66681 +++ b/include/linux/dcache.h
66682 @@ -119,6 +119,8 @@ struct dentry {
66683 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66684 };
66685
66686 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66687 +
66688 /*
66689 * dentry->d_lock spinlock nesting subclasses:
66690 *
66691 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66692 index 3e9bd6a..f4e1aa0 100644
66693 --- a/include/linux/decompress/mm.h
66694 +++ b/include/linux/decompress/mm.h
66695 @@ -78,7 +78,7 @@ static void free(void *where)
66696 * warnings when not needed (indeed large_malloc / large_free are not
66697 * needed by inflate */
66698
66699 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66700 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66701 #define free(a) kfree(a)
66702
66703 #define large_malloc(a) vmalloc(a)
66704 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66705 index 91b7618..92a93d32 100644
66706 --- a/include/linux/dma-mapping.h
66707 +++ b/include/linux/dma-mapping.h
66708 @@ -16,51 +16,51 @@ enum dma_data_direction {
66709 };
66710
66711 struct dma_map_ops {
66712 - void* (*alloc_coherent)(struct device *dev, size_t size,
66713 + void* (* const alloc_coherent)(struct device *dev, size_t size,
66714 dma_addr_t *dma_handle, gfp_t gfp);
66715 - void (*free_coherent)(struct device *dev, size_t size,
66716 + void (* const free_coherent)(struct device *dev, size_t size,
66717 void *vaddr, dma_addr_t dma_handle);
66718 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
66719 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66720 unsigned long offset, size_t size,
66721 enum dma_data_direction dir,
66722 struct dma_attrs *attrs);
66723 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66724 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66725 size_t size, enum dma_data_direction dir,
66726 struct dma_attrs *attrs);
66727 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
66728 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66729 int nents, enum dma_data_direction dir,
66730 struct dma_attrs *attrs);
66731 - void (*unmap_sg)(struct device *dev,
66732 + void (* const unmap_sg)(struct device *dev,
66733 struct scatterlist *sg, int nents,
66734 enum dma_data_direction dir,
66735 struct dma_attrs *attrs);
66736 - void (*sync_single_for_cpu)(struct device *dev,
66737 + void (* const sync_single_for_cpu)(struct device *dev,
66738 dma_addr_t dma_handle, size_t size,
66739 enum dma_data_direction dir);
66740 - void (*sync_single_for_device)(struct device *dev,
66741 + void (* const sync_single_for_device)(struct device *dev,
66742 dma_addr_t dma_handle, size_t size,
66743 enum dma_data_direction dir);
66744 - void (*sync_single_range_for_cpu)(struct device *dev,
66745 + void (* const sync_single_range_for_cpu)(struct device *dev,
66746 dma_addr_t dma_handle,
66747 unsigned long offset,
66748 size_t size,
66749 enum dma_data_direction dir);
66750 - void (*sync_single_range_for_device)(struct device *dev,
66751 + void (* const sync_single_range_for_device)(struct device *dev,
66752 dma_addr_t dma_handle,
66753 unsigned long offset,
66754 size_t size,
66755 enum dma_data_direction dir);
66756 - void (*sync_sg_for_cpu)(struct device *dev,
66757 + void (* const sync_sg_for_cpu)(struct device *dev,
66758 struct scatterlist *sg, int nents,
66759 enum dma_data_direction dir);
66760 - void (*sync_sg_for_device)(struct device *dev,
66761 + void (* const sync_sg_for_device)(struct device *dev,
66762 struct scatterlist *sg, int nents,
66763 enum dma_data_direction dir);
66764 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66765 - int (*dma_supported)(struct device *dev, u64 mask);
66766 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66767 + int (* const dma_supported)(struct device *dev, u64 mask);
66768 int (*set_dma_mask)(struct device *dev, u64 mask);
66769 int is_phys;
66770 -};
66771 +} __do_const;
66772
66773 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66774
66775 diff --git a/include/linux/dst.h b/include/linux/dst.h
66776 index e26fed8..b976d9f 100644
66777 --- a/include/linux/dst.h
66778 +++ b/include/linux/dst.h
66779 @@ -380,7 +380,7 @@ struct dst_node
66780 struct thread_pool *pool;
66781
66782 /* Transaction IDs live here */
66783 - atomic_long_t gen;
66784 + atomic_long_unchecked_t gen;
66785
66786 /*
66787 * How frequently and how many times transaction
66788 diff --git a/include/linux/elf.h b/include/linux/elf.h
66789 index 90a4ed0..d652617 100644
66790 --- a/include/linux/elf.h
66791 +++ b/include/linux/elf.h
66792 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66793 #define PT_GNU_EH_FRAME 0x6474e550
66794
66795 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66796 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66797 +
66798 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66799 +
66800 +/* Constants for the e_flags field */
66801 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66802 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66803 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66804 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66805 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66806 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66807
66808 /* These constants define the different elf file types */
66809 #define ET_NONE 0
66810 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66811 #define DT_DEBUG 21
66812 #define DT_TEXTREL 22
66813 #define DT_JMPREL 23
66814 +#define DT_FLAGS 30
66815 + #define DF_TEXTREL 0x00000004
66816 #define DT_ENCODING 32
66817 #define OLD_DT_LOOS 0x60000000
66818 #define DT_LOOS 0x6000000d
66819 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66820 #define PF_W 0x2
66821 #define PF_X 0x1
66822
66823 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66824 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66825 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66826 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66827 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66828 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66829 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66830 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66831 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66832 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66833 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66834 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66835 +
66836 typedef struct elf32_phdr{
66837 Elf32_Word p_type;
66838 Elf32_Off p_offset;
66839 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66840 #define EI_OSABI 7
66841 #define EI_PAD 8
66842
66843 +#define EI_PAX 14
66844 +
66845 #define ELFMAG0 0x7f /* EI_MAG */
66846 #define ELFMAG1 'E'
66847 #define ELFMAG2 'L'
66848 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66849 #define elf_phdr elf32_phdr
66850 #define elf_note elf32_note
66851 #define elf_addr_t Elf32_Off
66852 +#define elf_dyn Elf32_Dyn
66853
66854 #else
66855
66856 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66857 #define elf_phdr elf64_phdr
66858 #define elf_note elf64_note
66859 #define elf_addr_t Elf64_Off
66860 +#define elf_dyn Elf64_Dyn
66861
66862 #endif
66863
66864 diff --git a/include/linux/fs.h b/include/linux/fs.h
66865 index 1b9a47a..6fe2934 100644
66866 --- a/include/linux/fs.h
66867 +++ b/include/linux/fs.h
66868 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66869 unsigned long, unsigned long);
66870
66871 struct address_space_operations {
66872 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66873 - int (*readpage)(struct file *, struct page *);
66874 - void (*sync_page)(struct page *);
66875 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66876 + int (* const readpage)(struct file *, struct page *);
66877 + void (* const sync_page)(struct page *);
66878
66879 /* Write back some dirty pages from this mapping. */
66880 - int (*writepages)(struct address_space *, struct writeback_control *);
66881 + int (* const writepages)(struct address_space *, struct writeback_control *);
66882
66883 /* Set a page dirty. Return true if this dirtied it */
66884 - int (*set_page_dirty)(struct page *page);
66885 + int (* const set_page_dirty)(struct page *page);
66886
66887 - int (*readpages)(struct file *filp, struct address_space *mapping,
66888 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66889 struct list_head *pages, unsigned nr_pages);
66890
66891 - int (*write_begin)(struct file *, struct address_space *mapping,
66892 + int (* const write_begin)(struct file *, struct address_space *mapping,
66893 loff_t pos, unsigned len, unsigned flags,
66894 struct page **pagep, void **fsdata);
66895 - int (*write_end)(struct file *, struct address_space *mapping,
66896 + int (* const write_end)(struct file *, struct address_space *mapping,
66897 loff_t pos, unsigned len, unsigned copied,
66898 struct page *page, void *fsdata);
66899
66900 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66901 - sector_t (*bmap)(struct address_space *, sector_t);
66902 - void (*invalidatepage) (struct page *, unsigned long);
66903 - int (*releasepage) (struct page *, gfp_t);
66904 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66905 + sector_t (* const bmap)(struct address_space *, sector_t);
66906 + void (* const invalidatepage) (struct page *, unsigned long);
66907 + int (* const releasepage) (struct page *, gfp_t);
66908 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66909 loff_t offset, unsigned long nr_segs);
66910 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66911 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66912 void **, unsigned long *);
66913 /* migrate the contents of a page to the specified target */
66914 - int (*migratepage) (struct address_space *,
66915 + int (* const migratepage) (struct address_space *,
66916 struct page *, struct page *);
66917 - int (*launder_page) (struct page *);
66918 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66919 + int (* const launder_page) (struct page *);
66920 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66921 unsigned long);
66922 - int (*error_remove_page)(struct address_space *, struct page *);
66923 + int (* const error_remove_page)(struct address_space *, struct page *);
66924 };
66925
66926 /*
66927 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66928 typedef struct files_struct *fl_owner_t;
66929
66930 struct file_lock_operations {
66931 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66932 - void (*fl_release_private)(struct file_lock *);
66933 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66934 + void (* const fl_release_private)(struct file_lock *);
66935 };
66936
66937 struct lock_manager_operations {
66938 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66939 - void (*fl_notify)(struct file_lock *); /* unblock callback */
66940 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66941 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66942 - void (*fl_release_private)(struct file_lock *);
66943 - void (*fl_break)(struct file_lock *);
66944 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
66945 - int (*fl_change)(struct file_lock **, int);
66946 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66947 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
66948 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66949 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66950 + void (* const fl_release_private)(struct file_lock *);
66951 + void (* const fl_break)(struct file_lock *);
66952 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66953 + int (* const fl_change)(struct file_lock **, int);
66954 };
66955
66956 struct lock_manager {
66957 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66958 unsigned int fi_flags; /* Flags as passed from user */
66959 unsigned int fi_extents_mapped; /* Number of mapped extents */
66960 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66961 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66962 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66963 * array */
66964 };
66965 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66966 @@ -1512,7 +1512,8 @@ struct file_operations {
66967 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66968 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66969 int (*setlease)(struct file *, long, struct file_lock **);
66970 -};
66971 +} __do_const;
66972 +typedef struct file_operations __no_const file_operations_no_const;
66973
66974 struct inode_operations {
66975 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66976 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66977 unsigned long, loff_t *);
66978
66979 struct super_operations {
66980 - struct inode *(*alloc_inode)(struct super_block *sb);
66981 - void (*destroy_inode)(struct inode *);
66982 + struct inode *(* const alloc_inode)(struct super_block *sb);
66983 + void (* const destroy_inode)(struct inode *);
66984
66985 - void (*dirty_inode) (struct inode *);
66986 - int (*write_inode) (struct inode *, int);
66987 - void (*drop_inode) (struct inode *);
66988 - void (*delete_inode) (struct inode *);
66989 - void (*put_super) (struct super_block *);
66990 - void (*write_super) (struct super_block *);
66991 - int (*sync_fs)(struct super_block *sb, int wait);
66992 - int (*freeze_fs) (struct super_block *);
66993 - int (*unfreeze_fs) (struct super_block *);
66994 - int (*statfs) (struct dentry *, struct kstatfs *);
66995 - int (*remount_fs) (struct super_block *, int *, char *);
66996 - void (*clear_inode) (struct inode *);
66997 - void (*umount_begin) (struct super_block *);
66998 + void (* const dirty_inode) (struct inode *);
66999 + int (* const write_inode) (struct inode *, int);
67000 + void (* const drop_inode) (struct inode *);
67001 + void (* const delete_inode) (struct inode *);
67002 + void (* const put_super) (struct super_block *);
67003 + void (* const write_super) (struct super_block *);
67004 + int (* const sync_fs)(struct super_block *sb, int wait);
67005 + int (* const freeze_fs) (struct super_block *);
67006 + int (* const unfreeze_fs) (struct super_block *);
67007 + int (* const statfs) (struct dentry *, struct kstatfs *);
67008 + int (* const remount_fs) (struct super_block *, int *, char *);
67009 + void (* const clear_inode) (struct inode *);
67010 + void (* const umount_begin) (struct super_block *);
67011
67012 - int (*show_options)(struct seq_file *, struct vfsmount *);
67013 - int (*show_stats)(struct seq_file *, struct vfsmount *);
67014 + int (* const show_options)(struct seq_file *, struct vfsmount *);
67015 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
67016 #ifdef CONFIG_QUOTA
67017 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
67018 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67019 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
67020 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67021 #endif
67022 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67023 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67024 };
67025
67026 /*
67027 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67028 index 78a05bf..2a7d3e1 100644
67029 --- a/include/linux/fs_struct.h
67030 +++ b/include/linux/fs_struct.h
67031 @@ -4,7 +4,7 @@
67032 #include <linux/path.h>
67033
67034 struct fs_struct {
67035 - int users;
67036 + atomic_t users;
67037 rwlock_t lock;
67038 int umask;
67039 int in_exec;
67040 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67041 index 7be0c6f..2f63a2b 100644
67042 --- a/include/linux/fscache-cache.h
67043 +++ b/include/linux/fscache-cache.h
67044 @@ -116,7 +116,7 @@ struct fscache_operation {
67045 #endif
67046 };
67047
67048 -extern atomic_t fscache_op_debug_id;
67049 +extern atomic_unchecked_t fscache_op_debug_id;
67050 extern const struct slow_work_ops fscache_op_slow_work_ops;
67051
67052 extern void fscache_enqueue_operation(struct fscache_operation *);
67053 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67054 fscache_operation_release_t release)
67055 {
67056 atomic_set(&op->usage, 1);
67057 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67058 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67059 op->release = release;
67060 INIT_LIST_HEAD(&op->pend_link);
67061 fscache_set_op_state(op, "Init");
67062 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
67063 index 4d6f47b..00bcedb 100644
67064 --- a/include/linux/fsnotify_backend.h
67065 +++ b/include/linux/fsnotify_backend.h
67066 @@ -86,6 +86,7 @@ struct fsnotify_ops {
67067 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
67068 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
67069 };
67070 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
67071
67072 /*
67073 * A group is a "thing" that wants to receive notification about filesystem
67074 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67075 index 4ec5e67..42f1eb9 100644
67076 --- a/include/linux/ftrace_event.h
67077 +++ b/include/linux/ftrace_event.h
67078 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
67079 int filter_type);
67080 extern int trace_define_common_fields(struct ftrace_event_call *call);
67081
67082 -#define is_signed_type(type) (((type)(-1)) < 0)
67083 +#define is_signed_type(type) (((type)(-1)) < (type)1)
67084
67085 int trace_set_clr_event(const char *system, const char *event, int set);
67086
67087 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67088 index 297df45..b6a74ff 100644
67089 --- a/include/linux/genhd.h
67090 +++ b/include/linux/genhd.h
67091 @@ -161,7 +161,7 @@ struct gendisk {
67092
67093 struct timer_rand_state *random;
67094
67095 - atomic_t sync_io; /* RAID */
67096 + atomic_unchecked_t sync_io; /* RAID */
67097 struct work_struct async_notify;
67098 #ifdef CONFIG_BLK_DEV_INTEGRITY
67099 struct blk_integrity *integrity;
67100 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67101 new file mode 100644
67102 index 0000000..af663cf
67103 --- /dev/null
67104 +++ b/include/linux/gracl.h
67105 @@ -0,0 +1,319 @@
67106 +#ifndef GR_ACL_H
67107 +#define GR_ACL_H
67108 +
67109 +#include <linux/grdefs.h>
67110 +#include <linux/resource.h>
67111 +#include <linux/capability.h>
67112 +#include <linux/dcache.h>
67113 +#include <asm/resource.h>
67114 +
67115 +/* Major status information */
67116 +
67117 +#define GR_VERSION "grsecurity 2.9"
67118 +#define GRSECURITY_VERSION 0x2900
67119 +
67120 +enum {
67121 + GR_SHUTDOWN = 0,
67122 + GR_ENABLE = 1,
67123 + GR_SPROLE = 2,
67124 + GR_RELOAD = 3,
67125 + GR_SEGVMOD = 4,
67126 + GR_STATUS = 5,
67127 + GR_UNSPROLE = 6,
67128 + GR_PASSSET = 7,
67129 + GR_SPROLEPAM = 8,
67130 +};
67131 +
67132 +/* Password setup definitions
67133 + * kernel/grhash.c */
67134 +enum {
67135 + GR_PW_LEN = 128,
67136 + GR_SALT_LEN = 16,
67137 + GR_SHA_LEN = 32,
67138 +};
67139 +
67140 +enum {
67141 + GR_SPROLE_LEN = 64,
67142 +};
67143 +
67144 +enum {
67145 + GR_NO_GLOB = 0,
67146 + GR_REG_GLOB,
67147 + GR_CREATE_GLOB
67148 +};
67149 +
67150 +#define GR_NLIMITS 32
67151 +
67152 +/* Begin Data Structures */
67153 +
67154 +struct sprole_pw {
67155 + unsigned char *rolename;
67156 + unsigned char salt[GR_SALT_LEN];
67157 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67158 +};
67159 +
67160 +struct name_entry {
67161 + __u32 key;
67162 + ino_t inode;
67163 + dev_t device;
67164 + char *name;
67165 + __u16 len;
67166 + __u8 deleted;
67167 + struct name_entry *prev;
67168 + struct name_entry *next;
67169 +};
67170 +
67171 +struct inodev_entry {
67172 + struct name_entry *nentry;
67173 + struct inodev_entry *prev;
67174 + struct inodev_entry *next;
67175 +};
67176 +
67177 +struct acl_role_db {
67178 + struct acl_role_label **r_hash;
67179 + __u32 r_size;
67180 +};
67181 +
67182 +struct inodev_db {
67183 + struct inodev_entry **i_hash;
67184 + __u32 i_size;
67185 +};
67186 +
67187 +struct name_db {
67188 + struct name_entry **n_hash;
67189 + __u32 n_size;
67190 +};
67191 +
67192 +struct crash_uid {
67193 + uid_t uid;
67194 + unsigned long expires;
67195 +};
67196 +
67197 +struct gr_hash_struct {
67198 + void **table;
67199 + void **nametable;
67200 + void *first;
67201 + __u32 table_size;
67202 + __u32 used_size;
67203 + int type;
67204 +};
67205 +
67206 +/* Userspace Grsecurity ACL data structures */
67207 +
67208 +struct acl_subject_label {
67209 + char *filename;
67210 + ino_t inode;
67211 + dev_t device;
67212 + __u32 mode;
67213 + kernel_cap_t cap_mask;
67214 + kernel_cap_t cap_lower;
67215 + kernel_cap_t cap_invert_audit;
67216 +
67217 + struct rlimit res[GR_NLIMITS];
67218 + __u32 resmask;
67219 +
67220 + __u8 user_trans_type;
67221 + __u8 group_trans_type;
67222 + uid_t *user_transitions;
67223 + gid_t *group_transitions;
67224 + __u16 user_trans_num;
67225 + __u16 group_trans_num;
67226 +
67227 + __u32 sock_families[2];
67228 + __u32 ip_proto[8];
67229 + __u32 ip_type;
67230 + struct acl_ip_label **ips;
67231 + __u32 ip_num;
67232 + __u32 inaddr_any_override;
67233 +
67234 + __u32 crashes;
67235 + unsigned long expires;
67236 +
67237 + struct acl_subject_label *parent_subject;
67238 + struct gr_hash_struct *hash;
67239 + struct acl_subject_label *prev;
67240 + struct acl_subject_label *next;
67241 +
67242 + struct acl_object_label **obj_hash;
67243 + __u32 obj_hash_size;
67244 + __u16 pax_flags;
67245 +};
67246 +
67247 +struct role_allowed_ip {
67248 + __u32 addr;
67249 + __u32 netmask;
67250 +
67251 + struct role_allowed_ip *prev;
67252 + struct role_allowed_ip *next;
67253 +};
67254 +
67255 +struct role_transition {
67256 + char *rolename;
67257 +
67258 + struct role_transition *prev;
67259 + struct role_transition *next;
67260 +};
67261 +
67262 +struct acl_role_label {
67263 + char *rolename;
67264 + uid_t uidgid;
67265 + __u16 roletype;
67266 +
67267 + __u16 auth_attempts;
67268 + unsigned long expires;
67269 +
67270 + struct acl_subject_label *root_label;
67271 + struct gr_hash_struct *hash;
67272 +
67273 + struct acl_role_label *prev;
67274 + struct acl_role_label *next;
67275 +
67276 + struct role_transition *transitions;
67277 + struct role_allowed_ip *allowed_ips;
67278 + uid_t *domain_children;
67279 + __u16 domain_child_num;
67280 +
67281 + mode_t umask;
67282 +
67283 + struct acl_subject_label **subj_hash;
67284 + __u32 subj_hash_size;
67285 +};
67286 +
67287 +struct user_acl_role_db {
67288 + struct acl_role_label **r_table;
67289 + __u32 num_pointers; /* Number of allocations to track */
67290 + __u32 num_roles; /* Number of roles */
67291 + __u32 num_domain_children; /* Number of domain children */
67292 + __u32 num_subjects; /* Number of subjects */
67293 + __u32 num_objects; /* Number of objects */
67294 +};
67295 +
67296 +struct acl_object_label {
67297 + char *filename;
67298 + ino_t inode;
67299 + dev_t device;
67300 + __u32 mode;
67301 +
67302 + struct acl_subject_label *nested;
67303 + struct acl_object_label *globbed;
67304 +
67305 + /* next two structures not used */
67306 +
67307 + struct acl_object_label *prev;
67308 + struct acl_object_label *next;
67309 +};
67310 +
67311 +struct acl_ip_label {
67312 + char *iface;
67313 + __u32 addr;
67314 + __u32 netmask;
67315 + __u16 low, high;
67316 + __u8 mode;
67317 + __u32 type;
67318 + __u32 proto[8];
67319 +
67320 + /* next two structures not used */
67321 +
67322 + struct acl_ip_label *prev;
67323 + struct acl_ip_label *next;
67324 +};
67325 +
67326 +struct gr_arg {
67327 + struct user_acl_role_db role_db;
67328 + unsigned char pw[GR_PW_LEN];
67329 + unsigned char salt[GR_SALT_LEN];
67330 + unsigned char sum[GR_SHA_LEN];
67331 + unsigned char sp_role[GR_SPROLE_LEN];
67332 + struct sprole_pw *sprole_pws;
67333 + dev_t segv_device;
67334 + ino_t segv_inode;
67335 + uid_t segv_uid;
67336 + __u16 num_sprole_pws;
67337 + __u16 mode;
67338 +};
67339 +
67340 +struct gr_arg_wrapper {
67341 + struct gr_arg *arg;
67342 + __u32 version;
67343 + __u32 size;
67344 +};
67345 +
67346 +struct subject_map {
67347 + struct acl_subject_label *user;
67348 + struct acl_subject_label *kernel;
67349 + struct subject_map *prev;
67350 + struct subject_map *next;
67351 +};
67352 +
67353 +struct acl_subj_map_db {
67354 + struct subject_map **s_hash;
67355 + __u32 s_size;
67356 +};
67357 +
67358 +/* End Data Structures Section */
67359 +
67360 +/* Hash functions generated by empirical testing by Brad Spengler
67361 + Makes good use of the low bits of the inode. Generally 0-1 times
67362 + in loop for successful match. 0-3 for unsuccessful match.
67363 + Shift/add algorithm with modulus of table size and an XOR*/
67364 +
67365 +static __inline__ unsigned int
67366 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67367 +{
67368 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67369 +}
67370 +
67371 + static __inline__ unsigned int
67372 +shash(const struct acl_subject_label *userp, const unsigned int sz)
67373 +{
67374 + return ((const unsigned long)userp % sz);
67375 +}
67376 +
67377 +static __inline__ unsigned int
67378 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67379 +{
67380 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67381 +}
67382 +
67383 +static __inline__ unsigned int
67384 +nhash(const char *name, const __u16 len, const unsigned int sz)
67385 +{
67386 + return full_name_hash((const unsigned char *)name, len) % sz;
67387 +}
67388 +
67389 +#define FOR_EACH_ROLE_START(role) \
67390 + role = role_list; \
67391 + while (role) {
67392 +
67393 +#define FOR_EACH_ROLE_END(role) \
67394 + role = role->prev; \
67395 + }
67396 +
67397 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67398 + subj = NULL; \
67399 + iter = 0; \
67400 + while (iter < role->subj_hash_size) { \
67401 + if (subj == NULL) \
67402 + subj = role->subj_hash[iter]; \
67403 + if (subj == NULL) { \
67404 + iter++; \
67405 + continue; \
67406 + }
67407 +
67408 +#define FOR_EACH_SUBJECT_END(subj,iter) \
67409 + subj = subj->next; \
67410 + if (subj == NULL) \
67411 + iter++; \
67412 + }
67413 +
67414 +
67415 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67416 + subj = role->hash->first; \
67417 + while (subj != NULL) {
67418 +
67419 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67420 + subj = subj->next; \
67421 + }
67422 +
67423 +#endif
67424 +
67425 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67426 new file mode 100644
67427 index 0000000..323ecf2
67428 --- /dev/null
67429 +++ b/include/linux/gralloc.h
67430 @@ -0,0 +1,9 @@
67431 +#ifndef __GRALLOC_H
67432 +#define __GRALLOC_H
67433 +
67434 +void acl_free_all(void);
67435 +int acl_alloc_stack_init(unsigned long size);
67436 +void *acl_alloc(unsigned long len);
67437 +void *acl_alloc_num(unsigned long num, unsigned long len);
67438 +
67439 +#endif
67440 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67441 new file mode 100644
67442 index 0000000..70d6cd5
67443 --- /dev/null
67444 +++ b/include/linux/grdefs.h
67445 @@ -0,0 +1,140 @@
67446 +#ifndef GRDEFS_H
67447 +#define GRDEFS_H
67448 +
67449 +/* Begin grsecurity status declarations */
67450 +
67451 +enum {
67452 + GR_READY = 0x01,
67453 + GR_STATUS_INIT = 0x00 // disabled state
67454 +};
67455 +
67456 +/* Begin ACL declarations */
67457 +
67458 +/* Role flags */
67459 +
67460 +enum {
67461 + GR_ROLE_USER = 0x0001,
67462 + GR_ROLE_GROUP = 0x0002,
67463 + GR_ROLE_DEFAULT = 0x0004,
67464 + GR_ROLE_SPECIAL = 0x0008,
67465 + GR_ROLE_AUTH = 0x0010,
67466 + GR_ROLE_NOPW = 0x0020,
67467 + GR_ROLE_GOD = 0x0040,
67468 + GR_ROLE_LEARN = 0x0080,
67469 + GR_ROLE_TPE = 0x0100,
67470 + GR_ROLE_DOMAIN = 0x0200,
67471 + GR_ROLE_PAM = 0x0400,
67472 + GR_ROLE_PERSIST = 0x800
67473 +};
67474 +
67475 +/* ACL Subject and Object mode flags */
67476 +enum {
67477 + GR_DELETED = 0x80000000
67478 +};
67479 +
67480 +/* ACL Object-only mode flags */
67481 +enum {
67482 + GR_READ = 0x00000001,
67483 + GR_APPEND = 0x00000002,
67484 + GR_WRITE = 0x00000004,
67485 + GR_EXEC = 0x00000008,
67486 + GR_FIND = 0x00000010,
67487 + GR_INHERIT = 0x00000020,
67488 + GR_SETID = 0x00000040,
67489 + GR_CREATE = 0x00000080,
67490 + GR_DELETE = 0x00000100,
67491 + GR_LINK = 0x00000200,
67492 + GR_AUDIT_READ = 0x00000400,
67493 + GR_AUDIT_APPEND = 0x00000800,
67494 + GR_AUDIT_WRITE = 0x00001000,
67495 + GR_AUDIT_EXEC = 0x00002000,
67496 + GR_AUDIT_FIND = 0x00004000,
67497 + GR_AUDIT_INHERIT= 0x00008000,
67498 + GR_AUDIT_SETID = 0x00010000,
67499 + GR_AUDIT_CREATE = 0x00020000,
67500 + GR_AUDIT_DELETE = 0x00040000,
67501 + GR_AUDIT_LINK = 0x00080000,
67502 + GR_PTRACERD = 0x00100000,
67503 + GR_NOPTRACE = 0x00200000,
67504 + GR_SUPPRESS = 0x00400000,
67505 + GR_NOLEARN = 0x00800000,
67506 + GR_INIT_TRANSFER= 0x01000000
67507 +};
67508 +
67509 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67510 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67511 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67512 +
67513 +/* ACL subject-only mode flags */
67514 +enum {
67515 + GR_KILL = 0x00000001,
67516 + GR_VIEW = 0x00000002,
67517 + GR_PROTECTED = 0x00000004,
67518 + GR_LEARN = 0x00000008,
67519 + GR_OVERRIDE = 0x00000010,
67520 + /* just a placeholder, this mode is only used in userspace */
67521 + GR_DUMMY = 0x00000020,
67522 + GR_PROTSHM = 0x00000040,
67523 + GR_KILLPROC = 0x00000080,
67524 + GR_KILLIPPROC = 0x00000100,
67525 + /* just a placeholder, this mode is only used in userspace */
67526 + GR_NOTROJAN = 0x00000200,
67527 + GR_PROTPROCFD = 0x00000400,
67528 + GR_PROCACCT = 0x00000800,
67529 + GR_RELAXPTRACE = 0x00001000,
67530 + GR_NESTED = 0x00002000,
67531 + GR_INHERITLEARN = 0x00004000,
67532 + GR_PROCFIND = 0x00008000,
67533 + GR_POVERRIDE = 0x00010000,
67534 + GR_KERNELAUTH = 0x00020000,
67535 + GR_ATSECURE = 0x00040000,
67536 + GR_SHMEXEC = 0x00080000
67537 +};
67538 +
67539 +enum {
67540 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67541 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67542 + GR_PAX_ENABLE_MPROTECT = 0x0004,
67543 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
67544 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67545 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67546 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67547 + GR_PAX_DISABLE_MPROTECT = 0x0400,
67548 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
67549 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67550 +};
67551 +
67552 +enum {
67553 + GR_ID_USER = 0x01,
67554 + GR_ID_GROUP = 0x02,
67555 +};
67556 +
67557 +enum {
67558 + GR_ID_ALLOW = 0x01,
67559 + GR_ID_DENY = 0x02,
67560 +};
67561 +
67562 +#define GR_CRASH_RES 31
67563 +#define GR_UIDTABLE_MAX 500
67564 +
67565 +/* begin resource learning section */
67566 +enum {
67567 + GR_RLIM_CPU_BUMP = 60,
67568 + GR_RLIM_FSIZE_BUMP = 50000,
67569 + GR_RLIM_DATA_BUMP = 10000,
67570 + GR_RLIM_STACK_BUMP = 1000,
67571 + GR_RLIM_CORE_BUMP = 10000,
67572 + GR_RLIM_RSS_BUMP = 500000,
67573 + GR_RLIM_NPROC_BUMP = 1,
67574 + GR_RLIM_NOFILE_BUMP = 5,
67575 + GR_RLIM_MEMLOCK_BUMP = 50000,
67576 + GR_RLIM_AS_BUMP = 500000,
67577 + GR_RLIM_LOCKS_BUMP = 2,
67578 + GR_RLIM_SIGPENDING_BUMP = 5,
67579 + GR_RLIM_MSGQUEUE_BUMP = 10000,
67580 + GR_RLIM_NICE_BUMP = 1,
67581 + GR_RLIM_RTPRIO_BUMP = 1,
67582 + GR_RLIM_RTTIME_BUMP = 1000000
67583 +};
67584 +
67585 +#endif
67586 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67587 new file mode 100644
67588 index 0000000..3826b91
67589 --- /dev/null
67590 +++ b/include/linux/grinternal.h
67591 @@ -0,0 +1,219 @@
67592 +#ifndef __GRINTERNAL_H
67593 +#define __GRINTERNAL_H
67594 +
67595 +#ifdef CONFIG_GRKERNSEC
67596 +
67597 +#include <linux/fs.h>
67598 +#include <linux/mnt_namespace.h>
67599 +#include <linux/nsproxy.h>
67600 +#include <linux/gracl.h>
67601 +#include <linux/grdefs.h>
67602 +#include <linux/grmsg.h>
67603 +
67604 +void gr_add_learn_entry(const char *fmt, ...)
67605 + __attribute__ ((format (printf, 1, 2)));
67606 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67607 + const struct vfsmount *mnt);
67608 +__u32 gr_check_create(const struct dentry *new_dentry,
67609 + const struct dentry *parent,
67610 + const struct vfsmount *mnt, const __u32 mode);
67611 +int gr_check_protected_task(const struct task_struct *task);
67612 +__u32 to_gr_audit(const __u32 reqmode);
67613 +int gr_set_acls(const int type);
67614 +int gr_apply_subject_to_task(struct task_struct *task);
67615 +int gr_acl_is_enabled(void);
67616 +char gr_roletype_to_char(void);
67617 +
67618 +void gr_handle_alertkill(struct task_struct *task);
67619 +char *gr_to_filename(const struct dentry *dentry,
67620 + const struct vfsmount *mnt);
67621 +char *gr_to_filename1(const struct dentry *dentry,
67622 + const struct vfsmount *mnt);
67623 +char *gr_to_filename2(const struct dentry *dentry,
67624 + const struct vfsmount *mnt);
67625 +char *gr_to_filename3(const struct dentry *dentry,
67626 + const struct vfsmount *mnt);
67627 +
67628 +extern int grsec_enable_ptrace_readexec;
67629 +extern int grsec_enable_harden_ptrace;
67630 +extern int grsec_enable_link;
67631 +extern int grsec_enable_fifo;
67632 +extern int grsec_enable_shm;
67633 +extern int grsec_enable_execlog;
67634 +extern int grsec_enable_signal;
67635 +extern int grsec_enable_audit_ptrace;
67636 +extern int grsec_enable_forkfail;
67637 +extern int grsec_enable_time;
67638 +extern int grsec_enable_rofs;
67639 +extern int grsec_enable_chroot_shmat;
67640 +extern int grsec_enable_chroot_mount;
67641 +extern int grsec_enable_chroot_double;
67642 +extern int grsec_enable_chroot_pivot;
67643 +extern int grsec_enable_chroot_chdir;
67644 +extern int grsec_enable_chroot_chmod;
67645 +extern int grsec_enable_chroot_mknod;
67646 +extern int grsec_enable_chroot_fchdir;
67647 +extern int grsec_enable_chroot_nice;
67648 +extern int grsec_enable_chroot_execlog;
67649 +extern int grsec_enable_chroot_caps;
67650 +extern int grsec_enable_chroot_sysctl;
67651 +extern int grsec_enable_chroot_unix;
67652 +extern int grsec_enable_tpe;
67653 +extern int grsec_tpe_gid;
67654 +extern int grsec_enable_tpe_all;
67655 +extern int grsec_enable_tpe_invert;
67656 +extern int grsec_enable_socket_all;
67657 +extern int grsec_socket_all_gid;
67658 +extern int grsec_enable_socket_client;
67659 +extern int grsec_socket_client_gid;
67660 +extern int grsec_enable_socket_server;
67661 +extern int grsec_socket_server_gid;
67662 +extern int grsec_audit_gid;
67663 +extern int grsec_enable_group;
67664 +extern int grsec_enable_audit_textrel;
67665 +extern int grsec_enable_log_rwxmaps;
67666 +extern int grsec_enable_mount;
67667 +extern int grsec_enable_chdir;
67668 +extern int grsec_resource_logging;
67669 +extern int grsec_enable_blackhole;
67670 +extern int grsec_lastack_retries;
67671 +extern int grsec_enable_brute;
67672 +extern int grsec_lock;
67673 +
67674 +extern spinlock_t grsec_alert_lock;
67675 +extern unsigned long grsec_alert_wtime;
67676 +extern unsigned long grsec_alert_fyet;
67677 +
67678 +extern spinlock_t grsec_audit_lock;
67679 +
67680 +extern rwlock_t grsec_exec_file_lock;
67681 +
67682 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67683 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67684 + (tsk)->exec_file->f_vfsmnt) : "/")
67685 +
67686 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67687 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67688 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67689 +
67690 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67691 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
67692 + (tsk)->exec_file->f_vfsmnt) : "/")
67693 +
67694 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67695 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67696 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67697 +
67698 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67699 +
67700 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67701 +
67702 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67703 + (task)->pid, (cred)->uid, \
67704 + (cred)->euid, (cred)->gid, (cred)->egid, \
67705 + gr_parent_task_fullpath(task), \
67706 + (task)->real_parent->comm, (task)->real_parent->pid, \
67707 + (pcred)->uid, (pcred)->euid, \
67708 + (pcred)->gid, (pcred)->egid
67709 +
67710 +#define GR_CHROOT_CAPS {{ \
67711 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67712 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67713 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67714 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67715 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67716 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67717 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
67718 +
67719 +#define security_learn(normal_msg,args...) \
67720 +({ \
67721 + read_lock(&grsec_exec_file_lock); \
67722 + gr_add_learn_entry(normal_msg "\n", ## args); \
67723 + read_unlock(&grsec_exec_file_lock); \
67724 +})
67725 +
67726 +enum {
67727 + GR_DO_AUDIT,
67728 + GR_DONT_AUDIT,
67729 + GR_DONT_AUDIT_GOOD
67730 +};
67731 +
67732 +enum {
67733 + GR_TTYSNIFF,
67734 + GR_RBAC,
67735 + GR_RBAC_STR,
67736 + GR_STR_RBAC,
67737 + GR_RBAC_MODE2,
67738 + GR_RBAC_MODE3,
67739 + GR_FILENAME,
67740 + GR_SYSCTL_HIDDEN,
67741 + GR_NOARGS,
67742 + GR_ONE_INT,
67743 + GR_ONE_INT_TWO_STR,
67744 + GR_ONE_STR,
67745 + GR_STR_INT,
67746 + GR_TWO_STR_INT,
67747 + GR_TWO_INT,
67748 + GR_TWO_U64,
67749 + GR_THREE_INT,
67750 + GR_FIVE_INT_TWO_STR,
67751 + GR_TWO_STR,
67752 + GR_THREE_STR,
67753 + GR_FOUR_STR,
67754 + GR_STR_FILENAME,
67755 + GR_FILENAME_STR,
67756 + GR_FILENAME_TWO_INT,
67757 + GR_FILENAME_TWO_INT_STR,
67758 + GR_TEXTREL,
67759 + GR_PTRACE,
67760 + GR_RESOURCE,
67761 + GR_CAP,
67762 + GR_SIG,
67763 + GR_SIG2,
67764 + GR_CRASH1,
67765 + GR_CRASH2,
67766 + GR_PSACCT,
67767 + GR_RWXMAP
67768 +};
67769 +
67770 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67771 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67772 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67773 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67774 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67775 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67776 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67777 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67778 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67779 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67780 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67781 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67782 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67783 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67784 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67785 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67786 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67787 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67788 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67789 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67790 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67791 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67792 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67793 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67794 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67795 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67796 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67797 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67798 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67799 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67800 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67801 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67802 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67803 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67804 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67805 +
67806 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67807 +
67808 +#endif
67809 +
67810 +#endif
67811 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67812 new file mode 100644
67813 index 0000000..f885406
67814 --- /dev/null
67815 +++ b/include/linux/grmsg.h
67816 @@ -0,0 +1,109 @@
67817 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67818 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67819 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67820 +#define GR_STOPMOD_MSG "denied modification of module state by "
67821 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67822 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67823 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67824 +#define GR_IOPL_MSG "denied use of iopl() by "
67825 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67826 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67827 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67828 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67829 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67830 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67831 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67832 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67833 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67834 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67835 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67836 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67837 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67838 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67839 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67840 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67841 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67842 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67843 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67844 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67845 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67846 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67847 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67848 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67849 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67850 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67851 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67852 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67853 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67854 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67855 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67856 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67857 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67858 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67859 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67860 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67861 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67862 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67863 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67864 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67865 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67866 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67867 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67868 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67869 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67870 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67871 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67872 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67873 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67874 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67875 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67876 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67877 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67878 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67879 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67880 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67881 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67882 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67883 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67884 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67885 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67886 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67887 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67888 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67889 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67890 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67891 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67892 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67893 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67894 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67895 +#define GR_TIME_MSG "time set by "
67896 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67897 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67898 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67899 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67900 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67901 +#define GR_BIND_MSG "denied bind() by "
67902 +#define GR_CONNECT_MSG "denied connect() by "
67903 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67904 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67905 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67906 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67907 +#define GR_CAP_ACL_MSG "use of %s denied for "
67908 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67909 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67910 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67911 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67912 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67913 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67914 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67915 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67916 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67917 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67918 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67919 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67920 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67921 +#define GR_VM86_MSG "denied use of vm86 by "
67922 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67923 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67924 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67925 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67926 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67927 new file mode 100644
67928 index 0000000..c1793ae
67929 --- /dev/null
67930 +++ b/include/linux/grsecurity.h
67931 @@ -0,0 +1,219 @@
67932 +#ifndef GR_SECURITY_H
67933 +#define GR_SECURITY_H
67934 +#include <linux/fs.h>
67935 +#include <linux/fs_struct.h>
67936 +#include <linux/binfmts.h>
67937 +#include <linux/gracl.h>
67938 +#include <linux/compat.h>
67939 +
67940 +/* notify of brain-dead configs */
67941 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67942 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67943 +#endif
67944 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67945 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67946 +#endif
67947 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67948 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67949 +#endif
67950 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67951 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
67952 +#endif
67953 +
67954 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67955 +void gr_handle_brute_check(void);
67956 +void gr_handle_kernel_exploit(void);
67957 +int gr_process_user_ban(void);
67958 +
67959 +char gr_roletype_to_char(void);
67960 +
67961 +int gr_acl_enable_at_secure(void);
67962 +
67963 +int gr_check_user_change(int real, int effective, int fs);
67964 +int gr_check_group_change(int real, int effective, int fs);
67965 +
67966 +void gr_del_task_from_ip_table(struct task_struct *p);
67967 +
67968 +int gr_pid_is_chrooted(struct task_struct *p);
67969 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67970 +int gr_handle_chroot_nice(void);
67971 +int gr_handle_chroot_sysctl(const int op);
67972 +int gr_handle_chroot_setpriority(struct task_struct *p,
67973 + const int niceval);
67974 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67975 +int gr_handle_chroot_chroot(const struct dentry *dentry,
67976 + const struct vfsmount *mnt);
67977 +void gr_handle_chroot_chdir(struct path *path);
67978 +int gr_handle_chroot_chmod(const struct dentry *dentry,
67979 + const struct vfsmount *mnt, const int mode);
67980 +int gr_handle_chroot_mknod(const struct dentry *dentry,
67981 + const struct vfsmount *mnt, const int mode);
67982 +int gr_handle_chroot_mount(const struct dentry *dentry,
67983 + const struct vfsmount *mnt,
67984 + const char *dev_name);
67985 +int gr_handle_chroot_pivot(void);
67986 +int gr_handle_chroot_unix(const pid_t pid);
67987 +
67988 +int gr_handle_rawio(const struct inode *inode);
67989 +
67990 +void gr_handle_ioperm(void);
67991 +void gr_handle_iopl(void);
67992 +
67993 +umode_t gr_acl_umask(void);
67994 +
67995 +int gr_tpe_allow(const struct file *file);
67996 +
67997 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67998 +void gr_clear_chroot_entries(struct task_struct *task);
67999 +
68000 +void gr_log_forkfail(const int retval);
68001 +void gr_log_timechange(void);
68002 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68003 +void gr_log_chdir(const struct dentry *dentry,
68004 + const struct vfsmount *mnt);
68005 +void gr_log_chroot_exec(const struct dentry *dentry,
68006 + const struct vfsmount *mnt);
68007 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
68008 +#ifdef CONFIG_COMPAT
68009 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
68010 +#endif
68011 +void gr_log_remount(const char *devname, const int retval);
68012 +void gr_log_unmount(const char *devname, const int retval);
68013 +void gr_log_mount(const char *from, const char *to, const int retval);
68014 +void gr_log_textrel(struct vm_area_struct *vma);
68015 +void gr_log_rwxmmap(struct file *file);
68016 +void gr_log_rwxmprotect(struct file *file);
68017 +
68018 +int gr_handle_follow_link(const struct inode *parent,
68019 + const struct inode *inode,
68020 + const struct dentry *dentry,
68021 + const struct vfsmount *mnt);
68022 +int gr_handle_fifo(const struct dentry *dentry,
68023 + const struct vfsmount *mnt,
68024 + const struct dentry *dir, const int flag,
68025 + const int acc_mode);
68026 +int gr_handle_hardlink(const struct dentry *dentry,
68027 + const struct vfsmount *mnt,
68028 + struct inode *inode,
68029 + const int mode, const char *to);
68030 +
68031 +int gr_is_capable(const int cap);
68032 +int gr_is_capable_nolog(const int cap);
68033 +void gr_learn_resource(const struct task_struct *task, const int limit,
68034 + const unsigned long wanted, const int gt);
68035 +void gr_copy_label(struct task_struct *tsk);
68036 +void gr_handle_crash(struct task_struct *task, const int sig);
68037 +int gr_handle_signal(const struct task_struct *p, const int sig);
68038 +int gr_check_crash_uid(const uid_t uid);
68039 +int gr_check_protected_task(const struct task_struct *task);
68040 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68041 +int gr_acl_handle_mmap(const struct file *file,
68042 + const unsigned long prot);
68043 +int gr_acl_handle_mprotect(const struct file *file,
68044 + const unsigned long prot);
68045 +int gr_check_hidden_task(const struct task_struct *tsk);
68046 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68047 + const struct vfsmount *mnt);
68048 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
68049 + const struct vfsmount *mnt);
68050 +__u32 gr_acl_handle_access(const struct dentry *dentry,
68051 + const struct vfsmount *mnt, const int fmode);
68052 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68053 + const struct vfsmount *mnt, umode_t *mode);
68054 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
68055 + const struct vfsmount *mnt);
68056 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68057 + const struct vfsmount *mnt);
68058 +int gr_handle_ptrace(struct task_struct *task, const long request);
68059 +int gr_handle_proc_ptrace(struct task_struct *task);
68060 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
68061 + const struct vfsmount *mnt);
68062 +int gr_check_crash_exec(const struct file *filp);
68063 +int gr_acl_is_enabled(void);
68064 +void gr_set_kernel_label(struct task_struct *task);
68065 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
68066 + const gid_t gid);
68067 +int gr_set_proc_label(const struct dentry *dentry,
68068 + const struct vfsmount *mnt,
68069 + const int unsafe_flags);
68070 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68071 + const struct vfsmount *mnt);
68072 +__u32 gr_acl_handle_open(const struct dentry *dentry,
68073 + const struct vfsmount *mnt, int acc_mode);
68074 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
68075 + const struct dentry *p_dentry,
68076 + const struct vfsmount *p_mnt,
68077 + int open_flags, int acc_mode, const int imode);
68078 +void gr_handle_create(const struct dentry *dentry,
68079 + const struct vfsmount *mnt);
68080 +void gr_handle_proc_create(const struct dentry *dentry,
68081 + const struct inode *inode);
68082 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68083 + const struct dentry *parent_dentry,
68084 + const struct vfsmount *parent_mnt,
68085 + const int mode);
68086 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68087 + const struct dentry *parent_dentry,
68088 + const struct vfsmount *parent_mnt);
68089 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68090 + const struct vfsmount *mnt);
68091 +void gr_handle_delete(const ino_t ino, const dev_t dev);
68092 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68093 + const struct vfsmount *mnt);
68094 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68095 + const struct dentry *parent_dentry,
68096 + const struct vfsmount *parent_mnt,
68097 + const char *from);
68098 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68099 + const struct dentry *parent_dentry,
68100 + const struct vfsmount *parent_mnt,
68101 + const struct dentry *old_dentry,
68102 + const struct vfsmount *old_mnt, const char *to);
68103 +int gr_acl_handle_rename(struct dentry *new_dentry,
68104 + struct dentry *parent_dentry,
68105 + const struct vfsmount *parent_mnt,
68106 + struct dentry *old_dentry,
68107 + struct inode *old_parent_inode,
68108 + struct vfsmount *old_mnt, const char *newname);
68109 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68110 + struct dentry *old_dentry,
68111 + struct dentry *new_dentry,
68112 + struct vfsmount *mnt, const __u8 replace);
68113 +__u32 gr_check_link(const struct dentry *new_dentry,
68114 + const struct dentry *parent_dentry,
68115 + const struct vfsmount *parent_mnt,
68116 + const struct dentry *old_dentry,
68117 + const struct vfsmount *old_mnt);
68118 +int gr_acl_handle_filldir(const struct file *file, const char *name,
68119 + const unsigned int namelen, const ino_t ino);
68120 +
68121 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
68122 + const struct vfsmount *mnt);
68123 +void gr_acl_handle_exit(void);
68124 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
68125 +int gr_acl_handle_procpidmem(const struct task_struct *task);
68126 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68127 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68128 +void gr_audit_ptrace(struct task_struct *task);
68129 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68130 +
68131 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68132 +
68133 +#ifdef CONFIG_GRKERNSEC
68134 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68135 +void gr_handle_vm86(void);
68136 +void gr_handle_mem_readwrite(u64 from, u64 to);
68137 +
68138 +void gr_log_badprocpid(const char *entry);
68139 +
68140 +extern int grsec_enable_dmesg;
68141 +extern int grsec_disable_privio;
68142 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68143 +extern int grsec_enable_chroot_findtask;
68144 +#endif
68145 +#ifdef CONFIG_GRKERNSEC_SETXID
68146 +extern int grsec_enable_setxid;
68147 +#endif
68148 +#endif
68149 +
68150 +#endif
68151 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
68152 index 6a87154..a3ce57b 100644
68153 --- a/include/linux/hdpu_features.h
68154 +++ b/include/linux/hdpu_features.h
68155 @@ -3,7 +3,7 @@
68156 struct cpustate_t {
68157 spinlock_t lock;
68158 int excl;
68159 - int open_count;
68160 + atomic_t open_count;
68161 unsigned char cached_val;
68162 int inited;
68163 unsigned long *set_addr;
68164 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68165 index 211ff44..00ab6d7 100644
68166 --- a/include/linux/highmem.h
68167 +++ b/include/linux/highmem.h
68168 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68169 kunmap_atomic(kaddr, KM_USER0);
68170 }
68171
68172 +static inline void sanitize_highpage(struct page *page)
68173 +{
68174 + void *kaddr;
68175 + unsigned long flags;
68176 +
68177 + local_irq_save(flags);
68178 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
68179 + clear_page(kaddr);
68180 + kunmap_atomic(kaddr, KM_CLEARPAGE);
68181 + local_irq_restore(flags);
68182 +}
68183 +
68184 static inline void zero_user_segments(struct page *page,
68185 unsigned start1, unsigned end1,
68186 unsigned start2, unsigned end2)
68187 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68188 index 7b40cda..24eb44e 100644
68189 --- a/include/linux/i2c.h
68190 +++ b/include/linux/i2c.h
68191 @@ -325,6 +325,7 @@ struct i2c_algorithm {
68192 /* To determine what the adapter supports */
68193 u32 (*functionality) (struct i2c_adapter *);
68194 };
68195 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68196
68197 /*
68198 * i2c_adapter is the structure used to identify a physical i2c bus along
68199 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68200 index 4c4e57d..f3c5303 100644
68201 --- a/include/linux/i2o.h
68202 +++ b/include/linux/i2o.h
68203 @@ -564,7 +564,7 @@ struct i2o_controller {
68204 struct i2o_device *exec; /* Executive */
68205 #if BITS_PER_LONG == 64
68206 spinlock_t context_list_lock; /* lock for context_list */
68207 - atomic_t context_list_counter; /* needed for unique contexts */
68208 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68209 struct list_head context_list; /* list of context id's
68210 and pointers */
68211 #endif
68212 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68213 index 21a6f5d..dc42eab 100644
68214 --- a/include/linux/init_task.h
68215 +++ b/include/linux/init_task.h
68216 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
68217 #define INIT_IDS
68218 #endif
68219
68220 +#ifdef CONFIG_X86
68221 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68222 +#else
68223 +#define INIT_TASK_THREAD_INFO
68224 +#endif
68225 +
68226 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68227 /*
68228 * Because of the reduced scope of CAP_SETPCAP when filesystem
68229 @@ -156,6 +162,7 @@ extern struct cred init_cred;
68230 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68231 .comm = "swapper", \
68232 .thread = INIT_THREAD, \
68233 + INIT_TASK_THREAD_INFO \
68234 .fs = &init_fs, \
68235 .files = &init_files, \
68236 .signal = &init_signals, \
68237 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68238 index 4f0a72a..a849599 100644
68239 --- a/include/linux/intel-iommu.h
68240 +++ b/include/linux/intel-iommu.h
68241 @@ -296,7 +296,7 @@ struct iommu_flush {
68242 u8 fm, u64 type);
68243 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68244 unsigned int size_order, u64 type);
68245 -};
68246 +} __no_const;
68247
68248 enum {
68249 SR_DMAR_FECTL_REG,
68250 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68251 index c739150..be577b5 100644
68252 --- a/include/linux/interrupt.h
68253 +++ b/include/linux/interrupt.h
68254 @@ -369,7 +369,7 @@ enum
68255 /* map softirq index to softirq name. update 'softirq_to_name' in
68256 * kernel/softirq.c when adding a new softirq.
68257 */
68258 -extern char *softirq_to_name[NR_SOFTIRQS];
68259 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68260
68261 /* softirq mask and active fields moved to irq_cpustat_t in
68262 * asm/hardirq.h to get better cache usage. KAO
68263 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68264
68265 struct softirq_action
68266 {
68267 - void (*action)(struct softirq_action *);
68268 + void (*action)(void);
68269 };
68270
68271 asmlinkage void do_softirq(void);
68272 asmlinkage void __do_softirq(void);
68273 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68274 +extern void open_softirq(int nr, void (*action)(void));
68275 extern void softirq_init(void);
68276 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68277 extern void raise_softirq_irqoff(unsigned int nr);
68278 diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
68279 index eb73632..19abfc1 100644
68280 --- a/include/linux/iocontext.h
68281 +++ b/include/linux/iocontext.h
68282 @@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
68283 return NULL;
68284 }
68285
68286 +struct task_struct;
68287 #ifdef CONFIG_BLOCK
68288 int put_io_context(struct io_context *ioc);
68289 -void exit_io_context(void);
68290 +void exit_io_context(struct task_struct *task);
68291 struct io_context *get_io_context(gfp_t gfp_flags, int node);
68292 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
68293 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
68294 #else
68295 -static inline void exit_io_context(void)
68296 +static inline void exit_io_context(struct task_struct *task)
68297 {
68298 }
68299
68300 diff --git a/include/linux/irq.h b/include/linux/irq.h
68301 index 9e5f45a..025865b 100644
68302 --- a/include/linux/irq.h
68303 +++ b/include/linux/irq.h
68304 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68305 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68306 bool boot)
68307 {
68308 +#ifdef CONFIG_CPUMASK_OFFSTACK
68309 gfp_t gfp = GFP_ATOMIC;
68310
68311 if (boot)
68312 gfp = GFP_NOWAIT;
68313
68314 -#ifdef CONFIG_CPUMASK_OFFSTACK
68315 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68316 return false;
68317
68318 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68319 index 7922742..27306a2 100644
68320 --- a/include/linux/kallsyms.h
68321 +++ b/include/linux/kallsyms.h
68322 @@ -15,7 +15,8 @@
68323
68324 struct module;
68325
68326 -#ifdef CONFIG_KALLSYMS
68327 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68328 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68329 /* Lookup the address for a symbol. Returns 0 if not found. */
68330 unsigned long kallsyms_lookup_name(const char *name);
68331
68332 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68333 /* Stupid that this does nothing, but I didn't create this mess. */
68334 #define __print_symbol(fmt, addr)
68335 #endif /*CONFIG_KALLSYMS*/
68336 +#else /* when included by kallsyms.c, vsnprintf.c, or
68337 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68338 +extern void __print_symbol(const char *fmt, unsigned long address);
68339 +extern int sprint_symbol(char *buffer, unsigned long address);
68340 +const char *kallsyms_lookup(unsigned long addr,
68341 + unsigned long *symbolsize,
68342 + unsigned long *offset,
68343 + char **modname, char *namebuf);
68344 +#endif
68345
68346 /* This macro allows us to keep printk typechecking */
68347 static void __check_printsym_format(const char *fmt, ...)
68348 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68349 index 6adcc29..13369e8 100644
68350 --- a/include/linux/kgdb.h
68351 +++ b/include/linux/kgdb.h
68352 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68353
68354 extern int kgdb_connected;
68355
68356 -extern atomic_t kgdb_setting_breakpoint;
68357 -extern atomic_t kgdb_cpu_doing_single_step;
68358 +extern atomic_unchecked_t kgdb_setting_breakpoint;
68359 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68360
68361 extern struct task_struct *kgdb_usethread;
68362 extern struct task_struct *kgdb_contthread;
68363 @@ -235,7 +235,7 @@ struct kgdb_arch {
68364 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68365 void (*remove_all_hw_break)(void);
68366 void (*correct_hw_break)(void);
68367 -};
68368 +} __do_const;
68369
68370 /**
68371 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68372 @@ -257,14 +257,14 @@ struct kgdb_io {
68373 int (*init) (void);
68374 void (*pre_exception) (void);
68375 void (*post_exception) (void);
68376 -};
68377 +} __do_const;
68378
68379 -extern struct kgdb_arch arch_kgdb_ops;
68380 +extern const struct kgdb_arch arch_kgdb_ops;
68381
68382 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68383
68384 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68385 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68386 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68387 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68388
68389 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68390 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68391 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68392 index 0546fe7..2a22bc1 100644
68393 --- a/include/linux/kmod.h
68394 +++ b/include/linux/kmod.h
68395 @@ -31,6 +31,8 @@
68396 * usually useless though. */
68397 extern int __request_module(bool wait, const char *name, ...) \
68398 __attribute__((format(printf, 2, 3)));
68399 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68400 + __attribute__((format(printf, 3, 4)));
68401 #define request_module(mod...) __request_module(true, mod)
68402 #define request_module_nowait(mod...) __request_module(false, mod)
68403 #define try_then_request_module(x, mod...) \
68404 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68405 index 58ae8e0..3950d3c 100644
68406 --- a/include/linux/kobject.h
68407 +++ b/include/linux/kobject.h
68408 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68409
68410 struct kobj_type {
68411 void (*release)(struct kobject *kobj);
68412 - struct sysfs_ops *sysfs_ops;
68413 + const struct sysfs_ops *sysfs_ops;
68414 struct attribute **default_attrs;
68415 };
68416
68417 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
68418 };
68419
68420 struct kset_uevent_ops {
68421 - int (*filter)(struct kset *kset, struct kobject *kobj);
68422 - const char *(*name)(struct kset *kset, struct kobject *kobj);
68423 - int (*uevent)(struct kset *kset, struct kobject *kobj,
68424 + int (* const filter)(struct kset *kset, struct kobject *kobj);
68425 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
68426 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
68427 struct kobj_uevent_env *env);
68428 };
68429
68430 @@ -132,7 +132,7 @@ struct kobj_attribute {
68431 const char *buf, size_t count);
68432 };
68433
68434 -extern struct sysfs_ops kobj_sysfs_ops;
68435 +extern const struct sysfs_ops kobj_sysfs_ops;
68436
68437 /**
68438 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68439 @@ -155,14 +155,14 @@ struct kset {
68440 struct list_head list;
68441 spinlock_t list_lock;
68442 struct kobject kobj;
68443 - struct kset_uevent_ops *uevent_ops;
68444 + const struct kset_uevent_ops *uevent_ops;
68445 };
68446
68447 extern void kset_init(struct kset *kset);
68448 extern int __must_check kset_register(struct kset *kset);
68449 extern void kset_unregister(struct kset *kset);
68450 extern struct kset * __must_check kset_create_and_add(const char *name,
68451 - struct kset_uevent_ops *u,
68452 + const struct kset_uevent_ops *u,
68453 struct kobject *parent_kobj);
68454
68455 static inline struct kset *to_kset(struct kobject *kobj)
68456 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68457 index c728a50..752d821 100644
68458 --- a/include/linux/kvm_host.h
68459 +++ b/include/linux/kvm_host.h
68460 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68461 void vcpu_load(struct kvm_vcpu *vcpu);
68462 void vcpu_put(struct kvm_vcpu *vcpu);
68463
68464 -int kvm_init(void *opaque, unsigned int vcpu_size,
68465 +int kvm_init(const void *opaque, unsigned int vcpu_size,
68466 struct module *module);
68467 void kvm_exit(void);
68468
68469 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68470 struct kvm_guest_debug *dbg);
68471 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68472
68473 -int kvm_arch_init(void *opaque);
68474 +int kvm_arch_init(const void *opaque);
68475 void kvm_arch_exit(void);
68476
68477 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68478 diff --git a/include/linux/libata.h b/include/linux/libata.h
68479 index a069916..223edde 100644
68480 --- a/include/linux/libata.h
68481 +++ b/include/linux/libata.h
68482 @@ -525,11 +525,11 @@ struct ata_ioports {
68483
68484 struct ata_host {
68485 spinlock_t lock;
68486 - struct device *dev;
68487 + struct device *dev;
68488 void __iomem * const *iomap;
68489 unsigned int n_ports;
68490 void *private_data;
68491 - struct ata_port_operations *ops;
68492 + const struct ata_port_operations *ops;
68493 unsigned long flags;
68494 #ifdef CONFIG_ATA_ACPI
68495 acpi_handle acpi_handle;
68496 @@ -710,7 +710,7 @@ struct ata_link {
68497
68498 struct ata_port {
68499 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68500 - struct ata_port_operations *ops;
68501 + const struct ata_port_operations *ops;
68502 spinlock_t *lock;
68503 /* Flags owned by the EH context. Only EH should touch these once the
68504 port is active */
68505 @@ -884,7 +884,7 @@ struct ata_port_operations {
68506 * fields must be pointers.
68507 */
68508 const struct ata_port_operations *inherits;
68509 -};
68510 +} __do_const;
68511
68512 struct ata_port_info {
68513 unsigned long flags;
68514 @@ -892,7 +892,7 @@ struct ata_port_info {
68515 unsigned long pio_mask;
68516 unsigned long mwdma_mask;
68517 unsigned long udma_mask;
68518 - struct ata_port_operations *port_ops;
68519 + const struct ata_port_operations *port_ops;
68520 void *private_data;
68521 };
68522
68523 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68524 extern const unsigned long sata_deb_timing_hotplug[];
68525 extern const unsigned long sata_deb_timing_long[];
68526
68527 -extern struct ata_port_operations ata_dummy_port_ops;
68528 +extern const struct ata_port_operations ata_dummy_port_ops;
68529 extern const struct ata_port_info ata_dummy_port_info;
68530
68531 static inline const unsigned long *
68532 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68533 struct scsi_host_template *sht);
68534 extern void ata_host_detach(struct ata_host *host);
68535 extern void ata_host_init(struct ata_host *, struct device *,
68536 - unsigned long, struct ata_port_operations *);
68537 + unsigned long, const struct ata_port_operations *);
68538 extern int ata_scsi_detect(struct scsi_host_template *sht);
68539 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68540 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68541 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68542 index fbc48f8..0886e57 100644
68543 --- a/include/linux/lockd/bind.h
68544 +++ b/include/linux/lockd/bind.h
68545 @@ -23,13 +23,13 @@ struct svc_rqst;
68546 * This is the set of functions for lockd->nfsd communication
68547 */
68548 struct nlmsvc_binding {
68549 - __be32 (*fopen)(struct svc_rqst *,
68550 + __be32 (* const fopen)(struct svc_rqst *,
68551 struct nfs_fh *,
68552 struct file **);
68553 - void (*fclose)(struct file *);
68554 + void (* const fclose)(struct file *);
68555 };
68556
68557 -extern struct nlmsvc_binding * nlmsvc_ops;
68558 +extern const struct nlmsvc_binding * nlmsvc_ops;
68559
68560 /*
68561 * Similar to nfs_client_initdata, but without the NFS-specific
68562 diff --git a/include/linux/mca.h b/include/linux/mca.h
68563 index 3797270..7765ede 100644
68564 --- a/include/linux/mca.h
68565 +++ b/include/linux/mca.h
68566 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68567 int region);
68568 void * (*mca_transform_memory)(struct mca_device *,
68569 void *memory);
68570 -};
68571 +} __no_const;
68572
68573 struct mca_bus {
68574 u64 default_dma_mask;
68575 diff --git a/include/linux/memory.h b/include/linux/memory.h
68576 index 37fa19b..b597c85 100644
68577 --- a/include/linux/memory.h
68578 +++ b/include/linux/memory.h
68579 @@ -108,7 +108,7 @@ struct memory_accessor {
68580 size_t count);
68581 ssize_t (*write)(struct memory_accessor *, const char *buf,
68582 off_t offset, size_t count);
68583 -};
68584 +} __no_const;
68585
68586 /*
68587 * Kernel text modification mutex, used for code patching. Users of this lock
68588 diff --git a/include/linux/mm.h b/include/linux/mm.h
68589 index 11e5be6..1ff2423 100644
68590 --- a/include/linux/mm.h
68591 +++ b/include/linux/mm.h
68592 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68593
68594 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68595 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68596 +
68597 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68598 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68599 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68600 +#else
68601 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68602 +#endif
68603 +
68604 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68605 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68606
68607 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68608 int set_page_dirty_lock(struct page *page);
68609 int clear_page_dirty_for_io(struct page *page);
68610
68611 -/* Is the vma a continuation of the stack vma above it? */
68612 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68613 -{
68614 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68615 -}
68616 -
68617 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68618 unsigned long old_addr, struct vm_area_struct *new_vma,
68619 unsigned long new_addr, unsigned long len);
68620 @@ -890,6 +891,8 @@ struct shrinker {
68621 extern void register_shrinker(struct shrinker *);
68622 extern void unregister_shrinker(struct shrinker *);
68623
68624 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
68625 +
68626 int vma_wants_writenotify(struct vm_area_struct *vma);
68627
68628 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68629 @@ -1162,6 +1165,7 @@ out:
68630 }
68631
68632 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68633 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68634
68635 extern unsigned long do_brk(unsigned long, unsigned long);
68636
68637 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68638 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68639 struct vm_area_struct **pprev);
68640
68641 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68642 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68643 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68644 +
68645 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68646 NULL if none. Assume start_addr < end_addr. */
68647 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68648 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68649 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68650 }
68651
68652 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
68653 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68654 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68655 unsigned long pfn, unsigned long size, pgprot_t);
68656 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68657 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68658 extern int sysctl_memory_failure_early_kill;
68659 extern int sysctl_memory_failure_recovery;
68660 -extern atomic_long_t mce_bad_pages;
68661 +extern atomic_long_unchecked_t mce_bad_pages;
68662 +
68663 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68664 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68665 +#else
68666 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68667 +#endif
68668
68669 #endif /* __KERNEL__ */
68670 #endif /* _LINUX_MM_H */
68671 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68672 index 9d12ed5..6d9707a 100644
68673 --- a/include/linux/mm_types.h
68674 +++ b/include/linux/mm_types.h
68675 @@ -186,6 +186,8 @@ struct vm_area_struct {
68676 #ifdef CONFIG_NUMA
68677 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68678 #endif
68679 +
68680 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68681 };
68682
68683 struct core_thread {
68684 @@ -287,6 +289,24 @@ struct mm_struct {
68685 #ifdef CONFIG_MMU_NOTIFIER
68686 struct mmu_notifier_mm *mmu_notifier_mm;
68687 #endif
68688 +
68689 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68690 + unsigned long pax_flags;
68691 +#endif
68692 +
68693 +#ifdef CONFIG_PAX_DLRESOLVE
68694 + unsigned long call_dl_resolve;
68695 +#endif
68696 +
68697 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68698 + unsigned long call_syscall;
68699 +#endif
68700 +
68701 +#ifdef CONFIG_PAX_ASLR
68702 + unsigned long delta_mmap; /* randomized offset */
68703 + unsigned long delta_stack; /* randomized offset */
68704 +#endif
68705 +
68706 };
68707
68708 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68709 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68710 index 4e02ee2..afb159e 100644
68711 --- a/include/linux/mmu_notifier.h
68712 +++ b/include/linux/mmu_notifier.h
68713 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68714 */
68715 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68716 ({ \
68717 - pte_t __pte; \
68718 + pte_t ___pte; \
68719 struct vm_area_struct *___vma = __vma; \
68720 unsigned long ___address = __address; \
68721 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68722 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68723 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68724 - __pte; \
68725 + ___pte; \
68726 })
68727
68728 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68729 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68730 index 6c31a2a..4b0e930 100644
68731 --- a/include/linux/mmzone.h
68732 +++ b/include/linux/mmzone.h
68733 @@ -350,7 +350,7 @@ struct zone {
68734 unsigned long flags; /* zone flags, see below */
68735
68736 /* Zone statistics */
68737 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68738 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68739
68740 /*
68741 * prev_priority holds the scanning priority for this zone. It is
68742 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68743 index f58e9d8..3503935 100644
68744 --- a/include/linux/mod_devicetable.h
68745 +++ b/include/linux/mod_devicetable.h
68746 @@ -12,7 +12,7 @@
68747 typedef unsigned long kernel_ulong_t;
68748 #endif
68749
68750 -#define PCI_ANY_ID (~0)
68751 +#define PCI_ANY_ID ((__u16)~0)
68752
68753 struct pci_device_id {
68754 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68755 @@ -131,7 +131,7 @@ struct usb_device_id {
68756 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68757 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68758
68759 -#define HID_ANY_ID (~0)
68760 +#define HID_ANY_ID (~0U)
68761
68762 struct hid_device_id {
68763 __u16 bus;
68764 diff --git a/include/linux/module.h b/include/linux/module.h
68765 index 482efc8..642032b 100644
68766 --- a/include/linux/module.h
68767 +++ b/include/linux/module.h
68768 @@ -16,6 +16,7 @@
68769 #include <linux/kobject.h>
68770 #include <linux/moduleparam.h>
68771 #include <linux/tracepoint.h>
68772 +#include <linux/fs.h>
68773
68774 #include <asm/local.h>
68775 #include <asm/module.h>
68776 @@ -287,16 +288,16 @@ struct module
68777 int (*init)(void);
68778
68779 /* If this is non-NULL, vfree after init() returns */
68780 - void *module_init;
68781 + void *module_init_rx, *module_init_rw;
68782
68783 /* Here is the actual code + data, vfree'd on unload. */
68784 - void *module_core;
68785 + void *module_core_rx, *module_core_rw;
68786
68787 /* Here are the sizes of the init and core sections */
68788 - unsigned int init_size, core_size;
68789 + unsigned int init_size_rw, core_size_rw;
68790
68791 /* The size of the executable code in each section. */
68792 - unsigned int init_text_size, core_text_size;
68793 + unsigned int init_size_rx, core_size_rx;
68794
68795 /* Arch-specific module values */
68796 struct mod_arch_specific arch;
68797 @@ -345,6 +346,10 @@ struct module
68798 #ifdef CONFIG_EVENT_TRACING
68799 struct ftrace_event_call *trace_events;
68800 unsigned int num_trace_events;
68801 + struct file_operations trace_id;
68802 + struct file_operations trace_enable;
68803 + struct file_operations trace_format;
68804 + struct file_operations trace_filter;
68805 #endif
68806 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68807 unsigned long *ftrace_callsites;
68808 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68809 bool is_module_address(unsigned long addr);
68810 bool is_module_text_address(unsigned long addr);
68811
68812 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68813 +{
68814 +
68815 +#ifdef CONFIG_PAX_KERNEXEC
68816 + if (ktla_ktva(addr) >= (unsigned long)start &&
68817 + ktla_ktva(addr) < (unsigned long)start + size)
68818 + return 1;
68819 +#endif
68820 +
68821 + return ((void *)addr >= start && (void *)addr < start + size);
68822 +}
68823 +
68824 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68825 +{
68826 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68827 +}
68828 +
68829 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68830 +{
68831 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68832 +}
68833 +
68834 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68835 +{
68836 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68837 +}
68838 +
68839 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68840 +{
68841 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68842 +}
68843 +
68844 static inline int within_module_core(unsigned long addr, struct module *mod)
68845 {
68846 - return (unsigned long)mod->module_core <= addr &&
68847 - addr < (unsigned long)mod->module_core + mod->core_size;
68848 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68849 }
68850
68851 static inline int within_module_init(unsigned long addr, struct module *mod)
68852 {
68853 - return (unsigned long)mod->module_init <= addr &&
68854 - addr < (unsigned long)mod->module_init + mod->init_size;
68855 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68856 }
68857
68858 /* Search for module by name: must hold module_mutex. */
68859 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68860 index c1f40c2..682ca53 100644
68861 --- a/include/linux/moduleloader.h
68862 +++ b/include/linux/moduleloader.h
68863 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68864 sections. Returns NULL on failure. */
68865 void *module_alloc(unsigned long size);
68866
68867 +#ifdef CONFIG_PAX_KERNEXEC
68868 +void *module_alloc_exec(unsigned long size);
68869 +#else
68870 +#define module_alloc_exec(x) module_alloc(x)
68871 +#endif
68872 +
68873 /* Free memory returned from module_alloc. */
68874 void module_free(struct module *mod, void *module_region);
68875
68876 +#ifdef CONFIG_PAX_KERNEXEC
68877 +void module_free_exec(struct module *mod, void *module_region);
68878 +#else
68879 +#define module_free_exec(x, y) module_free((x), (y))
68880 +#endif
68881 +
68882 /* Apply the given relocation to the (simplified) ELF. Return -error
68883 or 0. */
68884 int apply_relocate(Elf_Shdr *sechdrs,
68885 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68886 index 82a9124..8a5f622 100644
68887 --- a/include/linux/moduleparam.h
68888 +++ b/include/linux/moduleparam.h
68889 @@ -132,7 +132,7 @@ struct kparam_array
68890
68891 /* Actually copy string: maxlen param is usually sizeof(string). */
68892 #define module_param_string(name, string, len, perm) \
68893 - static const struct kparam_string __param_string_##name \
68894 + static const struct kparam_string __param_string_##name __used \
68895 = { len, string }; \
68896 __module_param_call(MODULE_PARAM_PREFIX, name, \
68897 param_set_copystring, param_get_string, \
68898 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68899
68900 /* Comma-separated array: *nump is set to number they actually specified. */
68901 #define module_param_array_named(name, array, type, nump, perm) \
68902 - static const struct kparam_array __param_arr_##name \
68903 + static const struct kparam_array __param_arr_##name __used \
68904 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68905 sizeof(array[0]), array }; \
68906 __module_param_call(MODULE_PARAM_PREFIX, name, \
68907 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68908 index 878cab4..c92cb3e 100644
68909 --- a/include/linux/mutex.h
68910 +++ b/include/linux/mutex.h
68911 @@ -51,7 +51,7 @@ struct mutex {
68912 spinlock_t wait_lock;
68913 struct list_head wait_list;
68914 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68915 - struct thread_info *owner;
68916 + struct task_struct *owner;
68917 #endif
68918 #ifdef CONFIG_DEBUG_MUTEXES
68919 const char *name;
68920 diff --git a/include/linux/namei.h b/include/linux/namei.h
68921 index ec0f607..d19e675 100644
68922 --- a/include/linux/namei.h
68923 +++ b/include/linux/namei.h
68924 @@ -22,7 +22,7 @@ struct nameidata {
68925 unsigned int flags;
68926 int last_type;
68927 unsigned depth;
68928 - char *saved_names[MAX_NESTED_LINKS + 1];
68929 + const char *saved_names[MAX_NESTED_LINKS + 1];
68930
68931 /* Intent data */
68932 union {
68933 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68934 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68935 extern void unlock_rename(struct dentry *, struct dentry *);
68936
68937 -static inline void nd_set_link(struct nameidata *nd, char *path)
68938 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68939 {
68940 nd->saved_names[nd->depth] = path;
68941 }
68942
68943 -static inline char *nd_get_link(struct nameidata *nd)
68944 +static inline const char *nd_get_link(const struct nameidata *nd)
68945 {
68946 return nd->saved_names[nd->depth];
68947 }
68948 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68949 index 9d7e8f7..04428c5 100644
68950 --- a/include/linux/netdevice.h
68951 +++ b/include/linux/netdevice.h
68952 @@ -637,6 +637,7 @@ struct net_device_ops {
68953 u16 xid);
68954 #endif
68955 };
68956 +typedef struct net_device_ops __no_const net_device_ops_no_const;
68957
68958 /*
68959 * The DEVICE structure.
68960 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68961 new file mode 100644
68962 index 0000000..33f4af8
68963 --- /dev/null
68964 +++ b/include/linux/netfilter/xt_gradm.h
68965 @@ -0,0 +1,9 @@
68966 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
68967 +#define _LINUX_NETFILTER_XT_GRADM_H 1
68968 +
68969 +struct xt_gradm_mtinfo {
68970 + __u16 flags;
68971 + __u16 invflags;
68972 +};
68973 +
68974 +#endif
68975 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68976 index b359c4a..c08b334 100644
68977 --- a/include/linux/nodemask.h
68978 +++ b/include/linux/nodemask.h
68979 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68980
68981 #define any_online_node(mask) \
68982 ({ \
68983 - int node; \
68984 - for_each_node_mask(node, (mask)) \
68985 - if (node_online(node)) \
68986 + int __node; \
68987 + for_each_node_mask(__node, (mask)) \
68988 + if (node_online(__node)) \
68989 break; \
68990 - node; \
68991 + __node; \
68992 })
68993
68994 #define num_online_nodes() num_node_state(N_ONLINE)
68995 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68996 index 5171639..7cf4235 100644
68997 --- a/include/linux/oprofile.h
68998 +++ b/include/linux/oprofile.h
68999 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69000 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69001 char const * name, ulong * val);
69002
69003 -/** Create a file for read-only access to an atomic_t. */
69004 +/** Create a file for read-only access to an atomic_unchecked_t. */
69005 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69006 - char const * name, atomic_t * val);
69007 + char const * name, atomic_unchecked_t * val);
69008
69009 /** create a directory */
69010 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69011 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
69012 index 3c62ed4..8924c7c 100644
69013 --- a/include/linux/pagemap.h
69014 +++ b/include/linux/pagemap.h
69015 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
69016 if (((unsigned long)uaddr & PAGE_MASK) !=
69017 ((unsigned long)end & PAGE_MASK))
69018 ret = __get_user(c, end);
69019 + (void)c;
69020 }
69021 + (void)c;
69022 return ret;
69023 }
69024
69025 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69026 index 81c9689..a567a55 100644
69027 --- a/include/linux/perf_event.h
69028 +++ b/include/linux/perf_event.h
69029 @@ -476,7 +476,7 @@ struct hw_perf_event {
69030 struct hrtimer hrtimer;
69031 };
69032 };
69033 - atomic64_t prev_count;
69034 + atomic64_unchecked_t prev_count;
69035 u64 sample_period;
69036 u64 last_period;
69037 atomic64_t period_left;
69038 @@ -557,7 +557,7 @@ struct perf_event {
69039 const struct pmu *pmu;
69040
69041 enum perf_event_active_state state;
69042 - atomic64_t count;
69043 + atomic64_unchecked_t count;
69044
69045 /*
69046 * These are the total time in nanoseconds that the event
69047 @@ -595,8 +595,8 @@ struct perf_event {
69048 * These accumulate total time (in nanoseconds) that children
69049 * events have been enabled and running, respectively.
69050 */
69051 - atomic64_t child_total_time_enabled;
69052 - atomic64_t child_total_time_running;
69053 + atomic64_unchecked_t child_total_time_enabled;
69054 + atomic64_unchecked_t child_total_time_running;
69055
69056 /*
69057 * Protect attach/detach and child_list:
69058 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69059 index b43a9e0..b77d869 100644
69060 --- a/include/linux/pipe_fs_i.h
69061 +++ b/include/linux/pipe_fs_i.h
69062 @@ -46,9 +46,9 @@ struct pipe_inode_info {
69063 wait_queue_head_t wait;
69064 unsigned int nrbufs, curbuf;
69065 struct page *tmp_page;
69066 - unsigned int readers;
69067 - unsigned int writers;
69068 - unsigned int waiting_writers;
69069 + atomic_t readers;
69070 + atomic_t writers;
69071 + atomic_t waiting_writers;
69072 unsigned int r_counter;
69073 unsigned int w_counter;
69074 struct fasync_struct *fasync_readers;
69075 diff --git a/include/linux/poison.h b/include/linux/poison.h
69076 index 34066ff..e95d744 100644
69077 --- a/include/linux/poison.h
69078 +++ b/include/linux/poison.h
69079 @@ -19,8 +19,8 @@
69080 * under normal circumstances, used to verify that nobody uses
69081 * non-initialized list entries.
69082 */
69083 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69084 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69085 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69086 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69087
69088 /********** include/linux/timer.h **********/
69089 /*
69090 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
69091 index 4f71bf4..cd2f68e 100644
69092 --- a/include/linux/posix-timers.h
69093 +++ b/include/linux/posix-timers.h
69094 @@ -82,7 +82,8 @@ struct k_clock {
69095 #define TIMER_RETRY 1
69096 void (*timer_get) (struct k_itimer * timr,
69097 struct itimerspec * cur_setting);
69098 -};
69099 +} __do_const;
69100 +typedef struct k_clock __no_const k_clock_no_const;
69101
69102 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
69103
69104 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
69105 index 72b1a10..13303a9 100644
69106 --- a/include/linux/preempt.h
69107 +++ b/include/linux/preempt.h
69108 @@ -110,7 +110,7 @@ struct preempt_ops {
69109 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
69110 void (*sched_out)(struct preempt_notifier *notifier,
69111 struct task_struct *next);
69112 -};
69113 +} __no_const;
69114
69115 /**
69116 * preempt_notifier - key for installing preemption notifiers
69117 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69118 index 379eaed..1bf73e3 100644
69119 --- a/include/linux/proc_fs.h
69120 +++ b/include/linux/proc_fs.h
69121 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
69122 return proc_create_data(name, mode, parent, proc_fops, NULL);
69123 }
69124
69125 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
69126 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69127 +{
69128 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69129 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69130 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69131 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69132 +#else
69133 + return proc_create_data(name, mode, parent, proc_fops, NULL);
69134 +#endif
69135 +}
69136 +
69137 +
69138 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69139 mode_t mode, struct proc_dir_entry *base,
69140 read_proc_t *read_proc, void * data)
69141 @@ -256,7 +269,7 @@ union proc_op {
69142 int (*proc_show)(struct seq_file *m,
69143 struct pid_namespace *ns, struct pid *pid,
69144 struct task_struct *task);
69145 -};
69146 +} __no_const;
69147
69148 struct ctl_table_header;
69149 struct ctl_table;
69150 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
69151 index 7456d7d..6c1cfc9 100644
69152 --- a/include/linux/ptrace.h
69153 +++ b/include/linux/ptrace.h
69154 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
69155 extern void exit_ptrace(struct task_struct *tracer);
69156 #define PTRACE_MODE_READ 1
69157 #define PTRACE_MODE_ATTACH 2
69158 -/* Returns 0 on success, -errno on denial. */
69159 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
69160 /* Returns true on success, false on denial. */
69161 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
69162 +/* Returns true on success, false on denial. */
69163 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
69164
69165 static inline int ptrace_reparented(struct task_struct *child)
69166 {
69167 diff --git a/include/linux/random.h b/include/linux/random.h
69168 index 2948046..3262567 100644
69169 --- a/include/linux/random.h
69170 +++ b/include/linux/random.h
69171 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
69172 u32 random32(void);
69173 void srandom32(u32 seed);
69174
69175 +static inline unsigned long pax_get_random_long(void)
69176 +{
69177 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69178 +}
69179 +
69180 #endif /* __KERNEL___ */
69181
69182 #endif /* _LINUX_RANDOM_H */
69183 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69184 index 988e55f..17cb4ef 100644
69185 --- a/include/linux/reboot.h
69186 +++ b/include/linux/reboot.h
69187 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69188 * Architecture-specific implementations of sys_reboot commands.
69189 */
69190
69191 -extern void machine_restart(char *cmd);
69192 -extern void machine_halt(void);
69193 -extern void machine_power_off(void);
69194 +extern void machine_restart(char *cmd) __noreturn;
69195 +extern void machine_halt(void) __noreturn;
69196 +extern void machine_power_off(void) __noreturn;
69197
69198 extern void machine_shutdown(void);
69199 struct pt_regs;
69200 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69201 */
69202
69203 extern void kernel_restart_prepare(char *cmd);
69204 -extern void kernel_restart(char *cmd);
69205 -extern void kernel_halt(void);
69206 -extern void kernel_power_off(void);
69207 +extern void kernel_restart(char *cmd) __noreturn;
69208 +extern void kernel_halt(void) __noreturn;
69209 +extern void kernel_power_off(void) __noreturn;
69210
69211 void ctrl_alt_del(void);
69212
69213 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69214 * Emergency restart, callable from an interrupt handler.
69215 */
69216
69217 -extern void emergency_restart(void);
69218 +extern void emergency_restart(void) __noreturn;
69219 #include <asm/emergency-restart.h>
69220
69221 #endif
69222 diff --git a/include/linux/regset.h b/include/linux/regset.h
69223 index 8abee65..5150fd1 100644
69224 --- a/include/linux/regset.h
69225 +++ b/include/linux/regset.h
69226 @@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
69227 {
69228 const struct user_regset *regset = &view->regsets[setno];
69229
69230 + if (!regset->get)
69231 + return -EOPNOTSUPP;
69232 +
69233 if (!access_ok(VERIFY_WRITE, data, size))
69234 return -EIO;
69235
69236 @@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
69237 {
69238 const struct user_regset *regset = &view->regsets[setno];
69239
69240 + if (!regset->set)
69241 + return -EOPNOTSUPP;
69242 +
69243 if (!access_ok(VERIFY_READ, data, size))
69244 return -EIO;
69245
69246 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69247 index dd31e7b..5b03c5c 100644
69248 --- a/include/linux/reiserfs_fs.h
69249 +++ b/include/linux/reiserfs_fs.h
69250 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69251 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69252
69253 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69254 -#define get_generation(s) atomic_read (&fs_generation(s))
69255 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69256 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69257 #define __fs_changed(gen,s) (gen != get_generation (s))
69258 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69259 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69260 */
69261
69262 struct item_operations {
69263 - int (*bytes_number) (struct item_head * ih, int block_size);
69264 - void (*decrement_key) (struct cpu_key *);
69265 - int (*is_left_mergeable) (struct reiserfs_key * ih,
69266 + int (* const bytes_number) (struct item_head * ih, int block_size);
69267 + void (* const decrement_key) (struct cpu_key *);
69268 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
69269 unsigned long bsize);
69270 - void (*print_item) (struct item_head *, char *item);
69271 - void (*check_item) (struct item_head *, char *item);
69272 + void (* const print_item) (struct item_head *, char *item);
69273 + void (* const check_item) (struct item_head *, char *item);
69274
69275 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69276 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69277 int is_affected, int insert_size);
69278 - int (*check_left) (struct virtual_item * vi, int free,
69279 + int (* const check_left) (struct virtual_item * vi, int free,
69280 int start_skip, int end_skip);
69281 - int (*check_right) (struct virtual_item * vi, int free);
69282 - int (*part_size) (struct virtual_item * vi, int from, int to);
69283 - int (*unit_num) (struct virtual_item * vi);
69284 - void (*print_vi) (struct virtual_item * vi);
69285 + int (* const check_right) (struct virtual_item * vi, int free);
69286 + int (* const part_size) (struct virtual_item * vi, int from, int to);
69287 + int (* const unit_num) (struct virtual_item * vi);
69288 + void (* const print_vi) (struct virtual_item * vi);
69289 };
69290
69291 -extern struct item_operations *item_ops[TYPE_ANY + 1];
69292 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69293
69294 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69295 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69296 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69297 index dab68bb..0688727 100644
69298 --- a/include/linux/reiserfs_fs_sb.h
69299 +++ b/include/linux/reiserfs_fs_sb.h
69300 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69301 /* Comment? -Hans */
69302 wait_queue_head_t s_wait;
69303 /* To be obsoleted soon by per buffer seals.. -Hans */
69304 - atomic_t s_generation_counter; // increased by one every time the
69305 + atomic_unchecked_t s_generation_counter; // increased by one every time the
69306 // tree gets re-balanced
69307 unsigned long s_properties; /* File system properties. Currently holds
69308 on-disk FS format */
69309 diff --git a/include/linux/relay.h b/include/linux/relay.h
69310 index 14a86bc..17d0700 100644
69311 --- a/include/linux/relay.h
69312 +++ b/include/linux/relay.h
69313 @@ -159,7 +159,7 @@ struct rchan_callbacks
69314 * The callback should return 0 if successful, negative if not.
69315 */
69316 int (*remove_buf_file)(struct dentry *dentry);
69317 -};
69318 +} __no_const;
69319
69320 /*
69321 * CONFIG_RELAY kernel API, kernel/relay.c
69322 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69323 index 3392c59..a746428 100644
69324 --- a/include/linux/rfkill.h
69325 +++ b/include/linux/rfkill.h
69326 @@ -144,6 +144,7 @@ struct rfkill_ops {
69327 void (*query)(struct rfkill *rfkill, void *data);
69328 int (*set_block)(void *data, bool blocked);
69329 };
69330 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69331
69332 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69333 /**
69334 diff --git a/include/linux/sched.h b/include/linux/sched.h
69335 index 71849bf..2ef383dc3 100644
69336 --- a/include/linux/sched.h
69337 +++ b/include/linux/sched.h
69338 @@ -101,6 +101,7 @@ struct bio;
69339 struct fs_struct;
69340 struct bts_context;
69341 struct perf_event_context;
69342 +struct linux_binprm;
69343
69344 /*
69345 * List of flags we want to share for kernel threads,
69346 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69347 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69348 asmlinkage void __schedule(void);
69349 asmlinkage void schedule(void);
69350 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69351 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69352
69353 struct nsproxy;
69354 struct user_namespace;
69355 @@ -371,9 +372,12 @@ struct user_namespace;
69356 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69357
69358 extern int sysctl_max_map_count;
69359 +extern unsigned long sysctl_heap_stack_gap;
69360
69361 #include <linux/aio.h>
69362
69363 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69364 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69365 extern unsigned long
69366 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69367 unsigned long, unsigned long);
69368 @@ -666,6 +670,16 @@ struct signal_struct {
69369 struct tty_audit_buf *tty_audit_buf;
69370 #endif
69371
69372 +#ifdef CONFIG_GRKERNSEC
69373 + u32 curr_ip;
69374 + u32 saved_ip;
69375 + u32 gr_saddr;
69376 + u32 gr_daddr;
69377 + u16 gr_sport;
69378 + u16 gr_dport;
69379 + u8 used_accept:1;
69380 +#endif
69381 +
69382 int oom_adj; /* OOM kill score adjustment (bit shift) */
69383 };
69384
69385 @@ -723,6 +737,11 @@ struct user_struct {
69386 struct key *session_keyring; /* UID's default session keyring */
69387 #endif
69388
69389 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69390 + unsigned int banned;
69391 + unsigned long ban_expires;
69392 +#endif
69393 +
69394 /* Hash table maintenance information */
69395 struct hlist_node uidhash_node;
69396 uid_t uid;
69397 @@ -1328,8 +1347,8 @@ struct task_struct {
69398 struct list_head thread_group;
69399
69400 struct completion *vfork_done; /* for vfork() */
69401 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69402 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69403 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69404 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69405
69406 cputime_t utime, stime, utimescaled, stimescaled;
69407 cputime_t gtime;
69408 @@ -1343,16 +1362,6 @@ struct task_struct {
69409 struct task_cputime cputime_expires;
69410 struct list_head cpu_timers[3];
69411
69412 -/* process credentials */
69413 - const struct cred *real_cred; /* objective and real subjective task
69414 - * credentials (COW) */
69415 - const struct cred *cred; /* effective (overridable) subjective task
69416 - * credentials (COW) */
69417 - struct mutex cred_guard_mutex; /* guard against foreign influences on
69418 - * credential calculations
69419 - * (notably. ptrace) */
69420 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69421 -
69422 char comm[TASK_COMM_LEN]; /* executable name excluding path
69423 - access with [gs]et_task_comm (which lock
69424 it with task_lock())
69425 @@ -1369,6 +1378,10 @@ struct task_struct {
69426 #endif
69427 /* CPU-specific state of this task */
69428 struct thread_struct thread;
69429 +/* thread_info moved to task_struct */
69430 +#ifdef CONFIG_X86
69431 + struct thread_info tinfo;
69432 +#endif
69433 /* filesystem information */
69434 struct fs_struct *fs;
69435 /* open file information */
69436 @@ -1436,6 +1449,15 @@ struct task_struct {
69437 int hardirq_context;
69438 int softirq_context;
69439 #endif
69440 +
69441 +/* process credentials */
69442 + const struct cred *real_cred; /* objective and real subjective task
69443 + * credentials (COW) */
69444 + struct mutex cred_guard_mutex; /* guard against foreign influences on
69445 + * credential calculations
69446 + * (notably. ptrace) */
69447 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69448 +
69449 #ifdef CONFIG_LOCKDEP
69450 # define MAX_LOCK_DEPTH 48UL
69451 u64 curr_chain_key;
69452 @@ -1456,6 +1478,9 @@ struct task_struct {
69453
69454 struct backing_dev_info *backing_dev_info;
69455
69456 + const struct cred *cred; /* effective (overridable) subjective task
69457 + * credentials (COW) */
69458 +
69459 struct io_context *io_context;
69460
69461 unsigned long ptrace_message;
69462 @@ -1519,6 +1544,27 @@ struct task_struct {
69463 unsigned long default_timer_slack_ns;
69464
69465 struct list_head *scm_work_list;
69466 +
69467 +#ifdef CONFIG_GRKERNSEC
69468 + /* grsecurity */
69469 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69470 + u64 exec_id;
69471 +#endif
69472 +#ifdef CONFIG_GRKERNSEC_SETXID
69473 + const struct cred *delayed_cred;
69474 +#endif
69475 + struct dentry *gr_chroot_dentry;
69476 + struct acl_subject_label *acl;
69477 + struct acl_role_label *role;
69478 + struct file *exec_file;
69479 + u16 acl_role_id;
69480 + /* is this the task that authenticated to the special role */
69481 + u8 acl_sp_role;
69482 + u8 is_writable;
69483 + u8 brute;
69484 + u8 gr_is_chrooted;
69485 +#endif
69486 +
69487 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69488 /* Index of current stored adress in ret_stack */
69489 int curr_ret_stack;
69490 @@ -1542,6 +1588,57 @@ struct task_struct {
69491 #endif /* CONFIG_TRACING */
69492 };
69493
69494 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69495 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69496 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69497 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69498 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69499 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69500 +
69501 +#ifdef CONFIG_PAX_SOFTMODE
69502 +extern int pax_softmode;
69503 +#endif
69504 +
69505 +extern int pax_check_flags(unsigned long *);
69506 +
69507 +/* if tsk != current then task_lock must be held on it */
69508 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69509 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
69510 +{
69511 + if (likely(tsk->mm))
69512 + return tsk->mm->pax_flags;
69513 + else
69514 + return 0UL;
69515 +}
69516 +
69517 +/* if tsk != current then task_lock must be held on it */
69518 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69519 +{
69520 + if (likely(tsk->mm)) {
69521 + tsk->mm->pax_flags = flags;
69522 + return 0;
69523 + }
69524 + return -EINVAL;
69525 +}
69526 +#endif
69527 +
69528 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69529 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
69530 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69531 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69532 +#endif
69533 +
69534 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69535 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69536 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
69537 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69538 +
69539 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69540 +extern void pax_track_stack(void);
69541 +#else
69542 +static inline void pax_track_stack(void) {}
69543 +#endif
69544 +
69545 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69546 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69547
69548 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69549 #define PF_DUMPCORE 0x00000200 /* dumped core */
69550 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69551 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69552 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69553 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69554 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69555 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69556 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69557 @@ -1978,7 +2075,9 @@ void yield(void);
69558 extern struct exec_domain default_exec_domain;
69559
69560 union thread_union {
69561 +#ifndef CONFIG_X86
69562 struct thread_info thread_info;
69563 +#endif
69564 unsigned long stack[THREAD_SIZE/sizeof(long)];
69565 };
69566
69567 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69568 */
69569
69570 extern struct task_struct *find_task_by_vpid(pid_t nr);
69571 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69572 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69573 struct pid_namespace *ns);
69574
69575 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69576 extern void exit_itimers(struct signal_struct *);
69577 extern void flush_itimer_signals(void);
69578
69579 -extern NORET_TYPE void do_group_exit(int);
69580 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69581
69582 extern void daemonize(const char *, ...);
69583 extern int allow_signal(int);
69584 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69585
69586 #endif
69587
69588 -static inline int object_is_on_stack(void *obj)
69589 +static inline int object_starts_on_stack(void *obj)
69590 {
69591 - void *stack = task_stack_page(current);
69592 + const void *stack = task_stack_page(current);
69593
69594 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69595 }
69596
69597 +#ifdef CONFIG_PAX_USERCOPY
69598 +extern int object_is_on_stack(const void *obj, unsigned long len);
69599 +#endif
69600 +
69601 extern void thread_info_cache_init(void);
69602
69603 #ifdef CONFIG_DEBUG_STACK_USAGE
69604 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69605 index 1ee2c05..81b7ec4 100644
69606 --- a/include/linux/screen_info.h
69607 +++ b/include/linux/screen_info.h
69608 @@ -42,7 +42,8 @@ struct screen_info {
69609 __u16 pages; /* 0x32 */
69610 __u16 vesa_attributes; /* 0x34 */
69611 __u32 capabilities; /* 0x36 */
69612 - __u8 _reserved[6]; /* 0x3a */
69613 + __u16 vesapm_size; /* 0x3a */
69614 + __u8 _reserved[4]; /* 0x3c */
69615 } __attribute__((packed));
69616
69617 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69618 diff --git a/include/linux/security.h b/include/linux/security.h
69619 index d40d23f..d739b08 100644
69620 --- a/include/linux/security.h
69621 +++ b/include/linux/security.h
69622 @@ -34,6 +34,7 @@
69623 #include <linux/key.h>
69624 #include <linux/xfrm.h>
69625 #include <linux/gfp.h>
69626 +#include <linux/grsecurity.h>
69627 #include <net/flow.h>
69628
69629 /* Maximum number of letters for an LSM name string */
69630 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69631 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69632 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69633 extern int cap_task_setnice(struct task_struct *p, int nice);
69634 -extern int cap_syslog(int type);
69635 +extern int cap_syslog(int type, bool from_file);
69636 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69637
69638 struct msghdr;
69639 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69640 * logging to the console.
69641 * See the syslog(2) manual page for an explanation of the @type values.
69642 * @type contains the type of action.
69643 + * @from_file indicates the context of action (if it came from /proc).
69644 * Return 0 if permission is granted.
69645 * @settime:
69646 * Check permission to change the system time.
69647 @@ -1445,7 +1447,7 @@ struct security_operations {
69648 int (*sysctl) (struct ctl_table *table, int op);
69649 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69650 int (*quota_on) (struct dentry *dentry);
69651 - int (*syslog) (int type);
69652 + int (*syslog) (int type, bool from_file);
69653 int (*settime) (struct timespec *ts, struct timezone *tz);
69654 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69655
69656 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69657 int security_sysctl(struct ctl_table *table, int op);
69658 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69659 int security_quota_on(struct dentry *dentry);
69660 -int security_syslog(int type);
69661 +int security_syslog(int type, bool from_file);
69662 int security_settime(struct timespec *ts, struct timezone *tz);
69663 int security_vm_enough_memory(long pages);
69664 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69665 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69666 return 0;
69667 }
69668
69669 -static inline int security_syslog(int type)
69670 +static inline int security_syslog(int type, bool from_file)
69671 {
69672 - return cap_syslog(type);
69673 + return cap_syslog(type, from_file);
69674 }
69675
69676 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69677 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69678 index 8366d8f..cc5f9d6 100644
69679 --- a/include/linux/seq_file.h
69680 +++ b/include/linux/seq_file.h
69681 @@ -23,6 +23,9 @@ struct seq_file {
69682 u64 version;
69683 struct mutex lock;
69684 const struct seq_operations *op;
69685 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69686 + u64 exec_id;
69687 +#endif
69688 void *private;
69689 };
69690
69691 @@ -32,6 +35,7 @@ struct seq_operations {
69692 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69693 int (*show) (struct seq_file *m, void *v);
69694 };
69695 +typedef struct seq_operations __no_const seq_operations_no_const;
69696
69697 #define SEQ_SKIP 1
69698
69699 diff --git a/include/linux/shm.h b/include/linux/shm.h
69700 index eca6235..c7417ed 100644
69701 --- a/include/linux/shm.h
69702 +++ b/include/linux/shm.h
69703 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69704 pid_t shm_cprid;
69705 pid_t shm_lprid;
69706 struct user_struct *mlock_user;
69707 +#ifdef CONFIG_GRKERNSEC
69708 + time_t shm_createtime;
69709 + pid_t shm_lapid;
69710 +#endif
69711 };
69712
69713 /* shm_mode upper byte flags */
69714 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69715 index bcdd660..6e12e11 100644
69716 --- a/include/linux/skbuff.h
69717 +++ b/include/linux/skbuff.h
69718 @@ -14,6 +14,7 @@
69719 #ifndef _LINUX_SKBUFF_H
69720 #define _LINUX_SKBUFF_H
69721
69722 +#include <linux/const.h>
69723 #include <linux/kernel.h>
69724 #include <linux/kmemcheck.h>
69725 #include <linux/compiler.h>
69726 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69727 */
69728 static inline int skb_queue_empty(const struct sk_buff_head *list)
69729 {
69730 - return list->next == (struct sk_buff *)list;
69731 + return list->next == (const struct sk_buff *)list;
69732 }
69733
69734 /**
69735 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69736 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69737 const struct sk_buff *skb)
69738 {
69739 - return (skb->next == (struct sk_buff *) list);
69740 + return (skb->next == (const struct sk_buff *) list);
69741 }
69742
69743 /**
69744 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69745 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69746 const struct sk_buff *skb)
69747 {
69748 - return (skb->prev == (struct sk_buff *) list);
69749 + return (skb->prev == (const struct sk_buff *) list);
69750 }
69751
69752 /**
69753 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69754 * headroom, you should not reduce this.
69755 */
69756 #ifndef NET_SKB_PAD
69757 -#define NET_SKB_PAD 32
69758 +#define NET_SKB_PAD (_AC(32,UL))
69759 #endif
69760
69761 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69762 diff --git a/include/linux/slab.h b/include/linux/slab.h
69763 index 2da8372..a3be824 100644
69764 --- a/include/linux/slab.h
69765 +++ b/include/linux/slab.h
69766 @@ -11,12 +11,20 @@
69767
69768 #include <linux/gfp.h>
69769 #include <linux/types.h>
69770 +#include <linux/err.h>
69771
69772 /*
69773 * Flags to pass to kmem_cache_create().
69774 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69775 */
69776 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69777 +
69778 +#ifdef CONFIG_PAX_USERCOPY
69779 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69780 +#else
69781 +#define SLAB_USERCOPY 0x00000000UL
69782 +#endif
69783 +
69784 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69785 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69786 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69787 @@ -82,10 +90,13 @@
69788 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69789 * Both make kfree a no-op.
69790 */
69791 -#define ZERO_SIZE_PTR ((void *)16)
69792 +#define ZERO_SIZE_PTR \
69793 +({ \
69794 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69795 + (void *)(-MAX_ERRNO-1L); \
69796 +})
69797
69798 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69799 - (unsigned long)ZERO_SIZE_PTR)
69800 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69801
69802 /*
69803 * struct kmem_cache related prototypes
69804 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69805 void kfree(const void *);
69806 void kzfree(const void *);
69807 size_t ksize(const void *);
69808 +void check_object_size(const void *ptr, unsigned long n, bool to);
69809
69810 /*
69811 * Allocator specific definitions. These are mainly used to establish optimized
69812 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69813
69814 void __init kmem_cache_init_late(void);
69815
69816 +#define kmalloc(x, y) \
69817 +({ \
69818 + void *___retval; \
69819 + intoverflow_t ___x = (intoverflow_t)x; \
69820 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69821 + ___retval = NULL; \
69822 + else \
69823 + ___retval = kmalloc((size_t)___x, (y)); \
69824 + ___retval; \
69825 +})
69826 +
69827 +#define kmalloc_node(x, y, z) \
69828 +({ \
69829 + void *___retval; \
69830 + intoverflow_t ___x = (intoverflow_t)x; \
69831 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69832 + ___retval = NULL; \
69833 + else \
69834 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69835 + ___retval; \
69836 +})
69837 +
69838 +#define kzalloc(x, y) \
69839 +({ \
69840 + void *___retval; \
69841 + intoverflow_t ___x = (intoverflow_t)x; \
69842 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69843 + ___retval = NULL; \
69844 + else \
69845 + ___retval = kzalloc((size_t)___x, (y)); \
69846 + ___retval; \
69847 +})
69848 +
69849 #endif /* _LINUX_SLAB_H */
69850 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69851 index 850d057..d9dfe3c 100644
69852 --- a/include/linux/slab_def.h
69853 +++ b/include/linux/slab_def.h
69854 @@ -69,10 +69,10 @@ struct kmem_cache {
69855 unsigned long node_allocs;
69856 unsigned long node_frees;
69857 unsigned long node_overflow;
69858 - atomic_t allochit;
69859 - atomic_t allocmiss;
69860 - atomic_t freehit;
69861 - atomic_t freemiss;
69862 + atomic_unchecked_t allochit;
69863 + atomic_unchecked_t allocmiss;
69864 + atomic_unchecked_t freehit;
69865 + atomic_unchecked_t freemiss;
69866
69867 /*
69868 * If debugging is enabled, then the allocator can add additional
69869 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69870 index 5ad70a6..57f9f65 100644
69871 --- a/include/linux/slub_def.h
69872 +++ b/include/linux/slub_def.h
69873 @@ -86,7 +86,7 @@ struct kmem_cache {
69874 struct kmem_cache_order_objects max;
69875 struct kmem_cache_order_objects min;
69876 gfp_t allocflags; /* gfp flags to use on each alloc */
69877 - int refcount; /* Refcount for slab cache destroy */
69878 + atomic_t refcount; /* Refcount for slab cache destroy */
69879 void (*ctor)(void *);
69880 int inuse; /* Offset to metadata */
69881 int align; /* Alignment */
69882 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69883 #endif
69884
69885 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69886 -void *__kmalloc(size_t size, gfp_t flags);
69887 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69888
69889 #ifdef CONFIG_KMEMTRACE
69890 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69891 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69892 index 67ad11f..0bbd8af 100644
69893 --- a/include/linux/sonet.h
69894 +++ b/include/linux/sonet.h
69895 @@ -61,7 +61,7 @@ struct sonet_stats {
69896 #include <asm/atomic.h>
69897
69898 struct k_sonet_stats {
69899 -#define __HANDLE_ITEM(i) atomic_t i
69900 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69901 __SONET_ITEMS
69902 #undef __HANDLE_ITEM
69903 };
69904 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69905 index 6f52b4d..5500323 100644
69906 --- a/include/linux/sunrpc/cache.h
69907 +++ b/include/linux/sunrpc/cache.h
69908 @@ -125,7 +125,7 @@ struct cache_detail {
69909 */
69910 struct cache_req {
69911 struct cache_deferred_req *(*defer)(struct cache_req *req);
69912 -};
69913 +} __no_const;
69914 /* this must be embedded in a deferred_request that is being
69915 * delayed awaiting cache-fill
69916 */
69917 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69918 index 8ed9642..101ceab 100644
69919 --- a/include/linux/sunrpc/clnt.h
69920 +++ b/include/linux/sunrpc/clnt.h
69921 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69922 {
69923 switch (sap->sa_family) {
69924 case AF_INET:
69925 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69926 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69927 case AF_INET6:
69928 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69929 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69930 }
69931 return 0;
69932 }
69933 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69934 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69935 const struct sockaddr *src)
69936 {
69937 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69938 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69939 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69940
69941 dsin->sin_family = ssin->sin_family;
69942 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69943 if (sa->sa_family != AF_INET6)
69944 return 0;
69945
69946 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69947 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69948 }
69949
69950 #endif /* __KERNEL__ */
69951 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69952 index c14fe86..393245e 100644
69953 --- a/include/linux/sunrpc/svc_rdma.h
69954 +++ b/include/linux/sunrpc/svc_rdma.h
69955 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69956 extern unsigned int svcrdma_max_requests;
69957 extern unsigned int svcrdma_max_req_size;
69958
69959 -extern atomic_t rdma_stat_recv;
69960 -extern atomic_t rdma_stat_read;
69961 -extern atomic_t rdma_stat_write;
69962 -extern atomic_t rdma_stat_sq_starve;
69963 -extern atomic_t rdma_stat_rq_starve;
69964 -extern atomic_t rdma_stat_rq_poll;
69965 -extern atomic_t rdma_stat_rq_prod;
69966 -extern atomic_t rdma_stat_sq_poll;
69967 -extern atomic_t rdma_stat_sq_prod;
69968 +extern atomic_unchecked_t rdma_stat_recv;
69969 +extern atomic_unchecked_t rdma_stat_read;
69970 +extern atomic_unchecked_t rdma_stat_write;
69971 +extern atomic_unchecked_t rdma_stat_sq_starve;
69972 +extern atomic_unchecked_t rdma_stat_rq_starve;
69973 +extern atomic_unchecked_t rdma_stat_rq_poll;
69974 +extern atomic_unchecked_t rdma_stat_rq_prod;
69975 +extern atomic_unchecked_t rdma_stat_sq_poll;
69976 +extern atomic_unchecked_t rdma_stat_sq_prod;
69977
69978 #define RPCRDMA_VERSION 1
69979
69980 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69981 index 5e781d8..1e62818 100644
69982 --- a/include/linux/suspend.h
69983 +++ b/include/linux/suspend.h
69984 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69985 * which require special recovery actions in that situation.
69986 */
69987 struct platform_suspend_ops {
69988 - int (*valid)(suspend_state_t state);
69989 - int (*begin)(suspend_state_t state);
69990 - int (*prepare)(void);
69991 - int (*prepare_late)(void);
69992 - int (*enter)(suspend_state_t state);
69993 - void (*wake)(void);
69994 - void (*finish)(void);
69995 - void (*end)(void);
69996 - void (*recover)(void);
69997 + int (* const valid)(suspend_state_t state);
69998 + int (* const begin)(suspend_state_t state);
69999 + int (* const prepare)(void);
70000 + int (* const prepare_late)(void);
70001 + int (* const enter)(suspend_state_t state);
70002 + void (* const wake)(void);
70003 + void (* const finish)(void);
70004 + void (* const end)(void);
70005 + void (* const recover)(void);
70006 };
70007
70008 #ifdef CONFIG_SUSPEND
70009 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
70010 * suspend_set_ops - set platform dependent suspend operations
70011 * @ops: The new suspend operations to set.
70012 */
70013 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
70014 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
70015 extern int suspend_valid_only_mem(suspend_state_t state);
70016
70017 /**
70018 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
70019 #else /* !CONFIG_SUSPEND */
70020 #define suspend_valid_only_mem NULL
70021
70022 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
70023 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
70024 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
70025 #endif /* !CONFIG_SUSPEND */
70026
70027 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
70028 * platforms which require special recovery actions in that situation.
70029 */
70030 struct platform_hibernation_ops {
70031 - int (*begin)(void);
70032 - void (*end)(void);
70033 - int (*pre_snapshot)(void);
70034 - void (*finish)(void);
70035 - int (*prepare)(void);
70036 - int (*enter)(void);
70037 - void (*leave)(void);
70038 - int (*pre_restore)(void);
70039 - void (*restore_cleanup)(void);
70040 - void (*recover)(void);
70041 + int (* const begin)(void);
70042 + void (* const end)(void);
70043 + int (* const pre_snapshot)(void);
70044 + void (* const finish)(void);
70045 + int (* const prepare)(void);
70046 + int (* const enter)(void);
70047 + void (* const leave)(void);
70048 + int (* const pre_restore)(void);
70049 + void (* const restore_cleanup)(void);
70050 + void (* const recover)(void);
70051 };
70052
70053 #ifdef CONFIG_HIBERNATION
70054 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
70055 extern void swsusp_unset_page_free(struct page *);
70056 extern unsigned long get_safe_page(gfp_t gfp_mask);
70057
70058 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
70059 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
70060 extern int hibernate(void);
70061 extern bool system_entering_hibernation(void);
70062 #else /* CONFIG_HIBERNATION */
70063 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
70064 static inline void swsusp_set_page_free(struct page *p) {}
70065 static inline void swsusp_unset_page_free(struct page *p) {}
70066
70067 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
70068 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
70069 static inline int hibernate(void) { return -ENOSYS; }
70070 static inline bool system_entering_hibernation(void) { return false; }
70071 #endif /* CONFIG_HIBERNATION */
70072 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70073 index 0eb6942..a805cb6 100644
70074 --- a/include/linux/sysctl.h
70075 +++ b/include/linux/sysctl.h
70076 @@ -164,7 +164,11 @@ enum
70077 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
70078 };
70079
70080 -
70081 +#ifdef CONFIG_PAX_SOFTMODE
70082 +enum {
70083 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
70084 +};
70085 +#endif
70086
70087 /* CTL_VM names: */
70088 enum
70089 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
70090
70091 extern int proc_dostring(struct ctl_table *, int,
70092 void __user *, size_t *, loff_t *);
70093 +extern int proc_dostring_modpriv(struct ctl_table *, int,
70094 + void __user *, size_t *, loff_t *);
70095 extern int proc_dointvec(struct ctl_table *, int,
70096 void __user *, size_t *, loff_t *);
70097 extern int proc_dointvec_minmax(struct ctl_table *, int,
70098 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
70099
70100 extern ctl_handler sysctl_data;
70101 extern ctl_handler sysctl_string;
70102 +extern ctl_handler sysctl_string_modpriv;
70103 extern ctl_handler sysctl_intvec;
70104 extern ctl_handler sysctl_jiffies;
70105 extern ctl_handler sysctl_ms_jiffies;
70106 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70107 index 9d68fed..71f02cc 100644
70108 --- a/include/linux/sysfs.h
70109 +++ b/include/linux/sysfs.h
70110 @@ -75,8 +75,8 @@ struct bin_attribute {
70111 };
70112
70113 struct sysfs_ops {
70114 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
70115 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
70116 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
70117 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
70118 };
70119
70120 struct sysfs_dirent;
70121 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
70122 new file mode 100644
70123 index 0000000..3891139
70124 --- /dev/null
70125 +++ b/include/linux/syslog.h
70126 @@ -0,0 +1,52 @@
70127 +/* Syslog internals
70128 + *
70129 + * Copyright 2010 Canonical, Ltd.
70130 + * Author: Kees Cook <kees.cook@canonical.com>
70131 + *
70132 + * This program is free software; you can redistribute it and/or modify
70133 + * it under the terms of the GNU General Public License as published by
70134 + * the Free Software Foundation; either version 2, or (at your option)
70135 + * any later version.
70136 + *
70137 + * This program is distributed in the hope that it will be useful,
70138 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
70139 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70140 + * GNU General Public License for more details.
70141 + *
70142 + * You should have received a copy of the GNU General Public License
70143 + * along with this program; see the file COPYING. If not, write to
70144 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
70145 + */
70146 +
70147 +#ifndef _LINUX_SYSLOG_H
70148 +#define _LINUX_SYSLOG_H
70149 +
70150 +/* Close the log. Currently a NOP. */
70151 +#define SYSLOG_ACTION_CLOSE 0
70152 +/* Open the log. Currently a NOP. */
70153 +#define SYSLOG_ACTION_OPEN 1
70154 +/* Read from the log. */
70155 +#define SYSLOG_ACTION_READ 2
70156 +/* Read all messages remaining in the ring buffer. */
70157 +#define SYSLOG_ACTION_READ_ALL 3
70158 +/* Read and clear all messages remaining in the ring buffer */
70159 +#define SYSLOG_ACTION_READ_CLEAR 4
70160 +/* Clear ring buffer. */
70161 +#define SYSLOG_ACTION_CLEAR 5
70162 +/* Disable printk's to console */
70163 +#define SYSLOG_ACTION_CONSOLE_OFF 6
70164 +/* Enable printk's to console */
70165 +#define SYSLOG_ACTION_CONSOLE_ON 7
70166 +/* Set level of messages printed to console */
70167 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
70168 +/* Return number of unread characters in the log buffer */
70169 +#define SYSLOG_ACTION_SIZE_UNREAD 9
70170 +/* Return size of the log buffer */
70171 +#define SYSLOG_ACTION_SIZE_BUFFER 10
70172 +
70173 +#define SYSLOG_FROM_CALL 0
70174 +#define SYSLOG_FROM_FILE 1
70175 +
70176 +int do_syslog(int type, char __user *buf, int count, bool from_file);
70177 +
70178 +#endif /* _LINUX_SYSLOG_H */
70179 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70180 index a8cc4e1..98d3b85 100644
70181 --- a/include/linux/thread_info.h
70182 +++ b/include/linux/thread_info.h
70183 @@ -23,7 +23,7 @@ struct restart_block {
70184 };
70185 /* For futex_wait and futex_wait_requeue_pi */
70186 struct {
70187 - u32 *uaddr;
70188 + u32 __user *uaddr;
70189 u32 val;
70190 u32 flags;
70191 u32 bitset;
70192 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
70193 index 1eb44a9..f582df3 100644
70194 --- a/include/linux/tracehook.h
70195 +++ b/include/linux/tracehook.h
70196 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
70197 /*
70198 * ptrace report for syscall entry and exit looks identical.
70199 */
70200 -static inline void ptrace_report_syscall(struct pt_regs *regs)
70201 +static inline int ptrace_report_syscall(struct pt_regs *regs)
70202 {
70203 int ptrace = task_ptrace(current);
70204
70205 if (!(ptrace & PT_PTRACED))
70206 - return;
70207 + return 0;
70208
70209 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70210
70211 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70212 send_sig(current->exit_code, current, 1);
70213 current->exit_code = 0;
70214 }
70215 +
70216 + return fatal_signal_pending(current);
70217 }
70218
70219 /**
70220 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70221 static inline __must_check int tracehook_report_syscall_entry(
70222 struct pt_regs *regs)
70223 {
70224 - ptrace_report_syscall(regs);
70225 - return 0;
70226 + return ptrace_report_syscall(regs);
70227 }
70228
70229 /**
70230 diff --git a/include/linux/tty.h b/include/linux/tty.h
70231 index e9c57e9..ee6d489 100644
70232 --- a/include/linux/tty.h
70233 +++ b/include/linux/tty.h
70234 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70235 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70236 extern void tty_ldisc_enable(struct tty_struct *tty);
70237
70238 -
70239 /* n_tty.c */
70240 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70241
70242 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70243 index 0c4ee9b..9f7c426 100644
70244 --- a/include/linux/tty_ldisc.h
70245 +++ b/include/linux/tty_ldisc.h
70246 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70247
70248 struct module *owner;
70249
70250 - int refcount;
70251 + atomic_t refcount;
70252 };
70253
70254 struct tty_ldisc {
70255 diff --git a/include/linux/types.h b/include/linux/types.h
70256 index c42724f..d190eee 100644
70257 --- a/include/linux/types.h
70258 +++ b/include/linux/types.h
70259 @@ -191,10 +191,26 @@ typedef struct {
70260 volatile int counter;
70261 } atomic_t;
70262
70263 +#ifdef CONFIG_PAX_REFCOUNT
70264 +typedef struct {
70265 + volatile int counter;
70266 +} atomic_unchecked_t;
70267 +#else
70268 +typedef atomic_t atomic_unchecked_t;
70269 +#endif
70270 +
70271 #ifdef CONFIG_64BIT
70272 typedef struct {
70273 volatile long counter;
70274 } atomic64_t;
70275 +
70276 +#ifdef CONFIG_PAX_REFCOUNT
70277 +typedef struct {
70278 + volatile long counter;
70279 +} atomic64_unchecked_t;
70280 +#else
70281 +typedef atomic64_t atomic64_unchecked_t;
70282 +#endif
70283 #endif
70284
70285 struct ustat {
70286 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70287 index 6b58367..53a3e8e 100644
70288 --- a/include/linux/uaccess.h
70289 +++ b/include/linux/uaccess.h
70290 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70291 long ret; \
70292 mm_segment_t old_fs = get_fs(); \
70293 \
70294 - set_fs(KERNEL_DS); \
70295 pagefault_disable(); \
70296 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70297 - pagefault_enable(); \
70298 + set_fs(KERNEL_DS); \
70299 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70300 set_fs(old_fs); \
70301 + pagefault_enable(); \
70302 ret; \
70303 })
70304
70305 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70306 * Safely read from address @src to the buffer at @dst. If a kernel fault
70307 * happens, handle that and return -EFAULT.
70308 */
70309 -extern long probe_kernel_read(void *dst, void *src, size_t size);
70310 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
70311
70312 /*
70313 * probe_kernel_write(): safely attempt to write to a location
70314 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70315 * Safely write to address @dst from the buffer at @src. If a kernel fault
70316 * happens, handle that and return -EFAULT.
70317 */
70318 -extern long probe_kernel_write(void *dst, void *src, size_t size);
70319 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
70320
70321 #endif /* __LINUX_UACCESS_H__ */
70322 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70323 index 99c1b4d..bb94261 100644
70324 --- a/include/linux/unaligned/access_ok.h
70325 +++ b/include/linux/unaligned/access_ok.h
70326 @@ -6,32 +6,32 @@
70327
70328 static inline u16 get_unaligned_le16(const void *p)
70329 {
70330 - return le16_to_cpup((__le16 *)p);
70331 + return le16_to_cpup((const __le16 *)p);
70332 }
70333
70334 static inline u32 get_unaligned_le32(const void *p)
70335 {
70336 - return le32_to_cpup((__le32 *)p);
70337 + return le32_to_cpup((const __le32 *)p);
70338 }
70339
70340 static inline u64 get_unaligned_le64(const void *p)
70341 {
70342 - return le64_to_cpup((__le64 *)p);
70343 + return le64_to_cpup((const __le64 *)p);
70344 }
70345
70346 static inline u16 get_unaligned_be16(const void *p)
70347 {
70348 - return be16_to_cpup((__be16 *)p);
70349 + return be16_to_cpup((const __be16 *)p);
70350 }
70351
70352 static inline u32 get_unaligned_be32(const void *p)
70353 {
70354 - return be32_to_cpup((__be32 *)p);
70355 + return be32_to_cpup((const __be32 *)p);
70356 }
70357
70358 static inline u64 get_unaligned_be64(const void *p)
70359 {
70360 - return be64_to_cpup((__be64 *)p);
70361 + return be64_to_cpup((const __be64 *)p);
70362 }
70363
70364 static inline void put_unaligned_le16(u16 val, void *p)
70365 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70366 index 79b9837..b5a56f9 100644
70367 --- a/include/linux/vermagic.h
70368 +++ b/include/linux/vermagic.h
70369 @@ -26,9 +26,35 @@
70370 #define MODULE_ARCH_VERMAGIC ""
70371 #endif
70372
70373 +#ifdef CONFIG_PAX_REFCOUNT
70374 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
70375 +#else
70376 +#define MODULE_PAX_REFCOUNT ""
70377 +#endif
70378 +
70379 +#ifdef CONSTIFY_PLUGIN
70380 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70381 +#else
70382 +#define MODULE_CONSTIFY_PLUGIN ""
70383 +#endif
70384 +
70385 +#ifdef STACKLEAK_PLUGIN
70386 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70387 +#else
70388 +#define MODULE_STACKLEAK_PLUGIN ""
70389 +#endif
70390 +
70391 +#ifdef CONFIG_GRKERNSEC
70392 +#define MODULE_GRSEC "GRSEC "
70393 +#else
70394 +#define MODULE_GRSEC ""
70395 +#endif
70396 +
70397 #define VERMAGIC_STRING \
70398 UTS_RELEASE " " \
70399 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70400 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70401 - MODULE_ARCH_VERMAGIC
70402 + MODULE_ARCH_VERMAGIC \
70403 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70404 + MODULE_GRSEC
70405
70406 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70407 index 819a634..462ac12 100644
70408 --- a/include/linux/vmalloc.h
70409 +++ b/include/linux/vmalloc.h
70410 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70411 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70412 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70413 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70414 +
70415 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70416 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70417 +#endif
70418 +
70419 /* bits [20..32] reserved for arch specific ioremap internals */
70420
70421 /*
70422 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70423
70424 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70425
70426 +#define vmalloc(x) \
70427 +({ \
70428 + void *___retval; \
70429 + intoverflow_t ___x = (intoverflow_t)x; \
70430 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70431 + ___retval = NULL; \
70432 + else \
70433 + ___retval = vmalloc((unsigned long)___x); \
70434 + ___retval; \
70435 +})
70436 +
70437 +#define __vmalloc(x, y, z) \
70438 +({ \
70439 + void *___retval; \
70440 + intoverflow_t ___x = (intoverflow_t)x; \
70441 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70442 + ___retval = NULL; \
70443 + else \
70444 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70445 + ___retval; \
70446 +})
70447 +
70448 +#define vmalloc_user(x) \
70449 +({ \
70450 + void *___retval; \
70451 + intoverflow_t ___x = (intoverflow_t)x; \
70452 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70453 + ___retval = NULL; \
70454 + else \
70455 + ___retval = vmalloc_user((unsigned long)___x); \
70456 + ___retval; \
70457 +})
70458 +
70459 +#define vmalloc_exec(x) \
70460 +({ \
70461 + void *___retval; \
70462 + intoverflow_t ___x = (intoverflow_t)x; \
70463 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70464 + ___retval = NULL; \
70465 + else \
70466 + ___retval = vmalloc_exec((unsigned long)___x); \
70467 + ___retval; \
70468 +})
70469 +
70470 +#define vmalloc_node(x, y) \
70471 +({ \
70472 + void *___retval; \
70473 + intoverflow_t ___x = (intoverflow_t)x; \
70474 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70475 + ___retval = NULL; \
70476 + else \
70477 + ___retval = vmalloc_node((unsigned long)___x, (y));\
70478 + ___retval; \
70479 +})
70480 +
70481 +#define vmalloc_32(x) \
70482 +({ \
70483 + void *___retval; \
70484 + intoverflow_t ___x = (intoverflow_t)x; \
70485 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70486 + ___retval = NULL; \
70487 + else \
70488 + ___retval = vmalloc_32((unsigned long)___x); \
70489 + ___retval; \
70490 +})
70491 +
70492 +#define vmalloc_32_user(x) \
70493 +({ \
70494 + void *___retval; \
70495 + intoverflow_t ___x = (intoverflow_t)x; \
70496 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70497 + ___retval = NULL; \
70498 + else \
70499 + ___retval = vmalloc_32_user((unsigned long)___x);\
70500 + ___retval; \
70501 +})
70502 +
70503 #endif /* _LINUX_VMALLOC_H */
70504 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70505 index 13070d6..aa4159a 100644
70506 --- a/include/linux/vmstat.h
70507 +++ b/include/linux/vmstat.h
70508 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70509 /*
70510 * Zone based page accounting with per cpu differentials.
70511 */
70512 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70513 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70514
70515 static inline void zone_page_state_add(long x, struct zone *zone,
70516 enum zone_stat_item item)
70517 {
70518 - atomic_long_add(x, &zone->vm_stat[item]);
70519 - atomic_long_add(x, &vm_stat[item]);
70520 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70521 + atomic_long_add_unchecked(x, &vm_stat[item]);
70522 }
70523
70524 static inline unsigned long global_page_state(enum zone_stat_item item)
70525 {
70526 - long x = atomic_long_read(&vm_stat[item]);
70527 + long x = atomic_long_read_unchecked(&vm_stat[item]);
70528 #ifdef CONFIG_SMP
70529 if (x < 0)
70530 x = 0;
70531 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70532 static inline unsigned long zone_page_state(struct zone *zone,
70533 enum zone_stat_item item)
70534 {
70535 - long x = atomic_long_read(&zone->vm_stat[item]);
70536 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70537 #ifdef CONFIG_SMP
70538 if (x < 0)
70539 x = 0;
70540 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70541 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70542 enum zone_stat_item item)
70543 {
70544 - long x = atomic_long_read(&zone->vm_stat[item]);
70545 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70546
70547 #ifdef CONFIG_SMP
70548 int cpu;
70549 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70550
70551 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70552 {
70553 - atomic_long_inc(&zone->vm_stat[item]);
70554 - atomic_long_inc(&vm_stat[item]);
70555 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
70556 + atomic_long_inc_unchecked(&vm_stat[item]);
70557 }
70558
70559 static inline void __inc_zone_page_state(struct page *page,
70560 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70561
70562 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70563 {
70564 - atomic_long_dec(&zone->vm_stat[item]);
70565 - atomic_long_dec(&vm_stat[item]);
70566 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
70567 + atomic_long_dec_unchecked(&vm_stat[item]);
70568 }
70569
70570 static inline void __dec_zone_page_state(struct page *page,
70571 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70572 index 5c84af8..1a3b6e2 100644
70573 --- a/include/linux/xattr.h
70574 +++ b/include/linux/xattr.h
70575 @@ -33,6 +33,11 @@
70576 #define XATTR_USER_PREFIX "user."
70577 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70578
70579 +/* User namespace */
70580 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70581 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
70582 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70583 +
70584 struct inode;
70585 struct dentry;
70586
70587 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70588 index eed5fcc..5080d24 100644
70589 --- a/include/media/saa7146_vv.h
70590 +++ b/include/media/saa7146_vv.h
70591 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
70592 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70593
70594 /* the extension can override this */
70595 - struct v4l2_ioctl_ops ops;
70596 + v4l2_ioctl_ops_no_const ops;
70597 /* pointer to the saa7146 core ops */
70598 const struct v4l2_ioctl_ops *core_ops;
70599
70600 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70601 index 73c9867..2da8837 100644
70602 --- a/include/media/v4l2-dev.h
70603 +++ b/include/media/v4l2-dev.h
70604 @@ -34,7 +34,7 @@ struct v4l2_device;
70605 #define V4L2_FL_UNREGISTERED (0)
70606
70607 struct v4l2_file_operations {
70608 - struct module *owner;
70609 + struct module * const owner;
70610 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70611 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70612 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70613 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
70614 int (*open) (struct file *);
70615 int (*release) (struct file *);
70616 };
70617 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70618
70619 /*
70620 * Newer version of video_device, handled by videodev2.c
70621 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70622 index 5d5d550..f559ef1 100644
70623 --- a/include/media/v4l2-device.h
70624 +++ b/include/media/v4l2-device.h
70625 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70626 this function returns 0. If the name ends with a digit (e.g. cx18),
70627 then the name will be set to cx18-0 since cx180 looks really odd. */
70628 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70629 - atomic_t *instance);
70630 + atomic_unchecked_t *instance);
70631
70632 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70633 Since the parent disappears this ensures that v4l2_dev doesn't have an
70634 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70635 index 7a4529d..7244290 100644
70636 --- a/include/media/v4l2-ioctl.h
70637 +++ b/include/media/v4l2-ioctl.h
70638 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70639 long (*vidioc_default) (struct file *file, void *fh,
70640 int cmd, void *arg);
70641 };
70642 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70643
70644
70645 /* v4l debugging and diagnostics */
70646 diff --git a/include/net/flow.h b/include/net/flow.h
70647 index 809970b..c3df4f3 100644
70648 --- a/include/net/flow.h
70649 +++ b/include/net/flow.h
70650 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70651 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70652 u8 dir, flow_resolve_t resolver);
70653 extern void flow_cache_flush(void);
70654 -extern atomic_t flow_cache_genid;
70655 +extern atomic_unchecked_t flow_cache_genid;
70656
70657 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70658 {
70659 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70660 index 15e1f8fe..668837c 100644
70661 --- a/include/net/inetpeer.h
70662 +++ b/include/net/inetpeer.h
70663 @@ -24,7 +24,7 @@ struct inet_peer
70664 __u32 dtime; /* the time of last use of not
70665 * referenced entries */
70666 atomic_t refcnt;
70667 - atomic_t rid; /* Frag reception counter */
70668 + atomic_unchecked_t rid; /* Frag reception counter */
70669 __u32 tcp_ts;
70670 unsigned long tcp_ts_stamp;
70671 };
70672 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70673 index 98978e7..2243a3d 100644
70674 --- a/include/net/ip_vs.h
70675 +++ b/include/net/ip_vs.h
70676 @@ -365,7 +365,7 @@ struct ip_vs_conn {
70677 struct ip_vs_conn *control; /* Master control connection */
70678 atomic_t n_control; /* Number of controlled ones */
70679 struct ip_vs_dest *dest; /* real server */
70680 - atomic_t in_pkts; /* incoming packet counter */
70681 + atomic_unchecked_t in_pkts; /* incoming packet counter */
70682
70683 /* packet transmitter for different forwarding methods. If it
70684 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70685 @@ -466,7 +466,7 @@ struct ip_vs_dest {
70686 union nf_inet_addr addr; /* IP address of the server */
70687 __be16 port; /* port number of the server */
70688 volatile unsigned flags; /* dest status flags */
70689 - atomic_t conn_flags; /* flags to copy to conn */
70690 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
70691 atomic_t weight; /* server weight */
70692
70693 atomic_t refcnt; /* reference counter */
70694 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70695 index 69b610a..fe3962c 100644
70696 --- a/include/net/irda/ircomm_core.h
70697 +++ b/include/net/irda/ircomm_core.h
70698 @@ -51,7 +51,7 @@ typedef struct {
70699 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70700 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70701 struct ircomm_info *);
70702 -} call_t;
70703 +} __no_const call_t;
70704
70705 struct ircomm_cb {
70706 irda_queue_t queue;
70707 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70708 index eea2e61..08c692d 100644
70709 --- a/include/net/irda/ircomm_tty.h
70710 +++ b/include/net/irda/ircomm_tty.h
70711 @@ -35,6 +35,7 @@
70712 #include <linux/termios.h>
70713 #include <linux/timer.h>
70714 #include <linux/tty.h> /* struct tty_struct */
70715 +#include <asm/local.h>
70716
70717 #include <net/irda/irias_object.h>
70718 #include <net/irda/ircomm_core.h>
70719 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70720 unsigned short close_delay;
70721 unsigned short closing_wait; /* time to wait before closing */
70722
70723 - int open_count;
70724 - int blocked_open; /* # of blocked opens */
70725 + local_t open_count;
70726 + local_t blocked_open; /* # of blocked opens */
70727
70728 /* Protect concurent access to :
70729 * o self->open_count
70730 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70731 index f82a1e8..82d81e8 100644
70732 --- a/include/net/iucv/af_iucv.h
70733 +++ b/include/net/iucv/af_iucv.h
70734 @@ -87,7 +87,7 @@ struct iucv_sock {
70735 struct iucv_sock_list {
70736 struct hlist_head head;
70737 rwlock_t lock;
70738 - atomic_t autobind_name;
70739 + atomic_unchecked_t autobind_name;
70740 };
70741
70742 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70743 diff --git a/include/net/lapb.h b/include/net/lapb.h
70744 index 96cb5dd..25e8d4f 100644
70745 --- a/include/net/lapb.h
70746 +++ b/include/net/lapb.h
70747 @@ -95,7 +95,7 @@ struct lapb_cb {
70748 struct sk_buff_head write_queue;
70749 struct sk_buff_head ack_queue;
70750 unsigned char window;
70751 - struct lapb_register_struct callbacks;
70752 + struct lapb_register_struct *callbacks;
70753
70754 /* FRMR control information */
70755 struct lapb_frame frmr_data;
70756 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70757 index 3817fda..cdb2343 100644
70758 --- a/include/net/neighbour.h
70759 +++ b/include/net/neighbour.h
70760 @@ -131,7 +131,7 @@ struct neigh_ops
70761 int (*connected_output)(struct sk_buff*);
70762 int (*hh_output)(struct sk_buff*);
70763 int (*queue_xmit)(struct sk_buff*);
70764 -};
70765 +} __do_const;
70766
70767 struct pneigh_entry
70768 {
70769 diff --git a/include/net/netlink.h b/include/net/netlink.h
70770 index c344646..4778c71 100644
70771 --- a/include/net/netlink.h
70772 +++ b/include/net/netlink.h
70773 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70774 {
70775 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70776 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70777 - nlh->nlmsg_len <= remaining);
70778 + nlh->nlmsg_len <= (unsigned int)remaining);
70779 }
70780
70781 /**
70782 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70783 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70784 {
70785 if (mark)
70786 - skb_trim(skb, (unsigned char *) mark - skb->data);
70787 + skb_trim(skb, (const unsigned char *) mark - skb->data);
70788 }
70789
70790 /**
70791 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70792 index 9a4b8b7..e49e077 100644
70793 --- a/include/net/netns/ipv4.h
70794 +++ b/include/net/netns/ipv4.h
70795 @@ -54,7 +54,7 @@ struct netns_ipv4 {
70796 int current_rt_cache_rebuild_count;
70797
70798 struct timer_list rt_secret_timer;
70799 - atomic_t rt_genid;
70800 + atomic_unchecked_t rt_genid;
70801
70802 #ifdef CONFIG_IP_MROUTE
70803 struct sock *mroute_sk;
70804 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70805 index 8a6d529..171f401 100644
70806 --- a/include/net/sctp/sctp.h
70807 +++ b/include/net/sctp/sctp.h
70808 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70809
70810 #else /* SCTP_DEBUG */
70811
70812 -#define SCTP_DEBUG_PRINTK(whatever...)
70813 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70814 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70815 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70816 #define SCTP_ENABLE_DEBUG
70817 #define SCTP_DISABLE_DEBUG
70818 #define SCTP_ASSERT(expr, str, func)
70819 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70820 index d97f689..f3b90ab 100644
70821 --- a/include/net/secure_seq.h
70822 +++ b/include/net/secure_seq.h
70823 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70824 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70825 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70826 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70827 - __be16 dport);
70828 + __be16 dport);
70829 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70830 __be16 sport, __be16 dport);
70831 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70832 - __be16 sport, __be16 dport);
70833 + __be16 sport, __be16 dport);
70834 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70835 - __be16 sport, __be16 dport);
70836 + __be16 sport, __be16 dport);
70837 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70838 - __be16 sport, __be16 dport);
70839 + __be16 sport, __be16 dport);
70840
70841 #endif /* _NET_SECURE_SEQ */
70842 diff --git a/include/net/sock.h b/include/net/sock.h
70843 index 78adf52..99afd29 100644
70844 --- a/include/net/sock.h
70845 +++ b/include/net/sock.h
70846 @@ -272,7 +272,7 @@ struct sock {
70847 rwlock_t sk_callback_lock;
70848 int sk_err,
70849 sk_err_soft;
70850 - atomic_t sk_drops;
70851 + atomic_unchecked_t sk_drops;
70852 unsigned short sk_ack_backlog;
70853 unsigned short sk_max_ack_backlog;
70854 __u32 sk_priority;
70855 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70856 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70857 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70858 #else
70859 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70860 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70861 int inc)
70862 {
70863 }
70864 diff --git a/include/net/tcp.h b/include/net/tcp.h
70865 index 6cfe18b..dd21acb 100644
70866 --- a/include/net/tcp.h
70867 +++ b/include/net/tcp.h
70868 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70869 struct tcp_seq_afinfo {
70870 char *name;
70871 sa_family_t family;
70872 - struct file_operations seq_fops;
70873 - struct seq_operations seq_ops;
70874 + file_operations_no_const seq_fops;
70875 + seq_operations_no_const seq_ops;
70876 };
70877
70878 struct tcp_iter_state {
70879 diff --git a/include/net/udp.h b/include/net/udp.h
70880 index f98abd2..b4b042f 100644
70881 --- a/include/net/udp.h
70882 +++ b/include/net/udp.h
70883 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70884 char *name;
70885 sa_family_t family;
70886 struct udp_table *udp_table;
70887 - struct file_operations seq_fops;
70888 - struct seq_operations seq_ops;
70889 + file_operations_no_const seq_fops;
70890 + seq_operations_no_const seq_ops;
70891 };
70892
70893 struct udp_iter_state {
70894 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70895 index cbb822e..e9c1cbe 100644
70896 --- a/include/rdma/iw_cm.h
70897 +++ b/include/rdma/iw_cm.h
70898 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70899 int backlog);
70900
70901 int (*destroy_listen)(struct iw_cm_id *cm_id);
70902 -};
70903 +} __no_const;
70904
70905 /**
70906 * iw_create_cm_id - Create an IW CM identifier.
70907 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70908 index 09a124b..caa8ca8 100644
70909 --- a/include/scsi/libfc.h
70910 +++ b/include/scsi/libfc.h
70911 @@ -675,6 +675,7 @@ struct libfc_function_template {
70912 */
70913 void (*disc_stop_final) (struct fc_lport *);
70914 };
70915 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70916
70917 /* information used by the discovery layer */
70918 struct fc_disc {
70919 @@ -707,7 +708,7 @@ struct fc_lport {
70920 struct fc_disc disc;
70921
70922 /* Operational Information */
70923 - struct libfc_function_template tt;
70924 + libfc_function_template_no_const tt;
70925 u8 link_up;
70926 u8 qfull;
70927 enum fc_lport_state state;
70928 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70929 index de8e180..f15e0d7 100644
70930 --- a/include/scsi/scsi_device.h
70931 +++ b/include/scsi/scsi_device.h
70932 @@ -156,9 +156,9 @@ struct scsi_device {
70933 unsigned int max_device_blocked; /* what device_blocked counts down from */
70934 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70935
70936 - atomic_t iorequest_cnt;
70937 - atomic_t iodone_cnt;
70938 - atomic_t ioerr_cnt;
70939 + atomic_unchecked_t iorequest_cnt;
70940 + atomic_unchecked_t iodone_cnt;
70941 + atomic_unchecked_t ioerr_cnt;
70942
70943 struct device sdev_gendev,
70944 sdev_dev;
70945 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70946 index fc50bd6..81ba9cb 100644
70947 --- a/include/scsi/scsi_transport_fc.h
70948 +++ b/include/scsi/scsi_transport_fc.h
70949 @@ -708,7 +708,7 @@ struct fc_function_template {
70950 unsigned long show_host_system_hostname:1;
70951
70952 unsigned long disable_target_scan:1;
70953 -};
70954 +} __do_const;
70955
70956
70957 /**
70958 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70959 index 3dae3f7..8440d6f 100644
70960 --- a/include/sound/ac97_codec.h
70961 +++ b/include/sound/ac97_codec.h
70962 @@ -419,15 +419,15 @@
70963 struct snd_ac97;
70964
70965 struct snd_ac97_build_ops {
70966 - int (*build_3d) (struct snd_ac97 *ac97);
70967 - int (*build_specific) (struct snd_ac97 *ac97);
70968 - int (*build_spdif) (struct snd_ac97 *ac97);
70969 - int (*build_post_spdif) (struct snd_ac97 *ac97);
70970 + int (* const build_3d) (struct snd_ac97 *ac97);
70971 + int (* const build_specific) (struct snd_ac97 *ac97);
70972 + int (* const build_spdif) (struct snd_ac97 *ac97);
70973 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
70974 #ifdef CONFIG_PM
70975 - void (*suspend) (struct snd_ac97 *ac97);
70976 - void (*resume) (struct snd_ac97 *ac97);
70977 + void (* const suspend) (struct snd_ac97 *ac97);
70978 + void (* const resume) (struct snd_ac97 *ac97);
70979 #endif
70980 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70981 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70982 };
70983
70984 struct snd_ac97_bus_ops {
70985 @@ -477,7 +477,7 @@ struct snd_ac97_template {
70986
70987 struct snd_ac97 {
70988 /* -- lowlevel (hardware) driver specific -- */
70989 - struct snd_ac97_build_ops * build_ops;
70990 + const struct snd_ac97_build_ops * build_ops;
70991 void *private_data;
70992 void (*private_free) (struct snd_ac97 *ac97);
70993 /* --- */
70994 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70995 index 891cf1a..a94ba2b 100644
70996 --- a/include/sound/ak4xxx-adda.h
70997 +++ b/include/sound/ak4xxx-adda.h
70998 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70999 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
71000 unsigned char val);
71001 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
71002 -};
71003 +} __no_const;
71004
71005 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
71006
71007 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
71008 index 8c05e47..2b5df97 100644
71009 --- a/include/sound/hwdep.h
71010 +++ b/include/sound/hwdep.h
71011 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
71012 struct snd_hwdep_dsp_status *status);
71013 int (*dsp_load)(struct snd_hwdep *hw,
71014 struct snd_hwdep_dsp_image *image);
71015 -};
71016 +} __no_const;
71017
71018 struct snd_hwdep {
71019 struct snd_card *card;
71020 diff --git a/include/sound/info.h b/include/sound/info.h
71021 index 112e894..6fda5b5 100644
71022 --- a/include/sound/info.h
71023 +++ b/include/sound/info.h
71024 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
71025 struct snd_info_buffer *buffer);
71026 void (*write)(struct snd_info_entry *entry,
71027 struct snd_info_buffer *buffer);
71028 -};
71029 +} __no_const;
71030
71031 struct snd_info_entry_ops {
71032 int (*open)(struct snd_info_entry *entry,
71033 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
71034 index de6d981..590a550 100644
71035 --- a/include/sound/pcm.h
71036 +++ b/include/sound/pcm.h
71037 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
71038 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
71039 int (*ack)(struct snd_pcm_substream *substream);
71040 };
71041 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
71042
71043 /*
71044 *
71045 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
71046 index 736eac7..fe8a80f 100644
71047 --- a/include/sound/sb16_csp.h
71048 +++ b/include/sound/sb16_csp.h
71049 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
71050 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
71051 int (*csp_stop) (struct snd_sb_csp * p);
71052 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
71053 -};
71054 +} __no_const;
71055
71056 /*
71057 * CSP private data
71058 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
71059 index 444cd6b..3327cc5 100644
71060 --- a/include/sound/ymfpci.h
71061 +++ b/include/sound/ymfpci.h
71062 @@ -358,7 +358,7 @@ struct snd_ymfpci {
71063 spinlock_t reg_lock;
71064 spinlock_t voice_lock;
71065 wait_queue_head_t interrupt_sleep;
71066 - atomic_t interrupt_sleep_count;
71067 + atomic_unchecked_t interrupt_sleep_count;
71068 struct snd_info_entry *proc_entry;
71069 const struct firmware *dsp_microcode;
71070 const struct firmware *controller_microcode;
71071 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71072 index b89f9db..f097b38 100644
71073 --- a/include/trace/events/irq.h
71074 +++ b/include/trace/events/irq.h
71075 @@ -34,7 +34,7 @@
71076 */
71077 TRACE_EVENT(irq_handler_entry,
71078
71079 - TP_PROTO(int irq, struct irqaction *action),
71080 + TP_PROTO(int irq, const struct irqaction *action),
71081
71082 TP_ARGS(irq, action),
71083
71084 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
71085 */
71086 TRACE_EVENT(irq_handler_exit,
71087
71088 - TP_PROTO(int irq, struct irqaction *action, int ret),
71089 + TP_PROTO(int irq, const struct irqaction *action, int ret),
71090
71091 TP_ARGS(irq, action, ret),
71092
71093 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
71094 */
71095 TRACE_EVENT(softirq_entry,
71096
71097 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71098 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71099
71100 TP_ARGS(h, vec),
71101
71102 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
71103 */
71104 TRACE_EVENT(softirq_exit,
71105
71106 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71107 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71108
71109 TP_ARGS(h, vec),
71110
71111 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71112 index 0993a22..32ba2fe 100644
71113 --- a/include/video/uvesafb.h
71114 +++ b/include/video/uvesafb.h
71115 @@ -177,6 +177,7 @@ struct uvesafb_par {
71116 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71117 u8 pmi_setpal; /* PMI for palette changes */
71118 u16 *pmi_base; /* protected mode interface location */
71119 + u8 *pmi_code; /* protected mode code location */
71120 void *pmi_start;
71121 void *pmi_pal;
71122 u8 *vbe_state_orig; /*
71123 diff --git a/init/Kconfig b/init/Kconfig
71124 index d72691b..3996e54 100644
71125 --- a/init/Kconfig
71126 +++ b/init/Kconfig
71127 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
71128
71129 config COMPAT_BRK
71130 bool "Disable heap randomization"
71131 - default y
71132 + default n
71133 help
71134 Randomizing heap placement makes heap exploits harder, but it
71135 also breaks ancient binaries (including anything libc5 based).
71136 diff --git a/init/do_mounts.c b/init/do_mounts.c
71137 index bb008d0..4fa3933 100644
71138 --- a/init/do_mounts.c
71139 +++ b/init/do_mounts.c
71140 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
71141
71142 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71143 {
71144 - int err = sys_mount(name, "/root", fs, flags, data);
71145 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
71146 if (err)
71147 return err;
71148
71149 - sys_chdir("/root");
71150 + sys_chdir((__force const char __user *)"/root");
71151 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
71152 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
71153 current->fs->pwd.mnt->mnt_sb->s_type->name,
71154 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
71155 va_start(args, fmt);
71156 vsprintf(buf, fmt, args);
71157 va_end(args);
71158 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
71159 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
71160 if (fd >= 0) {
71161 sys_ioctl(fd, FDEJECT, 0);
71162 sys_close(fd);
71163 }
71164 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
71165 - fd = sys_open("/dev/console", O_RDWR, 0);
71166 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
71167 if (fd >= 0) {
71168 sys_ioctl(fd, TCGETS, (long)&termios);
71169 termios.c_lflag &= ~ICANON;
71170 sys_ioctl(fd, TCSETSF, (long)&termios);
71171 - sys_read(fd, &c, 1);
71172 + sys_read(fd, (char __user *)&c, 1);
71173 termios.c_lflag |= ICANON;
71174 sys_ioctl(fd, TCSETSF, (long)&termios);
71175 sys_close(fd);
71176 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
71177 mount_root();
71178 out:
71179 devtmpfs_mount("dev");
71180 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71181 - sys_chroot(".");
71182 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
71183 + sys_chroot((__force char __user *)".");
71184 }
71185 diff --git a/init/do_mounts.h b/init/do_mounts.h
71186 index f5b978a..69dbfe8 100644
71187 --- a/init/do_mounts.h
71188 +++ b/init/do_mounts.h
71189 @@ -15,15 +15,15 @@ extern int root_mountflags;
71190
71191 static inline int create_dev(char *name, dev_t dev)
71192 {
71193 - sys_unlink(name);
71194 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
71195 + sys_unlink((char __force_user *)name);
71196 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
71197 }
71198
71199 #if BITS_PER_LONG == 32
71200 static inline u32 bstat(char *name)
71201 {
71202 struct stat64 stat;
71203 - if (sys_stat64(name, &stat) != 0)
71204 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71205 return 0;
71206 if (!S_ISBLK(stat.st_mode))
71207 return 0;
71208 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71209 static inline u32 bstat(char *name)
71210 {
71211 struct stat stat;
71212 - if (sys_newstat(name, &stat) != 0)
71213 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71214 return 0;
71215 if (!S_ISBLK(stat.st_mode))
71216 return 0;
71217 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71218 index 614241b..4da046b 100644
71219 --- a/init/do_mounts_initrd.c
71220 +++ b/init/do_mounts_initrd.c
71221 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71222 sys_close(old_fd);sys_close(root_fd);
71223 sys_close(0);sys_close(1);sys_close(2);
71224 sys_setsid();
71225 - (void) sys_open("/dev/console",O_RDWR,0);
71226 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71227 (void) sys_dup(0);
71228 (void) sys_dup(0);
71229 return kernel_execve(shell, argv, envp_init);
71230 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71231 create_dev("/dev/root.old", Root_RAM0);
71232 /* mount initrd on rootfs' /root */
71233 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71234 - sys_mkdir("/old", 0700);
71235 - root_fd = sys_open("/", 0, 0);
71236 - old_fd = sys_open("/old", 0, 0);
71237 + sys_mkdir((const char __force_user *)"/old", 0700);
71238 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
71239 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71240 /* move initrd over / and chdir/chroot in initrd root */
71241 - sys_chdir("/root");
71242 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71243 - sys_chroot(".");
71244 + sys_chdir((const char __force_user *)"/root");
71245 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71246 + sys_chroot((const char __force_user *)".");
71247
71248 /*
71249 * In case that a resume from disk is carried out by linuxrc or one of
71250 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71251
71252 /* move initrd to rootfs' /old */
71253 sys_fchdir(old_fd);
71254 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
71255 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71256 /* switch root and cwd back to / of rootfs */
71257 sys_fchdir(root_fd);
71258 - sys_chroot(".");
71259 + sys_chroot((const char __force_user *)".");
71260 sys_close(old_fd);
71261 sys_close(root_fd);
71262
71263 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71264 - sys_chdir("/old");
71265 + sys_chdir((const char __force_user *)"/old");
71266 return;
71267 }
71268
71269 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71270 mount_root();
71271
71272 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71273 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71274 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71275 if (!error)
71276 printk("okay\n");
71277 else {
71278 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
71279 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71280 if (error == -ENOENT)
71281 printk("/initrd does not exist. Ignored.\n");
71282 else
71283 printk("failed\n");
71284 printk(KERN_NOTICE "Unmounting old root\n");
71285 - sys_umount("/old", MNT_DETACH);
71286 + sys_umount((char __force_user *)"/old", MNT_DETACH);
71287 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71288 if (fd < 0) {
71289 error = fd;
71290 @@ -119,11 +119,11 @@ int __init initrd_load(void)
71291 * mounted in the normal path.
71292 */
71293 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71294 - sys_unlink("/initrd.image");
71295 + sys_unlink((const char __force_user *)"/initrd.image");
71296 handle_initrd();
71297 return 1;
71298 }
71299 }
71300 - sys_unlink("/initrd.image");
71301 + sys_unlink((const char __force_user *)"/initrd.image");
71302 return 0;
71303 }
71304 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71305 index 69aebbf..c0bf6a7 100644
71306 --- a/init/do_mounts_md.c
71307 +++ b/init/do_mounts_md.c
71308 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71309 partitioned ? "_d" : "", minor,
71310 md_setup_args[ent].device_names);
71311
71312 - fd = sys_open(name, 0, 0);
71313 + fd = sys_open((char __force_user *)name, 0, 0);
71314 if (fd < 0) {
71315 printk(KERN_ERR "md: open failed - cannot start "
71316 "array %s\n", name);
71317 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71318 * array without it
71319 */
71320 sys_close(fd);
71321 - fd = sys_open(name, 0, 0);
71322 + fd = sys_open((char __force_user *)name, 0, 0);
71323 sys_ioctl(fd, BLKRRPART, 0);
71324 }
71325 sys_close(fd);
71326 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71327
71328 wait_for_device_probe();
71329
71330 - fd = sys_open("/dev/md0", 0, 0);
71331 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71332 if (fd >= 0) {
71333 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71334 sys_close(fd);
71335 diff --git a/init/initramfs.c b/init/initramfs.c
71336 index 1fd59b8..a01b079 100644
71337 --- a/init/initramfs.c
71338 +++ b/init/initramfs.c
71339 @@ -74,7 +74,7 @@ static void __init free_hash(void)
71340 }
71341 }
71342
71343 -static long __init do_utime(char __user *filename, time_t mtime)
71344 +static long __init do_utime(__force char __user *filename, time_t mtime)
71345 {
71346 struct timespec t[2];
71347
71348 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
71349 struct dir_entry *de, *tmp;
71350 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71351 list_del(&de->list);
71352 - do_utime(de->name, de->mtime);
71353 + do_utime((char __force_user *)de->name, de->mtime);
71354 kfree(de->name);
71355 kfree(de);
71356 }
71357 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
71358 if (nlink >= 2) {
71359 char *old = find_link(major, minor, ino, mode, collected);
71360 if (old)
71361 - return (sys_link(old, collected) < 0) ? -1 : 1;
71362 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71363 }
71364 return 0;
71365 }
71366 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71367 {
71368 struct stat st;
71369
71370 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71371 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71372 if (S_ISDIR(st.st_mode))
71373 - sys_rmdir(path);
71374 + sys_rmdir((char __force_user *)path);
71375 else
71376 - sys_unlink(path);
71377 + sys_unlink((char __force_user *)path);
71378 }
71379 }
71380
71381 @@ -305,7 +305,7 @@ static int __init do_name(void)
71382 int openflags = O_WRONLY|O_CREAT;
71383 if (ml != 1)
71384 openflags |= O_TRUNC;
71385 - wfd = sys_open(collected, openflags, mode);
71386 + wfd = sys_open((char __force_user *)collected, openflags, mode);
71387
71388 if (wfd >= 0) {
71389 sys_fchown(wfd, uid, gid);
71390 @@ -317,17 +317,17 @@ static int __init do_name(void)
71391 }
71392 }
71393 } else if (S_ISDIR(mode)) {
71394 - sys_mkdir(collected, mode);
71395 - sys_chown(collected, uid, gid);
71396 - sys_chmod(collected, mode);
71397 + sys_mkdir((char __force_user *)collected, mode);
71398 + sys_chown((char __force_user *)collected, uid, gid);
71399 + sys_chmod((char __force_user *)collected, mode);
71400 dir_add(collected, mtime);
71401 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71402 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71403 if (maybe_link() == 0) {
71404 - sys_mknod(collected, mode, rdev);
71405 - sys_chown(collected, uid, gid);
71406 - sys_chmod(collected, mode);
71407 - do_utime(collected, mtime);
71408 + sys_mknod((char __force_user *)collected, mode, rdev);
71409 + sys_chown((char __force_user *)collected, uid, gid);
71410 + sys_chmod((char __force_user *)collected, mode);
71411 + do_utime((char __force_user *)collected, mtime);
71412 }
71413 }
71414 return 0;
71415 @@ -336,15 +336,15 @@ static int __init do_name(void)
71416 static int __init do_copy(void)
71417 {
71418 if (count >= body_len) {
71419 - sys_write(wfd, victim, body_len);
71420 + sys_write(wfd, (char __force_user *)victim, body_len);
71421 sys_close(wfd);
71422 - do_utime(vcollected, mtime);
71423 + do_utime((char __force_user *)vcollected, mtime);
71424 kfree(vcollected);
71425 eat(body_len);
71426 state = SkipIt;
71427 return 0;
71428 } else {
71429 - sys_write(wfd, victim, count);
71430 + sys_write(wfd, (char __force_user *)victim, count);
71431 body_len -= count;
71432 eat(count);
71433 return 1;
71434 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
71435 {
71436 collected[N_ALIGN(name_len) + body_len] = '\0';
71437 clean_path(collected, 0);
71438 - sys_symlink(collected + N_ALIGN(name_len), collected);
71439 - sys_lchown(collected, uid, gid);
71440 - do_utime(collected, mtime);
71441 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71442 + sys_lchown((char __force_user *)collected, uid, gid);
71443 + do_utime((char __force_user *)collected, mtime);
71444 state = SkipIt;
71445 next_state = Reset;
71446 return 0;
71447 diff --git a/init/main.c b/init/main.c
71448 index 1eb4bd5..fea5bbe 100644
71449 --- a/init/main.c
71450 +++ b/init/main.c
71451 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71452 #ifdef CONFIG_TC
71453 extern void tc_init(void);
71454 #endif
71455 +extern void grsecurity_init(void);
71456
71457 enum system_states system_state __read_mostly;
71458 EXPORT_SYMBOL(system_state);
71459 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71460
71461 __setup("reset_devices", set_reset_devices);
71462
71463 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71464 +extern char pax_enter_kernel_user[];
71465 +extern char pax_exit_kernel_user[];
71466 +extern pgdval_t clone_pgd_mask;
71467 +#endif
71468 +
71469 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71470 +static int __init setup_pax_nouderef(char *str)
71471 +{
71472 +#ifdef CONFIG_X86_32
71473 + unsigned int cpu;
71474 + struct desc_struct *gdt;
71475 +
71476 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71477 + gdt = get_cpu_gdt_table(cpu);
71478 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71479 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71480 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71481 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71482 + }
71483 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71484 +#else
71485 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71486 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71487 + clone_pgd_mask = ~(pgdval_t)0UL;
71488 +#endif
71489 +
71490 + return 0;
71491 +}
71492 +early_param("pax_nouderef", setup_pax_nouderef);
71493 +#endif
71494 +
71495 +#ifdef CONFIG_PAX_SOFTMODE
71496 +int pax_softmode;
71497 +
71498 +static int __init setup_pax_softmode(char *str)
71499 +{
71500 + get_option(&str, &pax_softmode);
71501 + return 1;
71502 +}
71503 +__setup("pax_softmode=", setup_pax_softmode);
71504 +#endif
71505 +
71506 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71507 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71508 static const char *panic_later, *panic_param;
71509 @@ -705,52 +749,53 @@ int initcall_debug;
71510 core_param(initcall_debug, initcall_debug, bool, 0644);
71511
71512 static char msgbuf[64];
71513 -static struct boot_trace_call call;
71514 -static struct boot_trace_ret ret;
71515 +static struct boot_trace_call trace_call;
71516 +static struct boot_trace_ret trace_ret;
71517
71518 int do_one_initcall(initcall_t fn)
71519 {
71520 int count = preempt_count();
71521 ktime_t calltime, delta, rettime;
71522 + const char *msg1 = "", *msg2 = "";
71523
71524 if (initcall_debug) {
71525 - call.caller = task_pid_nr(current);
71526 - printk("calling %pF @ %i\n", fn, call.caller);
71527 + trace_call.caller = task_pid_nr(current);
71528 + printk("calling %pF @ %i\n", fn, trace_call.caller);
71529 calltime = ktime_get();
71530 - trace_boot_call(&call, fn);
71531 + trace_boot_call(&trace_call, fn);
71532 enable_boot_trace();
71533 }
71534
71535 - ret.result = fn();
71536 + trace_ret.result = fn();
71537
71538 if (initcall_debug) {
71539 disable_boot_trace();
71540 rettime = ktime_get();
71541 delta = ktime_sub(rettime, calltime);
71542 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71543 - trace_boot_ret(&ret, fn);
71544 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71545 + trace_boot_ret(&trace_ret, fn);
71546 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71547 - ret.result, ret.duration);
71548 + trace_ret.result, trace_ret.duration);
71549 }
71550
71551 msgbuf[0] = 0;
71552
71553 - if (ret.result && ret.result != -ENODEV && initcall_debug)
71554 - sprintf(msgbuf, "error code %d ", ret.result);
71555 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71556 + sprintf(msgbuf, "error code %d ", trace_ret.result);
71557
71558 if (preempt_count() != count) {
71559 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71560 + msg1 = " preemption imbalance";
71561 preempt_count() = count;
71562 }
71563 if (irqs_disabled()) {
71564 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71565 + msg2 = " disabled interrupts";
71566 local_irq_enable();
71567 }
71568 - if (msgbuf[0]) {
71569 - printk("initcall %pF returned with %s\n", fn, msgbuf);
71570 + if (msgbuf[0] || *msg1 || *msg2) {
71571 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71572 }
71573
71574 - return ret.result;
71575 + return trace_ret.result;
71576 }
71577
71578
71579 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71580 if (!ramdisk_execute_command)
71581 ramdisk_execute_command = "/init";
71582
71583 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71584 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71585 ramdisk_execute_command = NULL;
71586 prepare_namespace();
71587 }
71588
71589 + grsecurity_init();
71590 +
71591 /*
71592 * Ok, we have completed the initial bootup, and
71593 * we're essentially up and running. Get rid of the
71594 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71595 index f4c1a3a..96c19bd 100644
71596 --- a/init/noinitramfs.c
71597 +++ b/init/noinitramfs.c
71598 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71599 {
71600 int err;
71601
71602 - err = sys_mkdir("/dev", 0755);
71603 + err = sys_mkdir((const char __user *)"/dev", 0755);
71604 if (err < 0)
71605 goto out;
71606
71607 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71608 if (err < 0)
71609 goto out;
71610
71611 - err = sys_mkdir("/root", 0700);
71612 + err = sys_mkdir((const char __user *)"/root", 0700);
71613 if (err < 0)
71614 goto out;
71615
71616 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71617 index d01bc14..8df81db 100644
71618 --- a/ipc/mqueue.c
71619 +++ b/ipc/mqueue.c
71620 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71621 mq_bytes = (mq_msg_tblsz +
71622 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71623
71624 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71625 spin_lock(&mq_lock);
71626 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71627 u->mq_bytes + mq_bytes >
71628 diff --git a/ipc/msg.c b/ipc/msg.c
71629 index 779f762..4af9e36 100644
71630 --- a/ipc/msg.c
71631 +++ b/ipc/msg.c
71632 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71633 return security_msg_queue_associate(msq, msgflg);
71634 }
71635
71636 +static struct ipc_ops msg_ops = {
71637 + .getnew = newque,
71638 + .associate = msg_security,
71639 + .more_checks = NULL
71640 +};
71641 +
71642 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71643 {
71644 struct ipc_namespace *ns;
71645 - struct ipc_ops msg_ops;
71646 struct ipc_params msg_params;
71647
71648 ns = current->nsproxy->ipc_ns;
71649
71650 - msg_ops.getnew = newque;
71651 - msg_ops.associate = msg_security;
71652 - msg_ops.more_checks = NULL;
71653 -
71654 msg_params.key = key;
71655 msg_params.flg = msgflg;
71656
71657 diff --git a/ipc/sem.c b/ipc/sem.c
71658 index b781007..f738b04 100644
71659 --- a/ipc/sem.c
71660 +++ b/ipc/sem.c
71661 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71662 return 0;
71663 }
71664
71665 +static struct ipc_ops sem_ops = {
71666 + .getnew = newary,
71667 + .associate = sem_security,
71668 + .more_checks = sem_more_checks
71669 +};
71670 +
71671 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71672 {
71673 struct ipc_namespace *ns;
71674 - struct ipc_ops sem_ops;
71675 struct ipc_params sem_params;
71676
71677 ns = current->nsproxy->ipc_ns;
71678 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71679 if (nsems < 0 || nsems > ns->sc_semmsl)
71680 return -EINVAL;
71681
71682 - sem_ops.getnew = newary;
71683 - sem_ops.associate = sem_security;
71684 - sem_ops.more_checks = sem_more_checks;
71685 -
71686 sem_params.key = key;
71687 sem_params.flg = semflg;
71688 sem_params.u.nsems = nsems;
71689 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71690 ushort* sem_io = fast_sem_io;
71691 int nsems;
71692
71693 + pax_track_stack();
71694 +
71695 sma = sem_lock_check(ns, semid);
71696 if (IS_ERR(sma))
71697 return PTR_ERR(sma);
71698 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71699 unsigned long jiffies_left = 0;
71700 struct ipc_namespace *ns;
71701
71702 + pax_track_stack();
71703 +
71704 ns = current->nsproxy->ipc_ns;
71705
71706 if (nsops < 1 || semid < 0)
71707 diff --git a/ipc/shm.c b/ipc/shm.c
71708 index d30732c..e4992cd 100644
71709 --- a/ipc/shm.c
71710 +++ b/ipc/shm.c
71711 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71712 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71713 #endif
71714
71715 +#ifdef CONFIG_GRKERNSEC
71716 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71717 + const time_t shm_createtime, const uid_t cuid,
71718 + const int shmid);
71719 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71720 + const time_t shm_createtime);
71721 +#endif
71722 +
71723 void shm_init_ns(struct ipc_namespace *ns)
71724 {
71725 ns->shm_ctlmax = SHMMAX;
71726 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71727 shp->shm_lprid = 0;
71728 shp->shm_atim = shp->shm_dtim = 0;
71729 shp->shm_ctim = get_seconds();
71730 +#ifdef CONFIG_GRKERNSEC
71731 + {
71732 + struct timespec timeval;
71733 + do_posix_clock_monotonic_gettime(&timeval);
71734 +
71735 + shp->shm_createtime = timeval.tv_sec;
71736 + }
71737 +#endif
71738 shp->shm_segsz = size;
71739 shp->shm_nattch = 0;
71740 shp->shm_file = file;
71741 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71742 return 0;
71743 }
71744
71745 +static struct ipc_ops shm_ops = {
71746 + .getnew = newseg,
71747 + .associate = shm_security,
71748 + .more_checks = shm_more_checks
71749 +};
71750 +
71751 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71752 {
71753 struct ipc_namespace *ns;
71754 - struct ipc_ops shm_ops;
71755 struct ipc_params shm_params;
71756
71757 ns = current->nsproxy->ipc_ns;
71758
71759 - shm_ops.getnew = newseg;
71760 - shm_ops.associate = shm_security;
71761 - shm_ops.more_checks = shm_more_checks;
71762 -
71763 shm_params.key = key;
71764 shm_params.flg = shmflg;
71765 shm_params.u.size = size;
71766 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71767 f_mode = FMODE_READ | FMODE_WRITE;
71768 }
71769 if (shmflg & SHM_EXEC) {
71770 +
71771 +#ifdef CONFIG_PAX_MPROTECT
71772 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
71773 + goto out;
71774 +#endif
71775 +
71776 prot |= PROT_EXEC;
71777 acc_mode |= S_IXUGO;
71778 }
71779 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71780 if (err)
71781 goto out_unlock;
71782
71783 +#ifdef CONFIG_GRKERNSEC
71784 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71785 + shp->shm_perm.cuid, shmid) ||
71786 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71787 + err = -EACCES;
71788 + goto out_unlock;
71789 + }
71790 +#endif
71791 +
71792 path.dentry = dget(shp->shm_file->f_path.dentry);
71793 path.mnt = shp->shm_file->f_path.mnt;
71794 shp->shm_nattch++;
71795 +#ifdef CONFIG_GRKERNSEC
71796 + shp->shm_lapid = current->pid;
71797 +#endif
71798 size = i_size_read(path.dentry->d_inode);
71799 shm_unlock(shp);
71800
71801 diff --git a/kernel/acct.c b/kernel/acct.c
71802 index a6605ca..ca91111 100644
71803 --- a/kernel/acct.c
71804 +++ b/kernel/acct.c
71805 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71806 */
71807 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71808 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71809 - file->f_op->write(file, (char *)&ac,
71810 + file->f_op->write(file, (char __force_user *)&ac,
71811 sizeof(acct_t), &file->f_pos);
71812 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71813 set_fs(fs);
71814 diff --git a/kernel/audit.c b/kernel/audit.c
71815 index 5feed23..48415fd 100644
71816 --- a/kernel/audit.c
71817 +++ b/kernel/audit.c
71818 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71819 3) suppressed due to audit_rate_limit
71820 4) suppressed due to audit_backlog_limit
71821 */
71822 -static atomic_t audit_lost = ATOMIC_INIT(0);
71823 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71824
71825 /* The netlink socket. */
71826 static struct sock *audit_sock;
71827 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71828 unsigned long now;
71829 int print;
71830
71831 - atomic_inc(&audit_lost);
71832 + atomic_inc_unchecked(&audit_lost);
71833
71834 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71835
71836 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71837 printk(KERN_WARNING
71838 "audit: audit_lost=%d audit_rate_limit=%d "
71839 "audit_backlog_limit=%d\n",
71840 - atomic_read(&audit_lost),
71841 + atomic_read_unchecked(&audit_lost),
71842 audit_rate_limit,
71843 audit_backlog_limit);
71844 audit_panic(message);
71845 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71846 status_set.pid = audit_pid;
71847 status_set.rate_limit = audit_rate_limit;
71848 status_set.backlog_limit = audit_backlog_limit;
71849 - status_set.lost = atomic_read(&audit_lost);
71850 + status_set.lost = atomic_read_unchecked(&audit_lost);
71851 status_set.backlog = skb_queue_len(&audit_skb_queue);
71852 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71853 &status_set, sizeof(status_set));
71854 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71855 spin_unlock_irq(&tsk->sighand->siglock);
71856 }
71857 read_unlock(&tasklist_lock);
71858 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71859 - &s, sizeof(s));
71860 +
71861 + if (!err)
71862 + audit_send_reply(NETLINK_CB(skb).pid, seq,
71863 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71864 break;
71865 }
71866 case AUDIT_TTY_SET: {
71867 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71868 avail = audit_expand(ab,
71869 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71870 if (!avail)
71871 - goto out;
71872 + goto out_va_end;
71873 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71874 }
71875 - va_end(args2);
71876 if (len > 0)
71877 skb_put(skb, len);
71878 +out_va_end:
71879 + va_end(args2);
71880 out:
71881 return;
71882 }
71883 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71884 index 267e484..ac41bc3 100644
71885 --- a/kernel/auditsc.c
71886 +++ b/kernel/auditsc.c
71887 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71888 struct audit_buffer **ab,
71889 struct audit_aux_data_execve *axi)
71890 {
71891 - int i;
71892 - size_t len, len_sent = 0;
71893 + int i, len;
71894 + size_t len_sent = 0;
71895 const char __user *p;
71896 char *buf;
71897
71898 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71899 }
71900
71901 /* global counter which is incremented every time something logs in */
71902 -static atomic_t session_id = ATOMIC_INIT(0);
71903 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71904
71905 /**
71906 * audit_set_loginuid - set a task's audit_context loginuid
71907 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71908 */
71909 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71910 {
71911 - unsigned int sessionid = atomic_inc_return(&session_id);
71912 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71913 struct audit_context *context = task->audit_context;
71914
71915 if (context && context->in_syscall) {
71916 diff --git a/kernel/capability.c b/kernel/capability.c
71917 index 8a944f5..db5001e 100644
71918 --- a/kernel/capability.c
71919 +++ b/kernel/capability.c
71920 @@ -305,10 +305,26 @@ int capable(int cap)
71921 BUG();
71922 }
71923
71924 - if (security_capable(cap) == 0) {
71925 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71926 current->flags |= PF_SUPERPRIV;
71927 return 1;
71928 }
71929 return 0;
71930 }
71931 +
71932 +int capable_nolog(int cap)
71933 +{
71934 + if (unlikely(!cap_valid(cap))) {
71935 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71936 + BUG();
71937 + }
71938 +
71939 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71940 + current->flags |= PF_SUPERPRIV;
71941 + return 1;
71942 + }
71943 + return 0;
71944 +}
71945 +
71946 EXPORT_SYMBOL(capable);
71947 +EXPORT_SYMBOL(capable_nolog);
71948 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71949 index 1fbcc74..7000012 100644
71950 --- a/kernel/cgroup.c
71951 +++ b/kernel/cgroup.c
71952 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71953 struct hlist_head *hhead;
71954 struct cg_cgroup_link *link;
71955
71956 + pax_track_stack();
71957 +
71958 /* First see if we already have a cgroup group that matches
71959 * the desired set */
71960 read_lock(&css_set_lock);
71961 diff --git a/kernel/compat.c b/kernel/compat.c
71962 index 8bc5578..186e44a 100644
71963 --- a/kernel/compat.c
71964 +++ b/kernel/compat.c
71965 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71966 mm_segment_t oldfs;
71967 long ret;
71968
71969 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71970 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71971 oldfs = get_fs();
71972 set_fs(KERNEL_DS);
71973 ret = hrtimer_nanosleep_restart(restart);
71974 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71975 oldfs = get_fs();
71976 set_fs(KERNEL_DS);
71977 ret = hrtimer_nanosleep(&tu,
71978 - rmtp ? (struct timespec __user *)&rmt : NULL,
71979 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
71980 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71981 set_fs(oldfs);
71982
71983 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71984 mm_segment_t old_fs = get_fs();
71985
71986 set_fs(KERNEL_DS);
71987 - ret = sys_sigpending((old_sigset_t __user *) &s);
71988 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
71989 set_fs(old_fs);
71990 if (ret == 0)
71991 ret = put_user(s, set);
71992 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71993 old_fs = get_fs();
71994 set_fs(KERNEL_DS);
71995 ret = sys_sigprocmask(how,
71996 - set ? (old_sigset_t __user *) &s : NULL,
71997 - oset ? (old_sigset_t __user *) &s : NULL);
71998 + set ? (old_sigset_t __force_user *) &s : NULL,
71999 + oset ? (old_sigset_t __force_user *) &s : NULL);
72000 set_fs(old_fs);
72001 if (ret == 0)
72002 if (oset)
72003 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72004 mm_segment_t old_fs = get_fs();
72005
72006 set_fs(KERNEL_DS);
72007 - ret = sys_old_getrlimit(resource, &r);
72008 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72009 set_fs(old_fs);
72010
72011 if (!ret) {
72012 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72013 mm_segment_t old_fs = get_fs();
72014
72015 set_fs(KERNEL_DS);
72016 - ret = sys_getrusage(who, (struct rusage __user *) &r);
72017 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72018 set_fs(old_fs);
72019
72020 if (ret)
72021 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
72022 set_fs (KERNEL_DS);
72023 ret = sys_wait4(pid,
72024 (stat_addr ?
72025 - (unsigned int __user *) &status : NULL),
72026 - options, (struct rusage __user *) &r);
72027 + (unsigned int __force_user *) &status : NULL),
72028 + options, (struct rusage __force_user *) &r);
72029 set_fs (old_fs);
72030
72031 if (ret > 0) {
72032 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
72033 memset(&info, 0, sizeof(info));
72034
72035 set_fs(KERNEL_DS);
72036 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72037 - uru ? (struct rusage __user *)&ru : NULL);
72038 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72039 + uru ? (struct rusage __force_user *)&ru : NULL);
72040 set_fs(old_fs);
72041
72042 if ((ret < 0) || (info.si_signo == 0))
72043 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72044 oldfs = get_fs();
72045 set_fs(KERNEL_DS);
72046 err = sys_timer_settime(timer_id, flags,
72047 - (struct itimerspec __user *) &newts,
72048 - (struct itimerspec __user *) &oldts);
72049 + (struct itimerspec __force_user *) &newts,
72050 + (struct itimerspec __force_user *) &oldts);
72051 set_fs(oldfs);
72052 if (!err && old && put_compat_itimerspec(old, &oldts))
72053 return -EFAULT;
72054 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72055 oldfs = get_fs();
72056 set_fs(KERNEL_DS);
72057 err = sys_timer_gettime(timer_id,
72058 - (struct itimerspec __user *) &ts);
72059 + (struct itimerspec __force_user *) &ts);
72060 set_fs(oldfs);
72061 if (!err && put_compat_itimerspec(setting, &ts))
72062 return -EFAULT;
72063 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
72064 oldfs = get_fs();
72065 set_fs(KERNEL_DS);
72066 err = sys_clock_settime(which_clock,
72067 - (struct timespec __user *) &ts);
72068 + (struct timespec __force_user *) &ts);
72069 set_fs(oldfs);
72070 return err;
72071 }
72072 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
72073 oldfs = get_fs();
72074 set_fs(KERNEL_DS);
72075 err = sys_clock_gettime(which_clock,
72076 - (struct timespec __user *) &ts);
72077 + (struct timespec __force_user *) &ts);
72078 set_fs(oldfs);
72079 if (!err && put_compat_timespec(&ts, tp))
72080 return -EFAULT;
72081 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
72082 oldfs = get_fs();
72083 set_fs(KERNEL_DS);
72084 err = sys_clock_getres(which_clock,
72085 - (struct timespec __user *) &ts);
72086 + (struct timespec __force_user *) &ts);
72087 set_fs(oldfs);
72088 if (!err && tp && put_compat_timespec(&ts, tp))
72089 return -EFAULT;
72090 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
72091 long err;
72092 mm_segment_t oldfs;
72093 struct timespec tu;
72094 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
72095 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
72096
72097 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
72098 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
72099 oldfs = get_fs();
72100 set_fs(KERNEL_DS);
72101 err = clock_nanosleep_restart(restart);
72102 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
72103 oldfs = get_fs();
72104 set_fs(KERNEL_DS);
72105 err = sys_clock_nanosleep(which_clock, flags,
72106 - (struct timespec __user *) &in,
72107 - (struct timespec __user *) &out);
72108 + (struct timespec __force_user *) &in,
72109 + (struct timespec __force_user *) &out);
72110 set_fs(oldfs);
72111
72112 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
72113 diff --git a/kernel/configs.c b/kernel/configs.c
72114 index abaee68..047facd 100644
72115 --- a/kernel/configs.c
72116 +++ b/kernel/configs.c
72117 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
72118 struct proc_dir_entry *entry;
72119
72120 /* create the current config file */
72121 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72122 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
72123 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
72124 + &ikconfig_file_ops);
72125 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72126 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
72127 + &ikconfig_file_ops);
72128 +#endif
72129 +#else
72130 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
72131 &ikconfig_file_ops);
72132 +#endif
72133 +
72134 if (!entry)
72135 return -ENOMEM;
72136
72137 diff --git a/kernel/cpu.c b/kernel/cpu.c
72138 index 3f2f04f..4e53ded 100644
72139 --- a/kernel/cpu.c
72140 +++ b/kernel/cpu.c
72141 @@ -20,7 +20,7 @@
72142 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
72143 static DEFINE_MUTEX(cpu_add_remove_lock);
72144
72145 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
72146 +static RAW_NOTIFIER_HEAD(cpu_chain);
72147
72148 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
72149 * Should always be manipulated under cpu_add_remove_lock
72150 diff --git a/kernel/cred.c b/kernel/cred.c
72151 index 0b5b5fc..f7fe51a 100644
72152 --- a/kernel/cred.c
72153 +++ b/kernel/cred.c
72154 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
72155 */
72156 void __put_cred(struct cred *cred)
72157 {
72158 + pax_track_stack();
72159 +
72160 kdebug("__put_cred(%p{%d,%d})", cred,
72161 atomic_read(&cred->usage),
72162 read_cred_subscribers(cred));
72163 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
72164 {
72165 struct cred *cred;
72166
72167 + pax_track_stack();
72168 +
72169 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
72170 atomic_read(&tsk->cred->usage),
72171 read_cred_subscribers(tsk->cred));
72172 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
72173 validate_creds(cred);
72174 put_cred(cred);
72175 }
72176 +
72177 +#ifdef CONFIG_GRKERNSEC_SETXID
72178 + cred = (struct cred *) tsk->delayed_cred;
72179 + if (cred) {
72180 + tsk->delayed_cred = NULL;
72181 + validate_creds(cred);
72182 + put_cred(cred);
72183 + }
72184 +#endif
72185 }
72186
72187 /**
72188 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
72189 {
72190 const struct cred *cred;
72191
72192 + pax_track_stack();
72193 +
72194 rcu_read_lock();
72195
72196 do {
72197 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
72198 {
72199 struct cred *new;
72200
72201 + pax_track_stack();
72202 +
72203 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72204 if (!new)
72205 return NULL;
72206 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72207 const struct cred *old;
72208 struct cred *new;
72209
72210 + pax_track_stack();
72211 +
72212 validate_process_creds();
72213
72214 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72215 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72216 struct thread_group_cred *tgcred = NULL;
72217 struct cred *new;
72218
72219 + pax_track_stack();
72220 +
72221 #ifdef CONFIG_KEYS
72222 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72223 if (!tgcred)
72224 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72225 struct cred *new;
72226 int ret;
72227
72228 + pax_track_stack();
72229 +
72230 mutex_init(&p->cred_guard_mutex);
72231
72232 if (
72233 @@ -523,11 +546,13 @@ error_put:
72234 * Always returns 0 thus allowing this function to be tail-called at the end
72235 * of, say, sys_setgid().
72236 */
72237 -int commit_creds(struct cred *new)
72238 +static int __commit_creds(struct cred *new)
72239 {
72240 struct task_struct *task = current;
72241 const struct cred *old = task->real_cred;
72242
72243 + pax_track_stack();
72244 +
72245 kdebug("commit_creds(%p{%d,%d})", new,
72246 atomic_read(&new->usage),
72247 read_cred_subscribers(new));
72248 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72249
72250 get_cred(new); /* we will require a ref for the subj creds too */
72251
72252 + gr_set_role_label(task, new->uid, new->gid);
72253 +
72254 /* dumpability changes */
72255 if (old->euid != new->euid ||
72256 old->egid != new->egid ||
72257 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72258 key_fsgid_changed(task);
72259
72260 /* do it
72261 - * - What if a process setreuid()'s and this brings the
72262 - * new uid over his NPROC rlimit? We can check this now
72263 - * cheaply with the new uid cache, so if it matters
72264 - * we should be checking for it. -DaveM
72265 + * RLIMIT_NPROC limits on user->processes have already been checked
72266 + * in set_user().
72267 */
72268 alter_cred_subscribers(new, 2);
72269 if (new->user != old->user)
72270 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72271 put_cred(old);
72272 return 0;
72273 }
72274 +
72275 +#ifdef CONFIG_GRKERNSEC_SETXID
72276 +extern int set_user(struct cred *new);
72277 +
72278 +void gr_delayed_cred_worker(void)
72279 +{
72280 + const struct cred *new = current->delayed_cred;
72281 + struct cred *ncred;
72282 +
72283 + current->delayed_cred = NULL;
72284 +
72285 + if (current_uid() && new != NULL) {
72286 + // from doing get_cred on it when queueing this
72287 + put_cred(new);
72288 + return;
72289 + } else if (new == NULL)
72290 + return;
72291 +
72292 + ncred = prepare_creds();
72293 + if (!ncred)
72294 + goto die;
72295 + // uids
72296 + ncred->uid = new->uid;
72297 + ncred->euid = new->euid;
72298 + ncred->suid = new->suid;
72299 + ncred->fsuid = new->fsuid;
72300 + // gids
72301 + ncred->gid = new->gid;
72302 + ncred->egid = new->egid;
72303 + ncred->sgid = new->sgid;
72304 + ncred->fsgid = new->fsgid;
72305 + // groups
72306 + if (set_groups(ncred, new->group_info) < 0) {
72307 + abort_creds(ncred);
72308 + goto die;
72309 + }
72310 + // caps
72311 + ncred->securebits = new->securebits;
72312 + ncred->cap_inheritable = new->cap_inheritable;
72313 + ncred->cap_permitted = new->cap_permitted;
72314 + ncred->cap_effective = new->cap_effective;
72315 + ncred->cap_bset = new->cap_bset;
72316 +
72317 + if (set_user(ncred)) {
72318 + abort_creds(ncred);
72319 + goto die;
72320 + }
72321 +
72322 + // from doing get_cred on it when queueing this
72323 + put_cred(new);
72324 +
72325 + __commit_creds(ncred);
72326 + return;
72327 +die:
72328 + // from doing get_cred on it when queueing this
72329 + put_cred(new);
72330 + do_group_exit(SIGKILL);
72331 +}
72332 +#endif
72333 +
72334 +int commit_creds(struct cred *new)
72335 +{
72336 +#ifdef CONFIG_GRKERNSEC_SETXID
72337 + struct task_struct *t;
72338 +
72339 + /* we won't get called with tasklist_lock held for writing
72340 + and interrupts disabled as the cred struct in that case is
72341 + init_cred
72342 + */
72343 + if (grsec_enable_setxid && !current_is_single_threaded() &&
72344 + !current_uid() && new->uid) {
72345 + rcu_read_lock();
72346 + read_lock(&tasklist_lock);
72347 + for (t = next_thread(current); t != current;
72348 + t = next_thread(t)) {
72349 + if (t->delayed_cred == NULL) {
72350 + t->delayed_cred = get_cred(new);
72351 + set_tsk_need_resched(t);
72352 + }
72353 + }
72354 + read_unlock(&tasklist_lock);
72355 + rcu_read_unlock();
72356 + }
72357 +#endif
72358 + return __commit_creds(new);
72359 +}
72360 +
72361 EXPORT_SYMBOL(commit_creds);
72362
72363 +
72364 /**
72365 * abort_creds - Discard a set of credentials and unlock the current task
72366 * @new: The credentials that were going to be applied
72367 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72368 */
72369 void abort_creds(struct cred *new)
72370 {
72371 + pax_track_stack();
72372 +
72373 kdebug("abort_creds(%p{%d,%d})", new,
72374 atomic_read(&new->usage),
72375 read_cred_subscribers(new));
72376 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72377 {
72378 const struct cred *old = current->cred;
72379
72380 + pax_track_stack();
72381 +
72382 kdebug("override_creds(%p{%d,%d})", new,
72383 atomic_read(&new->usage),
72384 read_cred_subscribers(new));
72385 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72386 {
72387 const struct cred *override = current->cred;
72388
72389 + pax_track_stack();
72390 +
72391 kdebug("revert_creds(%p{%d,%d})", old,
72392 atomic_read(&old->usage),
72393 read_cred_subscribers(old));
72394 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72395 const struct cred *old;
72396 struct cred *new;
72397
72398 + pax_track_stack();
72399 +
72400 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72401 if (!new)
72402 return NULL;
72403 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72404 */
72405 int set_security_override(struct cred *new, u32 secid)
72406 {
72407 + pax_track_stack();
72408 +
72409 return security_kernel_act_as(new, secid);
72410 }
72411 EXPORT_SYMBOL(set_security_override);
72412 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72413 u32 secid;
72414 int ret;
72415
72416 + pax_track_stack();
72417 +
72418 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72419 if (ret < 0)
72420 return ret;
72421 diff --git a/kernel/exit.c b/kernel/exit.c
72422 index 0f8fae3..7916abf 100644
72423 --- a/kernel/exit.c
72424 +++ b/kernel/exit.c
72425 @@ -55,6 +55,10 @@
72426 #include <asm/pgtable.h>
72427 #include <asm/mmu_context.h>
72428
72429 +#ifdef CONFIG_GRKERNSEC
72430 +extern rwlock_t grsec_exec_file_lock;
72431 +#endif
72432 +
72433 static void exit_mm(struct task_struct * tsk);
72434
72435 static void __unhash_process(struct task_struct *p)
72436 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72437 struct task_struct *leader;
72438 int zap_leader;
72439 repeat:
72440 +#ifdef CONFIG_NET
72441 + gr_del_task_from_ip_table(p);
72442 +#endif
72443 +
72444 tracehook_prepare_release_task(p);
72445 /* don't need to get the RCU readlock here - the process is dead and
72446 * can't be modifying its own credentials */
72447 @@ -397,7 +405,7 @@ int allow_signal(int sig)
72448 * know it'll be handled, so that they don't get converted to
72449 * SIGKILL or just silently dropped.
72450 */
72451 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72452 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72453 recalc_sigpending();
72454 spin_unlock_irq(&current->sighand->siglock);
72455 return 0;
72456 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72457 vsnprintf(current->comm, sizeof(current->comm), name, args);
72458 va_end(args);
72459
72460 +#ifdef CONFIG_GRKERNSEC
72461 + write_lock(&grsec_exec_file_lock);
72462 + if (current->exec_file) {
72463 + fput(current->exec_file);
72464 + current->exec_file = NULL;
72465 + }
72466 + write_unlock(&grsec_exec_file_lock);
72467 +#endif
72468 +
72469 + gr_set_kernel_label(current);
72470 +
72471 /*
72472 * If we were started as result of loading a module, close all of the
72473 * user space pages. We don't need them, and if we didn't close them
72474 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72475 struct task_struct *tsk = current;
72476 int group_dead;
72477
72478 - profile_task_exit(tsk);
72479 -
72480 - WARN_ON(atomic_read(&tsk->fs_excl));
72481 -
72482 + /*
72483 + * Check this first since set_fs() below depends on
72484 + * current_thread_info(), which we better not access when we're in
72485 + * interrupt context. Other than that, we want to do the set_fs()
72486 + * as early as possible.
72487 + */
72488 if (unlikely(in_interrupt()))
72489 panic("Aiee, killing interrupt handler!");
72490 - if (unlikely(!tsk->pid))
72491 - panic("Attempted to kill the idle task!");
72492
72493 /*
72494 - * If do_exit is called because this processes oopsed, it's possible
72495 + * If do_exit is called because this processes Oops'ed, it's possible
72496 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72497 * continuing. Amongst other possible reasons, this is to prevent
72498 * mm_release()->clear_child_tid() from writing to a user-controlled
72499 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72500 */
72501 set_fs(USER_DS);
72502
72503 + profile_task_exit(tsk);
72504 +
72505 + WARN_ON(atomic_read(&tsk->fs_excl));
72506 +
72507 + if (unlikely(!tsk->pid))
72508 + panic("Attempted to kill the idle task!");
72509 +
72510 tracehook_report_exit(&code);
72511
72512 validate_creds_for_do_exit(tsk);
72513 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72514 tsk->exit_code = code;
72515 taskstats_exit(tsk, group_dead);
72516
72517 + gr_acl_handle_psacct(tsk, code);
72518 + gr_acl_handle_exit();
72519 +
72520 exit_mm(tsk);
72521
72522 if (group_dead)
72523 @@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
72524 tsk->flags |= PF_EXITPIDONE;
72525
72526 if (tsk->io_context)
72527 - exit_io_context();
72528 + exit_io_context(tsk);
72529
72530 if (tsk->splice_pipe)
72531 __free_pipe_info(tsk->splice_pipe);
72532 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72533
72534 if (unlikely(wo->wo_flags & WNOWAIT)) {
72535 int exit_code = p->exit_code;
72536 - int why, status;
72537 + int why;
72538
72539 get_task_struct(p);
72540 read_unlock(&tasklist_lock);
72541 diff --git a/kernel/fork.c b/kernel/fork.c
72542 index 4bde56f..8976a8f 100644
72543 --- a/kernel/fork.c
72544 +++ b/kernel/fork.c
72545 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72546 *stackend = STACK_END_MAGIC; /* for overflow detection */
72547
72548 #ifdef CONFIG_CC_STACKPROTECTOR
72549 - tsk->stack_canary = get_random_int();
72550 + tsk->stack_canary = pax_get_random_long();
72551 #endif
72552
72553 /* One for us, one for whoever does the "release_task()" (usually parent) */
72554 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72555 mm->locked_vm = 0;
72556 mm->mmap = NULL;
72557 mm->mmap_cache = NULL;
72558 - mm->free_area_cache = oldmm->mmap_base;
72559 - mm->cached_hole_size = ~0UL;
72560 + mm->free_area_cache = oldmm->free_area_cache;
72561 + mm->cached_hole_size = oldmm->cached_hole_size;
72562 mm->map_count = 0;
72563 cpumask_clear(mm_cpumask(mm));
72564 mm->mm_rb = RB_ROOT;
72565 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72566 tmp->vm_flags &= ~VM_LOCKED;
72567 tmp->vm_mm = mm;
72568 tmp->vm_next = tmp->vm_prev = NULL;
72569 + tmp->vm_mirror = NULL;
72570 anon_vma_link(tmp);
72571 file = tmp->vm_file;
72572 if (file) {
72573 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72574 if (retval)
72575 goto out;
72576 }
72577 +
72578 +#ifdef CONFIG_PAX_SEGMEXEC
72579 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72580 + struct vm_area_struct *mpnt_m;
72581 +
72582 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72583 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72584 +
72585 + if (!mpnt->vm_mirror)
72586 + continue;
72587 +
72588 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72589 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72590 + mpnt->vm_mirror = mpnt_m;
72591 + } else {
72592 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72593 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72594 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72595 + mpnt->vm_mirror->vm_mirror = mpnt;
72596 + }
72597 + }
72598 + BUG_ON(mpnt_m);
72599 + }
72600 +#endif
72601 +
72602 /* a new mm has just been created */
72603 arch_dup_mmap(oldmm, mm);
72604 retval = 0;
72605 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72606 write_unlock(&fs->lock);
72607 return -EAGAIN;
72608 }
72609 - fs->users++;
72610 + atomic_inc(&fs->users);
72611 write_unlock(&fs->lock);
72612 return 0;
72613 }
72614 tsk->fs = copy_fs_struct(fs);
72615 if (!tsk->fs)
72616 return -ENOMEM;
72617 + gr_set_chroot_entries(tsk, &tsk->fs->root);
72618 return 0;
72619 }
72620
72621 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72622 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72623 #endif
72624 retval = -EAGAIN;
72625 +
72626 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72627 +
72628 if (atomic_read(&p->real_cred->user->processes) >=
72629 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72630 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72631 - p->real_cred->user != INIT_USER)
72632 + if (p->real_cred->user != INIT_USER &&
72633 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72634 goto bad_fork_free;
72635 }
72636 + current->flags &= ~PF_NPROC_EXCEEDED;
72637
72638 retval = copy_creds(p, clone_flags);
72639 if (retval < 0)
72640 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72641 goto bad_fork_free_pid;
72642 }
72643
72644 + gr_copy_label(p);
72645 +
72646 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72647 /*
72648 * Clear TID on mm_release()?
72649 @@ -1299,7 +1332,8 @@ bad_fork_free_pid:
72650 if (pid != &init_struct_pid)
72651 free_pid(pid);
72652 bad_fork_cleanup_io:
72653 - put_io_context(p->io_context);
72654 + if (p->io_context)
72655 + exit_io_context(p);
72656 bad_fork_cleanup_namespaces:
72657 exit_task_namespaces(p);
72658 bad_fork_cleanup_mm:
72659 @@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
72660 bad_fork_free:
72661 free_task(p);
72662 fork_out:
72663 + gr_log_forkfail(retval);
72664 +
72665 return ERR_PTR(retval);
72666 }
72667
72668 @@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
72669 if (clone_flags & CLONE_PARENT_SETTID)
72670 put_user(nr, parent_tidptr);
72671
72672 + gr_handle_brute_check();
72673 +
72674 if (clone_flags & CLONE_VFORK) {
72675 p->vfork_done = &vfork;
72676 init_completion(&vfork);
72677 @@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72678 return 0;
72679
72680 /* don't need lock here; in the worst case we'll do useless copy */
72681 - if (fs->users == 1)
72682 + if (atomic_read(&fs->users) == 1)
72683 return 0;
72684
72685 *new_fsp = copy_fs_struct(fs);
72686 @@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72687 fs = current->fs;
72688 write_lock(&fs->lock);
72689 current->fs = new_fs;
72690 - if (--fs->users)
72691 + gr_set_chroot_entries(current, &current->fs->root);
72692 + if (atomic_dec_return(&fs->users))
72693 new_fs = NULL;
72694 else
72695 new_fs = fs;
72696 diff --git a/kernel/futex.c b/kernel/futex.c
72697 index fb98c9f..333faec 100644
72698 --- a/kernel/futex.c
72699 +++ b/kernel/futex.c
72700 @@ -54,6 +54,7 @@
72701 #include <linux/mount.h>
72702 #include <linux/pagemap.h>
72703 #include <linux/syscalls.h>
72704 +#include <linux/ptrace.h>
72705 #include <linux/signal.h>
72706 #include <linux/module.h>
72707 #include <linux/magic.h>
72708 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72709 struct page *page;
72710 int err, ro = 0;
72711
72712 +#ifdef CONFIG_PAX_SEGMEXEC
72713 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72714 + return -EFAULT;
72715 +#endif
72716 +
72717 /*
72718 * The futex address must be "naturally" aligned.
72719 */
72720 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72721 struct futex_q q;
72722 int ret;
72723
72724 + pax_track_stack();
72725 +
72726 if (!bitset)
72727 return -EINVAL;
72728
72729 @@ -1871,7 +1879,7 @@ retry:
72730
72731 restart = &current_thread_info()->restart_block;
72732 restart->fn = futex_wait_restart;
72733 - restart->futex.uaddr = (u32 *)uaddr;
72734 + restart->futex.uaddr = uaddr;
72735 restart->futex.val = val;
72736 restart->futex.time = abs_time->tv64;
72737 restart->futex.bitset = bitset;
72738 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72739 struct futex_q q;
72740 int res, ret;
72741
72742 + pax_track_stack();
72743 +
72744 if (!bitset)
72745 return -EINVAL;
72746
72747 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72748 if (!p)
72749 goto err_unlock;
72750 ret = -EPERM;
72751 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72752 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72753 + goto err_unlock;
72754 +#endif
72755 pcred = __task_cred(p);
72756 if (cred->euid != pcred->euid &&
72757 cred->euid != pcred->uid &&
72758 @@ -2489,7 +2503,7 @@ retry:
72759 */
72760 static inline int fetch_robust_entry(struct robust_list __user **entry,
72761 struct robust_list __user * __user *head,
72762 - int *pi)
72763 + unsigned int *pi)
72764 {
72765 unsigned long uentry;
72766
72767 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72768 {
72769 u32 curval;
72770 int i;
72771 + mm_segment_t oldfs;
72772
72773 /*
72774 * This will fail and we want it. Some arch implementations do
72775 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72776 * implementation, the non functional ones will return
72777 * -ENOSYS.
72778 */
72779 + oldfs = get_fs();
72780 + set_fs(USER_DS);
72781 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72782 + set_fs(oldfs);
72783 if (curval == -EFAULT)
72784 futex_cmpxchg_enabled = 1;
72785
72786 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72787 index 2357165..eb25501 100644
72788 --- a/kernel/futex_compat.c
72789 +++ b/kernel/futex_compat.c
72790 @@ -10,6 +10,7 @@
72791 #include <linux/compat.h>
72792 #include <linux/nsproxy.h>
72793 #include <linux/futex.h>
72794 +#include <linux/ptrace.h>
72795
72796 #include <asm/uaccess.h>
72797
72798 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72799 {
72800 struct compat_robust_list_head __user *head;
72801 unsigned long ret;
72802 - const struct cred *cred = current_cred(), *pcred;
72803 + const struct cred *cred = current_cred();
72804 + const struct cred *pcred;
72805
72806 if (!futex_cmpxchg_enabled)
72807 return -ENOSYS;
72808 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72809 if (!p)
72810 goto err_unlock;
72811 ret = -EPERM;
72812 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72813 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72814 + goto err_unlock;
72815 +#endif
72816 pcred = __task_cred(p);
72817 if (cred->euid != pcred->euid &&
72818 cred->euid != pcred->uid &&
72819 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72820 index 9b22d03..6295b62 100644
72821 --- a/kernel/gcov/base.c
72822 +++ b/kernel/gcov/base.c
72823 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
72824 }
72825
72826 #ifdef CONFIG_MODULES
72827 -static inline int within(void *addr, void *start, unsigned long size)
72828 -{
72829 - return ((addr >= start) && (addr < start + size));
72830 -}
72831 -
72832 /* Update list and generate events when modules are unloaded. */
72833 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72834 void *data)
72835 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72836 prev = NULL;
72837 /* Remove entries located in module from linked list. */
72838 for (info = gcov_info_head; info; info = info->next) {
72839 - if (within(info, mod->module_core, mod->core_size)) {
72840 + if (within_module_core_rw((unsigned long)info, mod)) {
72841 if (prev)
72842 prev->next = info->next;
72843 else
72844 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72845 index a6e9d00..a0da4f9 100644
72846 --- a/kernel/hrtimer.c
72847 +++ b/kernel/hrtimer.c
72848 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72849 local_irq_restore(flags);
72850 }
72851
72852 -static void run_hrtimer_softirq(struct softirq_action *h)
72853 +static void run_hrtimer_softirq(void)
72854 {
72855 hrtimer_peek_ahead_timers();
72856 }
72857 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72858 index 8b6b8b6..6bc87df 100644
72859 --- a/kernel/kallsyms.c
72860 +++ b/kernel/kallsyms.c
72861 @@ -11,6 +11,9 @@
72862 * Changed the compression method from stem compression to "table lookup"
72863 * compression (see scripts/kallsyms.c for a more complete description)
72864 */
72865 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72866 +#define __INCLUDED_BY_HIDESYM 1
72867 +#endif
72868 #include <linux/kallsyms.h>
72869 #include <linux/module.h>
72870 #include <linux/init.h>
72871 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72872
72873 static inline int is_kernel_inittext(unsigned long addr)
72874 {
72875 + if (system_state != SYSTEM_BOOTING)
72876 + return 0;
72877 +
72878 if (addr >= (unsigned long)_sinittext
72879 && addr <= (unsigned long)_einittext)
72880 return 1;
72881 return 0;
72882 }
72883
72884 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72885 +#ifdef CONFIG_MODULES
72886 +static inline int is_module_text(unsigned long addr)
72887 +{
72888 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72889 + return 1;
72890 +
72891 + addr = ktla_ktva(addr);
72892 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72893 +}
72894 +#else
72895 +static inline int is_module_text(unsigned long addr)
72896 +{
72897 + return 0;
72898 +}
72899 +#endif
72900 +#endif
72901 +
72902 static inline int is_kernel_text(unsigned long addr)
72903 {
72904 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72905 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72906
72907 static inline int is_kernel(unsigned long addr)
72908 {
72909 +
72910 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72911 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
72912 + return 1;
72913 +
72914 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72915 +#else
72916 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72917 +#endif
72918 +
72919 return 1;
72920 return in_gate_area_no_task(addr);
72921 }
72922
72923 static int is_ksym_addr(unsigned long addr)
72924 {
72925 +
72926 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72927 + if (is_module_text(addr))
72928 + return 0;
72929 +#endif
72930 +
72931 if (all_var)
72932 return is_kernel(addr);
72933
72934 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72935
72936 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72937 {
72938 - iter->name[0] = '\0';
72939 iter->nameoff = get_symbol_offset(new_pos);
72940 iter->pos = new_pos;
72941 }
72942 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72943 {
72944 struct kallsym_iter *iter = m->private;
72945
72946 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72947 + if (current_uid())
72948 + return 0;
72949 +#endif
72950 +
72951 /* Some debugging symbols have no name. Ignore them. */
72952 if (!iter->name[0])
72953 return 0;
72954 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72955 struct kallsym_iter *iter;
72956 int ret;
72957
72958 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72959 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72960 if (!iter)
72961 return -ENOMEM;
72962 reset_iter(iter, 0);
72963 diff --git a/kernel/kexec.c b/kernel/kexec.c
72964 index f336e21..9c1c20b 100644
72965 --- a/kernel/kexec.c
72966 +++ b/kernel/kexec.c
72967 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72968 unsigned long flags)
72969 {
72970 struct compat_kexec_segment in;
72971 - struct kexec_segment out, __user *ksegments;
72972 + struct kexec_segment out;
72973 + struct kexec_segment __user *ksegments;
72974 unsigned long i, result;
72975
72976 /* Don't allow clients that don't understand the native
72977 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72978 index 53dae4b..9ba3743 100644
72979 --- a/kernel/kgdb.c
72980 +++ b/kernel/kgdb.c
72981 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72982 /* Guard for recursive entry */
72983 static int exception_level;
72984
72985 -static struct kgdb_io *kgdb_io_ops;
72986 +static const struct kgdb_io *kgdb_io_ops;
72987 static DEFINE_SPINLOCK(kgdb_registration_lock);
72988
72989 /* kgdb console driver is loaded */
72990 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72991 */
72992 static atomic_t passive_cpu_wait[NR_CPUS];
72993 static atomic_t cpu_in_kgdb[NR_CPUS];
72994 -atomic_t kgdb_setting_breakpoint;
72995 +atomic_unchecked_t kgdb_setting_breakpoint;
72996
72997 struct task_struct *kgdb_usethread;
72998 struct task_struct *kgdb_contthread;
72999 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
73000 sizeof(unsigned long)];
73001
73002 /* to keep track of the CPU which is doing the single stepping*/
73003 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73004 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73005
73006 /*
73007 * If you are debugging a problem where roundup (the collection of
73008 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
73009 return 0;
73010 if (kgdb_connected)
73011 return 1;
73012 - if (atomic_read(&kgdb_setting_breakpoint))
73013 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
73014 return 1;
73015 if (print_wait)
73016 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
73017 @@ -1426,8 +1426,8 @@ acquirelock:
73018 * instance of the exception handler wanted to come into the
73019 * debugger on a different CPU via a single step
73020 */
73021 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73022 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
73023 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73024 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
73025
73026 atomic_set(&kgdb_active, -1);
73027 touch_softlockup_watchdog();
73028 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
73029 *
73030 * Register it with the KGDB core.
73031 */
73032 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
73033 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
73034 {
73035 int err;
73036
73037 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
73038 *
73039 * Unregister it with the KGDB core.
73040 */
73041 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
73042 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
73043 {
73044 BUG_ON(kgdb_connected);
73045
73046 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
73047 */
73048 void kgdb_breakpoint(void)
73049 {
73050 - atomic_set(&kgdb_setting_breakpoint, 1);
73051 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
73052 wmb(); /* Sync point before breakpoint */
73053 arch_kgdb_breakpoint();
73054 wmb(); /* Sync point after breakpoint */
73055 - atomic_set(&kgdb_setting_breakpoint, 0);
73056 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
73057 }
73058 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
73059
73060 diff --git a/kernel/kmod.c b/kernel/kmod.c
73061 index a061472..40884b6 100644
73062 --- a/kernel/kmod.c
73063 +++ b/kernel/kmod.c
73064 @@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
73065 * If module auto-loading support is disabled then this function
73066 * becomes a no-operation.
73067 */
73068 -int __request_module(bool wait, const char *fmt, ...)
73069 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
73070 {
73071 - va_list args;
73072 char module_name[MODULE_NAME_LEN];
73073 unsigned int max_modprobes;
73074 int ret;
73075 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
73076 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
73077 static char *envp[] = { "HOME=/",
73078 "TERM=linux",
73079 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
73080 @@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
73081 if (ret)
73082 return ret;
73083
73084 - va_start(args, fmt);
73085 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
73086 - va_end(args);
73087 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
73088 if (ret >= MODULE_NAME_LEN)
73089 return -ENAMETOOLONG;
73090
73091 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73092 + if (!current_uid()) {
73093 + /* hack to workaround consolekit/udisks stupidity */
73094 + read_lock(&tasklist_lock);
73095 + if (!strcmp(current->comm, "mount") &&
73096 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
73097 + read_unlock(&tasklist_lock);
73098 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
73099 + return -EPERM;
73100 + }
73101 + read_unlock(&tasklist_lock);
73102 + }
73103 +#endif
73104 +
73105 /* If modprobe needs a service that is in a module, we get a recursive
73106 * loop. Limit the number of running kmod threads to max_threads/2 or
73107 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
73108 @@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
73109 atomic_dec(&kmod_concurrent);
73110 return ret;
73111 }
73112 +
73113 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
73114 +{
73115 + va_list args;
73116 + int ret;
73117 +
73118 + va_start(args, fmt);
73119 + ret = ____request_module(wait, module_param, fmt, args);
73120 + va_end(args);
73121 +
73122 + return ret;
73123 +}
73124 +
73125 +int __request_module(bool wait, const char *fmt, ...)
73126 +{
73127 + va_list args;
73128 + int ret;
73129 +
73130 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73131 + if (current_uid()) {
73132 + char module_param[MODULE_NAME_LEN];
73133 +
73134 + memset(module_param, 0, sizeof(module_param));
73135 +
73136 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
73137 +
73138 + va_start(args, fmt);
73139 + ret = ____request_module(wait, module_param, fmt, args);
73140 + va_end(args);
73141 +
73142 + return ret;
73143 + }
73144 +#endif
73145 +
73146 + va_start(args, fmt);
73147 + ret = ____request_module(wait, NULL, fmt, args);
73148 + va_end(args);
73149 +
73150 + return ret;
73151 +}
73152 +
73153 +
73154 EXPORT_SYMBOL(__request_module);
73155 #endif /* CONFIG_MODULES */
73156
73157 @@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
73158 *
73159 * Thus the __user pointer cast is valid here.
73160 */
73161 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
73162 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
73163
73164 /*
73165 * If ret is 0, either ____call_usermodehelper failed and the
73166 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
73167 index 176d825..77fa8ea 100644
73168 --- a/kernel/kprobes.c
73169 +++ b/kernel/kprobes.c
73170 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
73171 * kernel image and loaded module images reside. This is required
73172 * so x86_64 can correctly handle the %rip-relative fixups.
73173 */
73174 - kip->insns = module_alloc(PAGE_SIZE);
73175 + kip->insns = module_alloc_exec(PAGE_SIZE);
73176 if (!kip->insns) {
73177 kfree(kip);
73178 return NULL;
73179 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
73180 */
73181 if (!list_is_singular(&kprobe_insn_pages)) {
73182 list_del(&kip->list);
73183 - module_free(NULL, kip->insns);
73184 + module_free_exec(NULL, kip->insns);
73185 kfree(kip);
73186 }
73187 return 1;
73188 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
73189 {
73190 int i, err = 0;
73191 unsigned long offset = 0, size = 0;
73192 - char *modname, namebuf[128];
73193 + char *modname, namebuf[KSYM_NAME_LEN];
73194 const char *symbol_name;
73195 void *addr;
73196 struct kprobe_blackpoint *kb;
73197 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
73198 const char *sym = NULL;
73199 unsigned int i = *(loff_t *) v;
73200 unsigned long offset = 0;
73201 - char *modname, namebuf[128];
73202 + char *modname, namebuf[KSYM_NAME_LEN];
73203
73204 head = &kprobe_table[i];
73205 preempt_disable();
73206 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
73207 index d86fe89..d12fc66 100644
73208 --- a/kernel/lockdep.c
73209 +++ b/kernel/lockdep.c
73210 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
73211 /*
73212 * Various lockdep statistics:
73213 */
73214 -atomic_t chain_lookup_hits;
73215 -atomic_t chain_lookup_misses;
73216 -atomic_t hardirqs_on_events;
73217 -atomic_t hardirqs_off_events;
73218 -atomic_t redundant_hardirqs_on;
73219 -atomic_t redundant_hardirqs_off;
73220 -atomic_t softirqs_on_events;
73221 -atomic_t softirqs_off_events;
73222 -atomic_t redundant_softirqs_on;
73223 -atomic_t redundant_softirqs_off;
73224 -atomic_t nr_unused_locks;
73225 -atomic_t nr_cyclic_checks;
73226 -atomic_t nr_find_usage_forwards_checks;
73227 -atomic_t nr_find_usage_backwards_checks;
73228 +atomic_unchecked_t chain_lookup_hits;
73229 +atomic_unchecked_t chain_lookup_misses;
73230 +atomic_unchecked_t hardirqs_on_events;
73231 +atomic_unchecked_t hardirqs_off_events;
73232 +atomic_unchecked_t redundant_hardirqs_on;
73233 +atomic_unchecked_t redundant_hardirqs_off;
73234 +atomic_unchecked_t softirqs_on_events;
73235 +atomic_unchecked_t softirqs_off_events;
73236 +atomic_unchecked_t redundant_softirqs_on;
73237 +atomic_unchecked_t redundant_softirqs_off;
73238 +atomic_unchecked_t nr_unused_locks;
73239 +atomic_unchecked_t nr_cyclic_checks;
73240 +atomic_unchecked_t nr_find_usage_forwards_checks;
73241 +atomic_unchecked_t nr_find_usage_backwards_checks;
73242 #endif
73243
73244 /*
73245 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
73246 int i;
73247 #endif
73248
73249 +#ifdef CONFIG_PAX_KERNEXEC
73250 + start = ktla_ktva(start);
73251 +#endif
73252 +
73253 /*
73254 * static variable?
73255 */
73256 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
73257 */
73258 for_each_possible_cpu(i) {
73259 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73260 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73261 - + per_cpu_offset(i);
73262 + end = start + PERCPU_ENOUGH_ROOM;
73263
73264 if ((addr >= start) && (addr < end))
73265 return 1;
73266 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73267 if (!static_obj(lock->key)) {
73268 debug_locks_off();
73269 printk("INFO: trying to register non-static key.\n");
73270 + printk("lock:%pS key:%pS.\n", lock, lock->key);
73271 printk("the code is fine but needs lockdep annotation.\n");
73272 printk("turning off the locking correctness validator.\n");
73273 dump_stack();
73274 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73275 if (!class)
73276 return 0;
73277 }
73278 - debug_atomic_inc((atomic_t *)&class->ops);
73279 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73280 if (very_verbose(class)) {
73281 printk("\nacquire class [%p] %s", class->key, class->name);
73282 if (class->name_version > 1)
73283 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73284 index a2ee95a..092f0f2 100644
73285 --- a/kernel/lockdep_internals.h
73286 +++ b/kernel/lockdep_internals.h
73287 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73288 /*
73289 * Various lockdep statistics:
73290 */
73291 -extern atomic_t chain_lookup_hits;
73292 -extern atomic_t chain_lookup_misses;
73293 -extern atomic_t hardirqs_on_events;
73294 -extern atomic_t hardirqs_off_events;
73295 -extern atomic_t redundant_hardirqs_on;
73296 -extern atomic_t redundant_hardirqs_off;
73297 -extern atomic_t softirqs_on_events;
73298 -extern atomic_t softirqs_off_events;
73299 -extern atomic_t redundant_softirqs_on;
73300 -extern atomic_t redundant_softirqs_off;
73301 -extern atomic_t nr_unused_locks;
73302 -extern atomic_t nr_cyclic_checks;
73303 -extern atomic_t nr_cyclic_check_recursions;
73304 -extern atomic_t nr_find_usage_forwards_checks;
73305 -extern atomic_t nr_find_usage_forwards_recursions;
73306 -extern atomic_t nr_find_usage_backwards_checks;
73307 -extern atomic_t nr_find_usage_backwards_recursions;
73308 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
73309 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
73310 -# define debug_atomic_read(ptr) atomic_read(ptr)
73311 +extern atomic_unchecked_t chain_lookup_hits;
73312 +extern atomic_unchecked_t chain_lookup_misses;
73313 +extern atomic_unchecked_t hardirqs_on_events;
73314 +extern atomic_unchecked_t hardirqs_off_events;
73315 +extern atomic_unchecked_t redundant_hardirqs_on;
73316 +extern atomic_unchecked_t redundant_hardirqs_off;
73317 +extern atomic_unchecked_t softirqs_on_events;
73318 +extern atomic_unchecked_t softirqs_off_events;
73319 +extern atomic_unchecked_t redundant_softirqs_on;
73320 +extern atomic_unchecked_t redundant_softirqs_off;
73321 +extern atomic_unchecked_t nr_unused_locks;
73322 +extern atomic_unchecked_t nr_cyclic_checks;
73323 +extern atomic_unchecked_t nr_cyclic_check_recursions;
73324 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
73325 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73326 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
73327 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73328 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73329 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73330 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73331 #else
73332 # define debug_atomic_inc(ptr) do { } while (0)
73333 # define debug_atomic_dec(ptr) do { } while (0)
73334 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73335 index d4aba4f..02a353f 100644
73336 --- a/kernel/lockdep_proc.c
73337 +++ b/kernel/lockdep_proc.c
73338 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73339
73340 static void print_name(struct seq_file *m, struct lock_class *class)
73341 {
73342 - char str[128];
73343 + char str[KSYM_NAME_LEN];
73344 const char *name = class->name;
73345
73346 if (!name) {
73347 diff --git a/kernel/module.c b/kernel/module.c
73348 index 4b270e6..2226274 100644
73349 --- a/kernel/module.c
73350 +++ b/kernel/module.c
73351 @@ -55,6 +55,7 @@
73352 #include <linux/async.h>
73353 #include <linux/percpu.h>
73354 #include <linux/kmemleak.h>
73355 +#include <linux/grsecurity.h>
73356
73357 #define CREATE_TRACE_POINTS
73358 #include <trace/events/module.h>
73359 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73360 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73361
73362 /* Bounds of module allocation, for speeding __module_address */
73363 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73364 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73365 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73366
73367 int register_module_notifier(struct notifier_block * nb)
73368 {
73369 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73370 return true;
73371
73372 list_for_each_entry_rcu(mod, &modules, list) {
73373 - struct symsearch arr[] = {
73374 + struct symsearch modarr[] = {
73375 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73376 NOT_GPL_ONLY, false },
73377 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73378 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73379 #endif
73380 };
73381
73382 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73383 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73384 return true;
73385 }
73386 return false;
73387 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73388 void *ptr;
73389 int cpu;
73390
73391 - if (align > PAGE_SIZE) {
73392 + if (align-1 >= PAGE_SIZE) {
73393 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73394 name, align, PAGE_SIZE);
73395 align = PAGE_SIZE;
73396 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73397 * /sys/module/foo/sections stuff
73398 * J. Corbet <corbet@lwn.net>
73399 */
73400 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73401 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73402
73403 static inline bool sect_empty(const Elf_Shdr *sect)
73404 {
73405 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73406 destroy_params(mod->kp, mod->num_kp);
73407
73408 /* This may be NULL, but that's OK */
73409 - module_free(mod, mod->module_init);
73410 + module_free(mod, mod->module_init_rw);
73411 + module_free_exec(mod, mod->module_init_rx);
73412 kfree(mod->args);
73413 if (mod->percpu)
73414 percpu_modfree(mod->percpu);
73415 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73416 percpu_modfree(mod->refptr);
73417 #endif
73418 /* Free lock-classes: */
73419 - lockdep_free_key_range(mod->module_core, mod->core_size);
73420 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73421 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73422
73423 /* Finally, free the core (containing the module structure) */
73424 - module_free(mod, mod->module_core);
73425 + module_free_exec(mod, mod->module_core_rx);
73426 + module_free(mod, mod->module_core_rw);
73427
73428 #ifdef CONFIG_MPU
73429 update_protections(current->mm);
73430 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73431 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73432 int ret = 0;
73433 const struct kernel_symbol *ksym;
73434 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73435 + int is_fs_load = 0;
73436 + int register_filesystem_found = 0;
73437 + char *p;
73438 +
73439 + p = strstr(mod->args, "grsec_modharden_fs");
73440 +
73441 + if (p) {
73442 + char *endptr = p + strlen("grsec_modharden_fs");
73443 + /* copy \0 as well */
73444 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73445 + is_fs_load = 1;
73446 + }
73447 +#endif
73448 +
73449
73450 for (i = 1; i < n; i++) {
73451 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73452 + const char *name = strtab + sym[i].st_name;
73453 +
73454 + /* it's a real shame this will never get ripped and copied
73455 + upstream! ;(
73456 + */
73457 + if (is_fs_load && !strcmp(name, "register_filesystem"))
73458 + register_filesystem_found = 1;
73459 +#endif
73460 switch (sym[i].st_shndx) {
73461 case SHN_COMMON:
73462 /* We compiled with -fno-common. These are not
73463 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73464 strtab + sym[i].st_name, mod);
73465 /* Ok if resolved. */
73466 if (ksym) {
73467 + pax_open_kernel();
73468 sym[i].st_value = ksym->value;
73469 + pax_close_kernel();
73470 break;
73471 }
73472
73473 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73474 secbase = (unsigned long)mod->percpu;
73475 else
73476 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73477 + pax_open_kernel();
73478 sym[i].st_value += secbase;
73479 + pax_close_kernel();
73480 break;
73481 }
73482 }
73483
73484 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73485 + if (is_fs_load && !register_filesystem_found) {
73486 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73487 + ret = -EPERM;
73488 + }
73489 +#endif
73490 +
73491 return ret;
73492 }
73493
73494 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73495 || s->sh_entsize != ~0UL
73496 || strstarts(secstrings + s->sh_name, ".init"))
73497 continue;
73498 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73499 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73500 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73501 + else
73502 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73503 DEBUGP("\t%s\n", secstrings + s->sh_name);
73504 }
73505 - if (m == 0)
73506 - mod->core_text_size = mod->core_size;
73507 }
73508
73509 DEBUGP("Init section allocation order:\n");
73510 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73511 || s->sh_entsize != ~0UL
73512 || !strstarts(secstrings + s->sh_name, ".init"))
73513 continue;
73514 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73515 - | INIT_OFFSET_MASK);
73516 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73517 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73518 + else
73519 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73520 + s->sh_entsize |= INIT_OFFSET_MASK;
73521 DEBUGP("\t%s\n", secstrings + s->sh_name);
73522 }
73523 - if (m == 0)
73524 - mod->init_text_size = mod->init_size;
73525 }
73526 }
73527
73528 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73529
73530 /* As per nm */
73531 static char elf_type(const Elf_Sym *sym,
73532 - Elf_Shdr *sechdrs,
73533 - const char *secstrings,
73534 - struct module *mod)
73535 + const Elf_Shdr *sechdrs,
73536 + const char *secstrings)
73537 {
73538 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73539 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73540 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73541
73542 /* Put symbol section at end of init part of module. */
73543 symsect->sh_flags |= SHF_ALLOC;
73544 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73545 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73546 symindex) | INIT_OFFSET_MASK;
73547 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73548
73549 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73550 }
73551
73552 /* Append room for core symbols at end of core part. */
73553 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73554 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73555 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73556 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73557
73558 /* Put string table section at end of init part of module. */
73559 strsect->sh_flags |= SHF_ALLOC;
73560 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73561 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73562 strindex) | INIT_OFFSET_MASK;
73563 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73564
73565 /* Append room for core symbols' strings at end of core part. */
73566 - *pstroffs = mod->core_size;
73567 + *pstroffs = mod->core_size_rx;
73568 __set_bit(0, strmap);
73569 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73570 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73571
73572 return symoffs;
73573 }
73574 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73575 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73576 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73577
73578 + pax_open_kernel();
73579 +
73580 /* Set types up while we still have access to sections. */
73581 for (i = 0; i < mod->num_symtab; i++)
73582 mod->symtab[i].st_info
73583 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73584 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
73585
73586 - mod->core_symtab = dst = mod->module_core + symoffs;
73587 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
73588 src = mod->symtab;
73589 *dst = *src;
73590 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73591 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73592 }
73593 mod->core_num_syms = ndst;
73594
73595 - mod->core_strtab = s = mod->module_core + stroffs;
73596 + mod->core_strtab = s = mod->module_core_rx + stroffs;
73597 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73598 if (test_bit(i, strmap))
73599 *++s = mod->strtab[i];
73600 +
73601 + pax_close_kernel();
73602 }
73603 #else
73604 static inline unsigned long layout_symtab(struct module *mod,
73605 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73606 #endif
73607 }
73608
73609 -static void *module_alloc_update_bounds(unsigned long size)
73610 +static void *module_alloc_update_bounds_rw(unsigned long size)
73611 {
73612 void *ret = module_alloc(size);
73613
73614 if (ret) {
73615 /* Update module bounds. */
73616 - if ((unsigned long)ret < module_addr_min)
73617 - module_addr_min = (unsigned long)ret;
73618 - if ((unsigned long)ret + size > module_addr_max)
73619 - module_addr_max = (unsigned long)ret + size;
73620 + if ((unsigned long)ret < module_addr_min_rw)
73621 + module_addr_min_rw = (unsigned long)ret;
73622 + if ((unsigned long)ret + size > module_addr_max_rw)
73623 + module_addr_max_rw = (unsigned long)ret + size;
73624 + }
73625 + return ret;
73626 +}
73627 +
73628 +static void *module_alloc_update_bounds_rx(unsigned long size)
73629 +{
73630 + void *ret = module_alloc_exec(size);
73631 +
73632 + if (ret) {
73633 + /* Update module bounds. */
73634 + if ((unsigned long)ret < module_addr_min_rx)
73635 + module_addr_min_rx = (unsigned long)ret;
73636 + if ((unsigned long)ret + size > module_addr_max_rx)
73637 + module_addr_max_rx = (unsigned long)ret + size;
73638 }
73639 return ret;
73640 }
73641 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73642 unsigned int i;
73643
73644 /* only scan the sections containing data */
73645 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73646 - (unsigned long)mod->module_core,
73647 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73648 + (unsigned long)mod->module_core_rw,
73649 sizeof(struct module), GFP_KERNEL);
73650
73651 for (i = 1; i < hdr->e_shnum; i++) {
73652 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73653 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73654 continue;
73655
73656 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73657 - (unsigned long)mod->module_core,
73658 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73659 + (unsigned long)mod->module_core_rw,
73660 sechdrs[i].sh_size, GFP_KERNEL);
73661 }
73662 }
73663 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73664 Elf_Ehdr *hdr;
73665 Elf_Shdr *sechdrs;
73666 char *secstrings, *args, *modmagic, *strtab = NULL;
73667 - char *staging;
73668 + char *staging, *license;
73669 unsigned int i;
73670 unsigned int symindex = 0;
73671 unsigned int strindex = 0;
73672 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73673 goto free_hdr;
73674 }
73675
73676 + license = get_modinfo(sechdrs, infoindex, "license");
73677 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73678 + if (!license || !license_is_gpl_compatible(license)) {
73679 + err -ENOEXEC;
73680 + goto free_hdr;
73681 + }
73682 +#endif
73683 +
73684 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73685 /* This is allowed: modprobe --force will invalidate it. */
73686 if (!modmagic) {
73687 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73688 secstrings, &stroffs, strmap);
73689
73690 /* Do the allocs. */
73691 - ptr = module_alloc_update_bounds(mod->core_size);
73692 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73693 /*
73694 * The pointer to this block is stored in the module structure
73695 * which is inside the block. Just mark it as not being a
73696 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73697 err = -ENOMEM;
73698 goto free_percpu;
73699 }
73700 - memset(ptr, 0, mod->core_size);
73701 - mod->module_core = ptr;
73702 + memset(ptr, 0, mod->core_size_rw);
73703 + mod->module_core_rw = ptr;
73704
73705 - ptr = module_alloc_update_bounds(mod->init_size);
73706 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73707 /*
73708 * The pointer to this block is stored in the module structure
73709 * which is inside the block. This block doesn't need to be
73710 * scanned as it contains data and code that will be freed
73711 * after the module is initialized.
73712 */
73713 - kmemleak_ignore(ptr);
73714 - if (!ptr && mod->init_size) {
73715 + kmemleak_not_leak(ptr);
73716 + if (!ptr && mod->init_size_rw) {
73717 err = -ENOMEM;
73718 - goto free_core;
73719 + goto free_core_rw;
73720 }
73721 - memset(ptr, 0, mod->init_size);
73722 - mod->module_init = ptr;
73723 + memset(ptr, 0, mod->init_size_rw);
73724 + mod->module_init_rw = ptr;
73725 +
73726 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73727 + kmemleak_not_leak(ptr);
73728 + if (!ptr) {
73729 + err = -ENOMEM;
73730 + goto free_init_rw;
73731 + }
73732 +
73733 + pax_open_kernel();
73734 + memset(ptr, 0, mod->core_size_rx);
73735 + pax_close_kernel();
73736 + mod->module_core_rx = ptr;
73737 +
73738 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73739 + kmemleak_not_leak(ptr);
73740 + if (!ptr && mod->init_size_rx) {
73741 + err = -ENOMEM;
73742 + goto free_core_rx;
73743 + }
73744 +
73745 + pax_open_kernel();
73746 + memset(ptr, 0, mod->init_size_rx);
73747 + pax_close_kernel();
73748 + mod->module_init_rx = ptr;
73749
73750 /* Transfer each section which specifies SHF_ALLOC */
73751 DEBUGP("final section addresses:\n");
73752 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73753 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73754 continue;
73755
73756 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73757 - dest = mod->module_init
73758 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73759 - else
73760 - dest = mod->module_core + sechdrs[i].sh_entsize;
73761 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73762 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73763 + dest = mod->module_init_rw
73764 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73765 + else
73766 + dest = mod->module_init_rx
73767 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73768 + } else {
73769 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73770 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73771 + else
73772 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73773 + }
73774
73775 - if (sechdrs[i].sh_type != SHT_NOBITS)
73776 - memcpy(dest, (void *)sechdrs[i].sh_addr,
73777 - sechdrs[i].sh_size);
73778 + if (sechdrs[i].sh_type != SHT_NOBITS) {
73779 +
73780 +#ifdef CONFIG_PAX_KERNEXEC
73781 +#ifdef CONFIG_X86_64
73782 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73783 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73784 +#endif
73785 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73786 + pax_open_kernel();
73787 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73788 + pax_close_kernel();
73789 + } else
73790 +#endif
73791 +
73792 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73793 + }
73794 /* Update sh_addr to point to copy in image. */
73795 - sechdrs[i].sh_addr = (unsigned long)dest;
73796 +
73797 +#ifdef CONFIG_PAX_KERNEXEC
73798 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73799 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73800 + else
73801 +#endif
73802 +
73803 + sechdrs[i].sh_addr = (unsigned long)dest;
73804 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73805 }
73806 /* Module has been moved. */
73807 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73808 mod->name);
73809 if (!mod->refptr) {
73810 err = -ENOMEM;
73811 - goto free_init;
73812 + goto free_init_rx;
73813 }
73814 #endif
73815 /* Now we've moved module, initialize linked lists, etc. */
73816 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73817 goto free_unload;
73818
73819 /* Set up license info based on the info section */
73820 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73821 + set_license(mod, license);
73822
73823 /*
73824 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73825 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73826 /* Set up MODINFO_ATTR fields */
73827 setup_modinfo(mod, sechdrs, infoindex);
73828
73829 + mod->args = args;
73830 +
73831 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73832 + {
73833 + char *p, *p2;
73834 +
73835 + if (strstr(mod->args, "grsec_modharden_netdev")) {
73836 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73837 + err = -EPERM;
73838 + goto cleanup;
73839 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73840 + p += strlen("grsec_modharden_normal");
73841 + p2 = strstr(p, "_");
73842 + if (p2) {
73843 + *p2 = '\0';
73844 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73845 + *p2 = '_';
73846 + }
73847 + err = -EPERM;
73848 + goto cleanup;
73849 + }
73850 + }
73851 +#endif
73852 +
73853 +
73854 /* Fix up syms, so that st_value is a pointer to location. */
73855 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73856 mod);
73857 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73858
73859 /* Now do relocations. */
73860 for (i = 1; i < hdr->e_shnum; i++) {
73861 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
73862 unsigned int info = sechdrs[i].sh_info;
73863 + strtab = (char *)sechdrs[strindex].sh_addr;
73864
73865 /* Not a valid relocation section? */
73866 if (info >= hdr->e_shnum)
73867 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73868 * Do it before processing of module parameters, so the module
73869 * can provide parameter accessor functions of its own.
73870 */
73871 - if (mod->module_init)
73872 - flush_icache_range((unsigned long)mod->module_init,
73873 - (unsigned long)mod->module_init
73874 - + mod->init_size);
73875 - flush_icache_range((unsigned long)mod->module_core,
73876 - (unsigned long)mod->module_core + mod->core_size);
73877 + if (mod->module_init_rx)
73878 + flush_icache_range((unsigned long)mod->module_init_rx,
73879 + (unsigned long)mod->module_init_rx
73880 + + mod->init_size_rx);
73881 + flush_icache_range((unsigned long)mod->module_core_rx,
73882 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
73883
73884 set_fs(old_fs);
73885
73886 - mod->args = args;
73887 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73888 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73889 mod->name);
73890 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73891 free_unload:
73892 module_unload_free(mod);
73893 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73894 + free_init_rx:
73895 percpu_modfree(mod->refptr);
73896 - free_init:
73897 #endif
73898 - module_free(mod, mod->module_init);
73899 - free_core:
73900 - module_free(mod, mod->module_core);
73901 + module_free_exec(mod, mod->module_init_rx);
73902 + free_core_rx:
73903 + module_free_exec(mod, mod->module_core_rx);
73904 + free_init_rw:
73905 + module_free(mod, mod->module_init_rw);
73906 + free_core_rw:
73907 + module_free(mod, mod->module_core_rw);
73908 /* mod will be freed with core. Don't access it beyond this line! */
73909 free_percpu:
73910 if (percpu)
73911 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73912 mod->symtab = mod->core_symtab;
73913 mod->strtab = mod->core_strtab;
73914 #endif
73915 - module_free(mod, mod->module_init);
73916 - mod->module_init = NULL;
73917 - mod->init_size = 0;
73918 - mod->init_text_size = 0;
73919 + module_free(mod, mod->module_init_rw);
73920 + module_free_exec(mod, mod->module_init_rx);
73921 + mod->module_init_rw = NULL;
73922 + mod->module_init_rx = NULL;
73923 + mod->init_size_rw = 0;
73924 + mod->init_size_rx = 0;
73925 mutex_unlock(&module_mutex);
73926
73927 return 0;
73928 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73929 unsigned long nextval;
73930
73931 /* At worse, next value is at end of module */
73932 - if (within_module_init(addr, mod))
73933 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
73934 + if (within_module_init_rx(addr, mod))
73935 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73936 + else if (within_module_init_rw(addr, mod))
73937 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73938 + else if (within_module_core_rx(addr, mod))
73939 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73940 + else if (within_module_core_rw(addr, mod))
73941 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73942 else
73943 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
73944 + return NULL;
73945
73946 /* Scan for closest preceeding symbol, and next symbol. (ELF
73947 starts real symbols at 1). */
73948 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73949 char buf[8];
73950
73951 seq_printf(m, "%s %u",
73952 - mod->name, mod->init_size + mod->core_size);
73953 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73954 print_unload_info(m, mod);
73955
73956 /* Informative for users. */
73957 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73958 mod->state == MODULE_STATE_COMING ? "Loading":
73959 "Live");
73960 /* Used by oprofile and other similar tools. */
73961 - seq_printf(m, " 0x%p", mod->module_core);
73962 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73963
73964 /* Taints info */
73965 if (mod->taints)
73966 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73967
73968 static int __init proc_modules_init(void)
73969 {
73970 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73971 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73972 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73973 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73974 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73975 +#else
73976 proc_create("modules", 0, NULL, &proc_modules_operations);
73977 +#endif
73978 +#else
73979 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73980 +#endif
73981 return 0;
73982 }
73983 module_init(proc_modules_init);
73984 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73985 {
73986 struct module *mod;
73987
73988 - if (addr < module_addr_min || addr > module_addr_max)
73989 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73990 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
73991 return NULL;
73992
73993 list_for_each_entry_rcu(mod, &modules, list)
73994 - if (within_module_core(addr, mod)
73995 - || within_module_init(addr, mod))
73996 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
73997 return mod;
73998 return NULL;
73999 }
74000 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
74001 */
74002 struct module *__module_text_address(unsigned long addr)
74003 {
74004 - struct module *mod = __module_address(addr);
74005 + struct module *mod;
74006 +
74007 +#ifdef CONFIG_X86_32
74008 + addr = ktla_ktva(addr);
74009 +#endif
74010 +
74011 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
74012 + return NULL;
74013 +
74014 + mod = __module_address(addr);
74015 +
74016 if (mod) {
74017 /* Make sure it's within the text section. */
74018 - if (!within(addr, mod->module_init, mod->init_text_size)
74019 - && !within(addr, mod->module_core, mod->core_text_size))
74020 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
74021 mod = NULL;
74022 }
74023 return mod;
74024 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
74025 index ec815a9..fe46e99 100644
74026 --- a/kernel/mutex-debug.c
74027 +++ b/kernel/mutex-debug.c
74028 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
74029 }
74030
74031 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74032 - struct thread_info *ti)
74033 + struct task_struct *task)
74034 {
74035 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
74036
74037 /* Mark the current thread as blocked on the lock: */
74038 - ti->task->blocked_on = waiter;
74039 + task->blocked_on = waiter;
74040 }
74041
74042 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74043 - struct thread_info *ti)
74044 + struct task_struct *task)
74045 {
74046 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
74047 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
74048 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
74049 - ti->task->blocked_on = NULL;
74050 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
74051 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
74052 + task->blocked_on = NULL;
74053
74054 list_del_init(&waiter->list);
74055 waiter->task = NULL;
74056 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
74057 return;
74058
74059 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
74060 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
74061 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
74062 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
74063 mutex_clear_owner(lock);
74064 }
74065 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
74066 index 6b2d735..372d3c4 100644
74067 --- a/kernel/mutex-debug.h
74068 +++ b/kernel/mutex-debug.h
74069 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
74070 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
74071 extern void debug_mutex_add_waiter(struct mutex *lock,
74072 struct mutex_waiter *waiter,
74073 - struct thread_info *ti);
74074 + struct task_struct *task);
74075 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74076 - struct thread_info *ti);
74077 + struct task_struct *task);
74078 extern void debug_mutex_unlock(struct mutex *lock);
74079 extern void debug_mutex_init(struct mutex *lock, const char *name,
74080 struct lock_class_key *key);
74081
74082 static inline void mutex_set_owner(struct mutex *lock)
74083 {
74084 - lock->owner = current_thread_info();
74085 + lock->owner = current;
74086 }
74087
74088 static inline void mutex_clear_owner(struct mutex *lock)
74089 diff --git a/kernel/mutex.c b/kernel/mutex.c
74090 index f85644c..5ee9f77 100644
74091 --- a/kernel/mutex.c
74092 +++ b/kernel/mutex.c
74093 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74094 */
74095
74096 for (;;) {
74097 - struct thread_info *owner;
74098 + struct task_struct *owner;
74099
74100 /*
74101 * If we own the BKL, then don't spin. The owner of
74102 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74103 spin_lock_mutex(&lock->wait_lock, flags);
74104
74105 debug_mutex_lock_common(lock, &waiter);
74106 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
74107 + debug_mutex_add_waiter(lock, &waiter, task);
74108
74109 /* add waiting tasks to the end of the waitqueue (FIFO): */
74110 list_add_tail(&waiter.list, &lock->wait_list);
74111 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74112 * TASK_UNINTERRUPTIBLE case.)
74113 */
74114 if (unlikely(signal_pending_state(state, task))) {
74115 - mutex_remove_waiter(lock, &waiter,
74116 - task_thread_info(task));
74117 + mutex_remove_waiter(lock, &waiter, task);
74118 mutex_release(&lock->dep_map, 1, ip);
74119 spin_unlock_mutex(&lock->wait_lock, flags);
74120
74121 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74122 done:
74123 lock_acquired(&lock->dep_map, ip);
74124 /* got the lock - rejoice! */
74125 - mutex_remove_waiter(lock, &waiter, current_thread_info());
74126 + mutex_remove_waiter(lock, &waiter, task);
74127 mutex_set_owner(lock);
74128
74129 /* set it to 0 if there are no waiters left: */
74130 diff --git a/kernel/mutex.h b/kernel/mutex.h
74131 index 67578ca..4115fbf 100644
74132 --- a/kernel/mutex.h
74133 +++ b/kernel/mutex.h
74134 @@ -19,7 +19,7 @@
74135 #ifdef CONFIG_SMP
74136 static inline void mutex_set_owner(struct mutex *lock)
74137 {
74138 - lock->owner = current_thread_info();
74139 + lock->owner = current;
74140 }
74141
74142 static inline void mutex_clear_owner(struct mutex *lock)
74143 diff --git a/kernel/panic.c b/kernel/panic.c
74144 index 96b45d0..ff70a46 100644
74145 --- a/kernel/panic.c
74146 +++ b/kernel/panic.c
74147 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
74148 va_end(args);
74149 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
74150 #ifdef CONFIG_DEBUG_BUGVERBOSE
74151 - dump_stack();
74152 + /*
74153 + * Avoid nested stack-dumping if a panic occurs during oops processing
74154 + */
74155 + if (!oops_in_progress)
74156 + dump_stack();
74157 #endif
74158
74159 /*
74160 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
74161 const char *board;
74162
74163 printk(KERN_WARNING "------------[ cut here ]------------\n");
74164 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
74165 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
74166 board = dmi_get_system_info(DMI_PRODUCT_NAME);
74167 if (board)
74168 printk(KERN_WARNING "Hardware name: %s\n", board);
74169 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
74170 */
74171 void __stack_chk_fail(void)
74172 {
74173 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
74174 + dump_stack();
74175 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
74176 __builtin_return_address(0));
74177 }
74178 EXPORT_SYMBOL(__stack_chk_fail);
74179 diff --git a/kernel/params.c b/kernel/params.c
74180 index d656c27..21e452c 100644
74181 --- a/kernel/params.c
74182 +++ b/kernel/params.c
74183 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
74184 return ret;
74185 }
74186
74187 -static struct sysfs_ops module_sysfs_ops = {
74188 +static const struct sysfs_ops module_sysfs_ops = {
74189 .show = module_attr_show,
74190 .store = module_attr_store,
74191 };
74192 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
74193 return 0;
74194 }
74195
74196 -static struct kset_uevent_ops module_uevent_ops = {
74197 +static const struct kset_uevent_ops module_uevent_ops = {
74198 .filter = uevent_filter,
74199 };
74200
74201 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
74202 index 37ebc14..9c121d9 100644
74203 --- a/kernel/perf_event.c
74204 +++ b/kernel/perf_event.c
74205 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
74206 */
74207 int sysctl_perf_event_sample_rate __read_mostly = 100000;
74208
74209 -static atomic64_t perf_event_id;
74210 +static atomic64_unchecked_t perf_event_id;
74211
74212 /*
74213 * Lock for (sysadmin-configurable) event reservations:
74214 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
74215 * In order to keep per-task stats reliable we need to flip the event
74216 * values when we flip the contexts.
74217 */
74218 - value = atomic64_read(&next_event->count);
74219 - value = atomic64_xchg(&event->count, value);
74220 - atomic64_set(&next_event->count, value);
74221 + value = atomic64_read_unchecked(&next_event->count);
74222 + value = atomic64_xchg_unchecked(&event->count, value);
74223 + atomic64_set_unchecked(&next_event->count, value);
74224
74225 swap(event->total_time_enabled, next_event->total_time_enabled);
74226 swap(event->total_time_running, next_event->total_time_running);
74227 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74228 update_event_times(event);
74229 }
74230
74231 - return atomic64_read(&event->count);
74232 + return atomic64_read_unchecked(&event->count);
74233 }
74234
74235 /*
74236 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74237 values[n++] = 1 + leader->nr_siblings;
74238 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74239 values[n++] = leader->total_time_enabled +
74240 - atomic64_read(&leader->child_total_time_enabled);
74241 + atomic64_read_unchecked(&leader->child_total_time_enabled);
74242 }
74243 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74244 values[n++] = leader->total_time_running +
74245 - atomic64_read(&leader->child_total_time_running);
74246 + atomic64_read_unchecked(&leader->child_total_time_running);
74247 }
74248
74249 size = n * sizeof(u64);
74250 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74251 values[n++] = perf_event_read_value(event);
74252 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74253 values[n++] = event->total_time_enabled +
74254 - atomic64_read(&event->child_total_time_enabled);
74255 + atomic64_read_unchecked(&event->child_total_time_enabled);
74256 }
74257 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74258 values[n++] = event->total_time_running +
74259 - atomic64_read(&event->child_total_time_running);
74260 + atomic64_read_unchecked(&event->child_total_time_running);
74261 }
74262 if (read_format & PERF_FORMAT_ID)
74263 values[n++] = primary_event_id(event);
74264 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74265 static void perf_event_reset(struct perf_event *event)
74266 {
74267 (void)perf_event_read(event);
74268 - atomic64_set(&event->count, 0);
74269 + atomic64_set_unchecked(&event->count, 0);
74270 perf_event_update_userpage(event);
74271 }
74272
74273 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74274 ++userpg->lock;
74275 barrier();
74276 userpg->index = perf_event_index(event);
74277 - userpg->offset = atomic64_read(&event->count);
74278 + userpg->offset = atomic64_read_unchecked(&event->count);
74279 if (event->state == PERF_EVENT_STATE_ACTIVE)
74280 - userpg->offset -= atomic64_read(&event->hw.prev_count);
74281 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74282
74283 userpg->time_enabled = event->total_time_enabled +
74284 - atomic64_read(&event->child_total_time_enabled);
74285 + atomic64_read_unchecked(&event->child_total_time_enabled);
74286
74287 userpg->time_running = event->total_time_running +
74288 - atomic64_read(&event->child_total_time_running);
74289 + atomic64_read_unchecked(&event->child_total_time_running);
74290
74291 barrier();
74292 ++userpg->lock;
74293 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74294 u64 values[4];
74295 int n = 0;
74296
74297 - values[n++] = atomic64_read(&event->count);
74298 + values[n++] = atomic64_read_unchecked(&event->count);
74299 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74300 values[n++] = event->total_time_enabled +
74301 - atomic64_read(&event->child_total_time_enabled);
74302 + atomic64_read_unchecked(&event->child_total_time_enabled);
74303 }
74304 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74305 values[n++] = event->total_time_running +
74306 - atomic64_read(&event->child_total_time_running);
74307 + atomic64_read_unchecked(&event->child_total_time_running);
74308 }
74309 if (read_format & PERF_FORMAT_ID)
74310 values[n++] = primary_event_id(event);
74311 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74312 if (leader != event)
74313 leader->pmu->read(leader);
74314
74315 - values[n++] = atomic64_read(&leader->count);
74316 + values[n++] = atomic64_read_unchecked(&leader->count);
74317 if (read_format & PERF_FORMAT_ID)
74318 values[n++] = primary_event_id(leader);
74319
74320 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74321 if (sub != event)
74322 sub->pmu->read(sub);
74323
74324 - values[n++] = atomic64_read(&sub->count);
74325 + values[n++] = atomic64_read_unchecked(&sub->count);
74326 if (read_format & PERF_FORMAT_ID)
74327 values[n++] = primary_event_id(sub);
74328
74329 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74330 * need to add enough zero bytes after the string to handle
74331 * the 64bit alignment we do later.
74332 */
74333 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74334 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74335 if (!buf) {
74336 name = strncpy(tmp, "//enomem", sizeof(tmp));
74337 goto got_name;
74338 }
74339 - name = d_path(&file->f_path, buf, PATH_MAX);
74340 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74341 if (IS_ERR(name)) {
74342 name = strncpy(tmp, "//toolong", sizeof(tmp));
74343 goto got_name;
74344 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74345 {
74346 struct hw_perf_event *hwc = &event->hw;
74347
74348 - atomic64_add(nr, &event->count);
74349 + atomic64_add_unchecked(nr, &event->count);
74350
74351 if (!hwc->sample_period)
74352 return;
74353 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74354 u64 now;
74355
74356 now = cpu_clock(cpu);
74357 - prev = atomic64_read(&event->hw.prev_count);
74358 - atomic64_set(&event->hw.prev_count, now);
74359 - atomic64_add(now - prev, &event->count);
74360 + prev = atomic64_read_unchecked(&event->hw.prev_count);
74361 + atomic64_set_unchecked(&event->hw.prev_count, now);
74362 + atomic64_add_unchecked(now - prev, &event->count);
74363 }
74364
74365 static int cpu_clock_perf_event_enable(struct perf_event *event)
74366 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74367 struct hw_perf_event *hwc = &event->hw;
74368 int cpu = raw_smp_processor_id();
74369
74370 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74371 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74372 perf_swevent_start_hrtimer(event);
74373
74374 return 0;
74375 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74376 u64 prev;
74377 s64 delta;
74378
74379 - prev = atomic64_xchg(&event->hw.prev_count, now);
74380 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74381 delta = now - prev;
74382 - atomic64_add(delta, &event->count);
74383 + atomic64_add_unchecked(delta, &event->count);
74384 }
74385
74386 static int task_clock_perf_event_enable(struct perf_event *event)
74387 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74388
74389 now = event->ctx->time;
74390
74391 - atomic64_set(&hwc->prev_count, now);
74392 + atomic64_set_unchecked(&hwc->prev_count, now);
74393
74394 perf_swevent_start_hrtimer(event);
74395
74396 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74397 event->parent = parent_event;
74398
74399 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74400 - event->id = atomic64_inc_return(&perf_event_id);
74401 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
74402
74403 event->state = PERF_EVENT_STATE_INACTIVE;
74404
74405 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74406 if (child_event->attr.inherit_stat)
74407 perf_event_read_event(child_event, child);
74408
74409 - child_val = atomic64_read(&child_event->count);
74410 + child_val = atomic64_read_unchecked(&child_event->count);
74411
74412 /*
74413 * Add back the child's count to the parent's count:
74414 */
74415 - atomic64_add(child_val, &parent_event->count);
74416 - atomic64_add(child_event->total_time_enabled,
74417 + atomic64_add_unchecked(child_val, &parent_event->count);
74418 + atomic64_add_unchecked(child_event->total_time_enabled,
74419 &parent_event->child_total_time_enabled);
74420 - atomic64_add(child_event->total_time_running,
74421 + atomic64_add_unchecked(child_event->total_time_running,
74422 &parent_event->child_total_time_running);
74423
74424 /*
74425 diff --git a/kernel/pid.c b/kernel/pid.c
74426 index fce7198..4f23a7e 100644
74427 --- a/kernel/pid.c
74428 +++ b/kernel/pid.c
74429 @@ -33,6 +33,7 @@
74430 #include <linux/rculist.h>
74431 #include <linux/bootmem.h>
74432 #include <linux/hash.h>
74433 +#include <linux/security.h>
74434 #include <linux/pid_namespace.h>
74435 #include <linux/init_task.h>
74436 #include <linux/syscalls.h>
74437 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74438
74439 int pid_max = PID_MAX_DEFAULT;
74440
74441 -#define RESERVED_PIDS 300
74442 +#define RESERVED_PIDS 500
74443
74444 int pid_max_min = RESERVED_PIDS + 1;
74445 int pid_max_max = PID_MAX_LIMIT;
74446 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74447 */
74448 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74449 {
74450 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74451 + struct task_struct *task;
74452 +
74453 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74454 +
74455 + if (gr_pid_is_chrooted(task))
74456 + return NULL;
74457 +
74458 + return task;
74459 }
74460
74461 struct task_struct *find_task_by_vpid(pid_t vnr)
74462 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74463 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74464 }
74465
74466 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74467 +{
74468 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74469 +}
74470 +
74471 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74472 {
74473 struct pid *pid;
74474 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74475 index 5c9dc22..d271117 100644
74476 --- a/kernel/posix-cpu-timers.c
74477 +++ b/kernel/posix-cpu-timers.c
74478 @@ -6,6 +6,7 @@
74479 #include <linux/posix-timers.h>
74480 #include <linux/errno.h>
74481 #include <linux/math64.h>
74482 +#include <linux/security.h>
74483 #include <asm/uaccess.h>
74484 #include <linux/kernel_stat.h>
74485 #include <trace/events/timer.h>
74486 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74487
74488 static __init int init_posix_cpu_timers(void)
74489 {
74490 - struct k_clock process = {
74491 + static struct k_clock process = {
74492 .clock_getres = process_cpu_clock_getres,
74493 .clock_get = process_cpu_clock_get,
74494 .clock_set = do_posix_clock_nosettime,
74495 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74496 .nsleep = process_cpu_nsleep,
74497 .nsleep_restart = process_cpu_nsleep_restart,
74498 };
74499 - struct k_clock thread = {
74500 + static struct k_clock thread = {
74501 .clock_getres = thread_cpu_clock_getres,
74502 .clock_get = thread_cpu_clock_get,
74503 .clock_set = do_posix_clock_nosettime,
74504 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74505 index 5e76d22..cf1baeb 100644
74506 --- a/kernel/posix-timers.c
74507 +++ b/kernel/posix-timers.c
74508 @@ -42,6 +42,7 @@
74509 #include <linux/compiler.h>
74510 #include <linux/idr.h>
74511 #include <linux/posix-timers.h>
74512 +#include <linux/grsecurity.h>
74513 #include <linux/syscalls.h>
74514 #include <linux/wait.h>
74515 #include <linux/workqueue.h>
74516 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74517 * which we beg off on and pass to do_sys_settimeofday().
74518 */
74519
74520 -static struct k_clock posix_clocks[MAX_CLOCKS];
74521 +static struct k_clock *posix_clocks[MAX_CLOCKS];
74522
74523 /*
74524 * These ones are defined below.
74525 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74526 */
74527 #define CLOCK_DISPATCH(clock, call, arglist) \
74528 ((clock) < 0 ? posix_cpu_##call arglist : \
74529 - (posix_clocks[clock].call != NULL \
74530 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74531 + (posix_clocks[clock]->call != NULL \
74532 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74533
74534 /*
74535 * Default clock hook functions when the struct k_clock passed
74536 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74537 struct timespec *tp)
74538 {
74539 tp->tv_sec = 0;
74540 - tp->tv_nsec = posix_clocks[which_clock].res;
74541 + tp->tv_nsec = posix_clocks[which_clock]->res;
74542 return 0;
74543 }
74544
74545 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74546 return 0;
74547 if ((unsigned) which_clock >= MAX_CLOCKS)
74548 return 1;
74549 - if (posix_clocks[which_clock].clock_getres != NULL)
74550 + if (posix_clocks[which_clock] == NULL)
74551 return 0;
74552 - if (posix_clocks[which_clock].res != 0)
74553 + if (posix_clocks[which_clock]->clock_getres != NULL)
74554 + return 0;
74555 + if (posix_clocks[which_clock]->res != 0)
74556 return 0;
74557 return 1;
74558 }
74559 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74560 */
74561 static __init int init_posix_timers(void)
74562 {
74563 - struct k_clock clock_realtime = {
74564 + static struct k_clock clock_realtime = {
74565 .clock_getres = hrtimer_get_res,
74566 };
74567 - struct k_clock clock_monotonic = {
74568 + static struct k_clock clock_monotonic = {
74569 .clock_getres = hrtimer_get_res,
74570 .clock_get = posix_ktime_get_ts,
74571 .clock_set = do_posix_clock_nosettime,
74572 };
74573 - struct k_clock clock_monotonic_raw = {
74574 + static struct k_clock clock_monotonic_raw = {
74575 .clock_getres = hrtimer_get_res,
74576 .clock_get = posix_get_monotonic_raw,
74577 .clock_set = do_posix_clock_nosettime,
74578 .timer_create = no_timer_create,
74579 .nsleep = no_nsleep,
74580 };
74581 - struct k_clock clock_realtime_coarse = {
74582 + static struct k_clock clock_realtime_coarse = {
74583 .clock_getres = posix_get_coarse_res,
74584 .clock_get = posix_get_realtime_coarse,
74585 .clock_set = do_posix_clock_nosettime,
74586 .timer_create = no_timer_create,
74587 .nsleep = no_nsleep,
74588 };
74589 - struct k_clock clock_monotonic_coarse = {
74590 + static struct k_clock clock_monotonic_coarse = {
74591 .clock_getres = posix_get_coarse_res,
74592 .clock_get = posix_get_monotonic_coarse,
74593 .clock_set = do_posix_clock_nosettime,
74594 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74595 .nsleep = no_nsleep,
74596 };
74597
74598 + pax_track_stack();
74599 +
74600 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74601 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74602 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74603 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74604 return;
74605 }
74606
74607 - posix_clocks[clock_id] = *new_clock;
74608 + posix_clocks[clock_id] = new_clock;
74609 }
74610 EXPORT_SYMBOL_GPL(register_posix_clock);
74611
74612 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74613 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74614 return -EFAULT;
74615
74616 + /* only the CLOCK_REALTIME clock can be set, all other clocks
74617 + have their clock_set fptr set to a nosettime dummy function
74618 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74619 + call common_clock_set, which calls do_sys_settimeofday, which
74620 + we hook
74621 + */
74622 +
74623 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74624 }
74625
74626 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74627 index 04a9e90..bc355aa 100644
74628 --- a/kernel/power/hibernate.c
74629 +++ b/kernel/power/hibernate.c
74630 @@ -48,14 +48,14 @@ enum {
74631
74632 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74633
74634 -static struct platform_hibernation_ops *hibernation_ops;
74635 +static const struct platform_hibernation_ops *hibernation_ops;
74636
74637 /**
74638 * hibernation_set_ops - set the global hibernate operations
74639 * @ops: the hibernation operations to use in subsequent hibernation transitions
74640 */
74641
74642 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
74643 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74644 {
74645 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74646 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74647 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74648 index e8b3370..484c2e4 100644
74649 --- a/kernel/power/poweroff.c
74650 +++ b/kernel/power/poweroff.c
74651 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74652 .enable_mask = SYSRQ_ENABLE_BOOT,
74653 };
74654
74655 -static int pm_sysrq_init(void)
74656 +static int __init pm_sysrq_init(void)
74657 {
74658 register_sysrq_key('o', &sysrq_poweroff_op);
74659 return 0;
74660 diff --git a/kernel/power/process.c b/kernel/power/process.c
74661 index e7cd671..56d5f459 100644
74662 --- a/kernel/power/process.c
74663 +++ b/kernel/power/process.c
74664 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74665 struct timeval start, end;
74666 u64 elapsed_csecs64;
74667 unsigned int elapsed_csecs;
74668 + bool timedout = false;
74669
74670 do_gettimeofday(&start);
74671
74672 end_time = jiffies + TIMEOUT;
74673 do {
74674 todo = 0;
74675 + if (time_after(jiffies, end_time))
74676 + timedout = true;
74677 read_lock(&tasklist_lock);
74678 do_each_thread(g, p) {
74679 if (frozen(p) || !freezeable(p))
74680 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74681 * It is "frozen enough". If the task does wake
74682 * up, it will immediately call try_to_freeze.
74683 */
74684 - if (!task_is_stopped_or_traced(p) &&
74685 - !freezer_should_skip(p))
74686 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74687 todo++;
74688 + if (timedout) {
74689 + printk(KERN_ERR "Task refusing to freeze:\n");
74690 + sched_show_task(p);
74691 + }
74692 + }
74693 } while_each_thread(g, p);
74694 read_unlock(&tasklist_lock);
74695 yield(); /* Yield is okay here */
74696 - if (time_after(jiffies, end_time))
74697 - break;
74698 - } while (todo);
74699 + } while (todo && !timedout);
74700
74701 do_gettimeofday(&end);
74702 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74703 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74704 index 40dd021..fb30ceb 100644
74705 --- a/kernel/power/suspend.c
74706 +++ b/kernel/power/suspend.c
74707 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74708 [PM_SUSPEND_MEM] = "mem",
74709 };
74710
74711 -static struct platform_suspend_ops *suspend_ops;
74712 +static const struct platform_suspend_ops *suspend_ops;
74713
74714 /**
74715 * suspend_set_ops - Set the global suspend method table.
74716 * @ops: Pointer to ops structure.
74717 */
74718 -void suspend_set_ops(struct platform_suspend_ops *ops)
74719 +void suspend_set_ops(const struct platform_suspend_ops *ops)
74720 {
74721 mutex_lock(&pm_mutex);
74722 suspend_ops = ops;
74723 diff --git a/kernel/printk.c b/kernel/printk.c
74724 index 4cade47..4d17900 100644
74725 --- a/kernel/printk.c
74726 +++ b/kernel/printk.c
74727 @@ -33,6 +33,7 @@
74728 #include <linux/bootmem.h>
74729 #include <linux/syscalls.h>
74730 #include <linux/kexec.h>
74731 +#include <linux/syslog.h>
74732
74733 #include <asm/uaccess.h>
74734
74735 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74736 }
74737 #endif
74738
74739 -/*
74740 - * Commands to do_syslog:
74741 - *
74742 - * 0 -- Close the log. Currently a NOP.
74743 - * 1 -- Open the log. Currently a NOP.
74744 - * 2 -- Read from the log.
74745 - * 3 -- Read all messages remaining in the ring buffer.
74746 - * 4 -- Read and clear all messages remaining in the ring buffer
74747 - * 5 -- Clear ring buffer.
74748 - * 6 -- Disable printk's to console
74749 - * 7 -- Enable printk's to console
74750 - * 8 -- Set level of messages printed to console
74751 - * 9 -- Return number of unread characters in the log buffer
74752 - * 10 -- Return size of the log buffer
74753 - */
74754 -int do_syslog(int type, char __user *buf, int len)
74755 +int do_syslog(int type, char __user *buf, int len, bool from_file)
74756 {
74757 unsigned i, j, limit, count;
74758 int do_clear = 0;
74759 char c;
74760 int error = 0;
74761
74762 - error = security_syslog(type);
74763 +#ifdef CONFIG_GRKERNSEC_DMESG
74764 + if (grsec_enable_dmesg &&
74765 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74766 + !capable(CAP_SYS_ADMIN))
74767 + return -EPERM;
74768 +#endif
74769 +
74770 + error = security_syslog(type, from_file);
74771 if (error)
74772 return error;
74773
74774 switch (type) {
74775 - case 0: /* Close log */
74776 + case SYSLOG_ACTION_CLOSE: /* Close log */
74777 break;
74778 - case 1: /* Open log */
74779 + case SYSLOG_ACTION_OPEN: /* Open log */
74780 break;
74781 - case 2: /* Read from log */
74782 + case SYSLOG_ACTION_READ: /* Read from log */
74783 error = -EINVAL;
74784 if (!buf || len < 0)
74785 goto out;
74786 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74787 if (!error)
74788 error = i;
74789 break;
74790 - case 4: /* Read/clear last kernel messages */
74791 + /* Read/clear last kernel messages */
74792 + case SYSLOG_ACTION_READ_CLEAR:
74793 do_clear = 1;
74794 /* FALL THRU */
74795 - case 3: /* Read last kernel messages */
74796 + /* Read last kernel messages */
74797 + case SYSLOG_ACTION_READ_ALL:
74798 error = -EINVAL;
74799 if (!buf || len < 0)
74800 goto out;
74801 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74802 }
74803 }
74804 break;
74805 - case 5: /* Clear ring buffer */
74806 + /* Clear ring buffer */
74807 + case SYSLOG_ACTION_CLEAR:
74808 logged_chars = 0;
74809 break;
74810 - case 6: /* Disable logging to console */
74811 + /* Disable logging to console */
74812 + case SYSLOG_ACTION_CONSOLE_OFF:
74813 if (saved_console_loglevel == -1)
74814 saved_console_loglevel = console_loglevel;
74815 console_loglevel = minimum_console_loglevel;
74816 break;
74817 - case 7: /* Enable logging to console */
74818 + /* Enable logging to console */
74819 + case SYSLOG_ACTION_CONSOLE_ON:
74820 if (saved_console_loglevel != -1) {
74821 console_loglevel = saved_console_loglevel;
74822 saved_console_loglevel = -1;
74823 }
74824 break;
74825 - case 8: /* Set level of messages printed to console */
74826 + /* Set level of messages printed to console */
74827 + case SYSLOG_ACTION_CONSOLE_LEVEL:
74828 error = -EINVAL;
74829 if (len < 1 || len > 8)
74830 goto out;
74831 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74832 saved_console_loglevel = -1;
74833 error = 0;
74834 break;
74835 - case 9: /* Number of chars in the log buffer */
74836 + /* Number of chars in the log buffer */
74837 + case SYSLOG_ACTION_SIZE_UNREAD:
74838 error = log_end - log_start;
74839 break;
74840 - case 10: /* Size of the log buffer */
74841 + /* Size of the log buffer */
74842 + case SYSLOG_ACTION_SIZE_BUFFER:
74843 error = log_buf_len;
74844 break;
74845 default:
74846 @@ -415,7 +416,7 @@ out:
74847
74848 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74849 {
74850 - return do_syslog(type, buf, len);
74851 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74852 }
74853
74854 /*
74855 diff --git a/kernel/profile.c b/kernel/profile.c
74856 index dfadc5b..7f59404 100644
74857 --- a/kernel/profile.c
74858 +++ b/kernel/profile.c
74859 @@ -39,7 +39,7 @@ struct profile_hit {
74860 /* Oprofile timer tick hook */
74861 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74862
74863 -static atomic_t *prof_buffer;
74864 +static atomic_unchecked_t *prof_buffer;
74865 static unsigned long prof_len, prof_shift;
74866
74867 int prof_on __read_mostly;
74868 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74869 hits[i].pc = 0;
74870 continue;
74871 }
74872 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74873 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74874 hits[i].hits = hits[i].pc = 0;
74875 }
74876 }
74877 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74878 * Add the current hit(s) and flush the write-queue out
74879 * to the global buffer:
74880 */
74881 - atomic_add(nr_hits, &prof_buffer[pc]);
74882 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74883 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74884 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74885 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74886 hits[i].pc = hits[i].hits = 0;
74887 }
74888 out:
74889 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74890 if (prof_on != type || !prof_buffer)
74891 return;
74892 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74893 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74894 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74895 }
74896 #endif /* !CONFIG_SMP */
74897 EXPORT_SYMBOL_GPL(profile_hits);
74898 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74899 return -EFAULT;
74900 buf++; p++; count--; read++;
74901 }
74902 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74903 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74904 if (copy_to_user(buf, (void *)pnt, count))
74905 return -EFAULT;
74906 read += count;
74907 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74908 }
74909 #endif
74910 profile_discard_flip_buffers();
74911 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74912 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74913 return count;
74914 }
74915
74916 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74917 index 05625f6..733bf70 100644
74918 --- a/kernel/ptrace.c
74919 +++ b/kernel/ptrace.c
74920 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74921 return ret;
74922 }
74923
74924 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74925 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74926 + unsigned int log)
74927 {
74928 const struct cred *cred = current_cred(), *tcred;
74929
74930 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74931 cred->gid != tcred->egid ||
74932 cred->gid != tcred->sgid ||
74933 cred->gid != tcred->gid) &&
74934 - !capable(CAP_SYS_PTRACE)) {
74935 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74936 + (log && !capable(CAP_SYS_PTRACE)))
74937 + ) {
74938 rcu_read_unlock();
74939 return -EPERM;
74940 }
74941 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74942 smp_rmb();
74943 if (task->mm)
74944 dumpable = get_dumpable(task->mm);
74945 - if (!dumpable && !capable(CAP_SYS_PTRACE))
74946 + if (!dumpable &&
74947 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74948 + (log && !capable(CAP_SYS_PTRACE))))
74949 return -EPERM;
74950
74951 return security_ptrace_access_check(task, mode);
74952 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74953 {
74954 int err;
74955 task_lock(task);
74956 - err = __ptrace_may_access(task, mode);
74957 + err = __ptrace_may_access(task, mode, 0);
74958 + task_unlock(task);
74959 + return !err;
74960 +}
74961 +
74962 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74963 +{
74964 + int err;
74965 + task_lock(task);
74966 + err = __ptrace_may_access(task, mode, 1);
74967 task_unlock(task);
74968 return !err;
74969 }
74970 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74971 goto out;
74972
74973 task_lock(task);
74974 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74975 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74976 task_unlock(task);
74977 if (retval)
74978 goto unlock_creds;
74979 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74980 goto unlock_tasklist;
74981
74982 task->ptrace = PT_PTRACED;
74983 - if (capable(CAP_SYS_PTRACE))
74984 + if (capable_nolog(CAP_SYS_PTRACE))
74985 task->ptrace |= PT_PTRACE_CAP;
74986
74987 __ptrace_link(task, current);
74988 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74989 {
74990 int copied = 0;
74991
74992 + pax_track_stack();
74993 +
74994 while (len > 0) {
74995 char buf[128];
74996 int this_len, retval;
74997 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74998 {
74999 int copied = 0;
75000
75001 + pax_track_stack();
75002 +
75003 while (len > 0) {
75004 char buf[128];
75005 int this_len, retval;
75006 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
75007 int ret = -EIO;
75008 siginfo_t siginfo;
75009
75010 + pax_track_stack();
75011 +
75012 switch (request) {
75013 case PTRACE_PEEKTEXT:
75014 case PTRACE_PEEKDATA:
75015 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
75016 ret = ptrace_setoptions(child, data);
75017 break;
75018 case PTRACE_GETEVENTMSG:
75019 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
75020 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
75021 break;
75022
75023 case PTRACE_GETSIGINFO:
75024 ret = ptrace_getsiginfo(child, &siginfo);
75025 if (!ret)
75026 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
75027 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
75028 &siginfo);
75029 break;
75030
75031 case PTRACE_SETSIGINFO:
75032 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
75033 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
75034 sizeof siginfo))
75035 ret = -EFAULT;
75036 else
75037 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
75038 goto out;
75039 }
75040
75041 + if (gr_handle_ptrace(child, request)) {
75042 + ret = -EPERM;
75043 + goto out_put_task_struct;
75044 + }
75045 +
75046 if (request == PTRACE_ATTACH) {
75047 ret = ptrace_attach(child);
75048 /*
75049 * Some architectures need to do book-keeping after
75050 * a ptrace attach.
75051 */
75052 - if (!ret)
75053 + if (!ret) {
75054 arch_ptrace_attach(child);
75055 + gr_audit_ptrace(child);
75056 + }
75057 goto out_put_task_struct;
75058 }
75059
75060 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
75061 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75062 if (copied != sizeof(tmp))
75063 return -EIO;
75064 - return put_user(tmp, (unsigned long __user *)data);
75065 + return put_user(tmp, (__force unsigned long __user *)data);
75066 }
75067
75068 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
75069 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
75070 siginfo_t siginfo;
75071 int ret;
75072
75073 + pax_track_stack();
75074 +
75075 switch (request) {
75076 case PTRACE_PEEKTEXT:
75077 case PTRACE_PEEKDATA:
75078 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75079 goto out;
75080 }
75081
75082 + if (gr_handle_ptrace(child, request)) {
75083 + ret = -EPERM;
75084 + goto out_put_task_struct;
75085 + }
75086 +
75087 if (request == PTRACE_ATTACH) {
75088 ret = ptrace_attach(child);
75089 /*
75090 * Some architectures need to do book-keeping after
75091 * a ptrace attach.
75092 */
75093 - if (!ret)
75094 + if (!ret) {
75095 arch_ptrace_attach(child);
75096 + gr_audit_ptrace(child);
75097 + }
75098 goto out_put_task_struct;
75099 }
75100
75101 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75102 index 697c0a0..2402696 100644
75103 --- a/kernel/rcutorture.c
75104 +++ b/kernel/rcutorture.c
75105 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75106 { 0 };
75107 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75108 { 0 };
75109 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75110 -static atomic_t n_rcu_torture_alloc;
75111 -static atomic_t n_rcu_torture_alloc_fail;
75112 -static atomic_t n_rcu_torture_free;
75113 -static atomic_t n_rcu_torture_mberror;
75114 -static atomic_t n_rcu_torture_error;
75115 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75116 +static atomic_unchecked_t n_rcu_torture_alloc;
75117 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
75118 +static atomic_unchecked_t n_rcu_torture_free;
75119 +static atomic_unchecked_t n_rcu_torture_mberror;
75120 +static atomic_unchecked_t n_rcu_torture_error;
75121 static long n_rcu_torture_timers;
75122 static struct list_head rcu_torture_removed;
75123 static cpumask_var_t shuffle_tmp_mask;
75124 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
75125
75126 spin_lock_bh(&rcu_torture_lock);
75127 if (list_empty(&rcu_torture_freelist)) {
75128 - atomic_inc(&n_rcu_torture_alloc_fail);
75129 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75130 spin_unlock_bh(&rcu_torture_lock);
75131 return NULL;
75132 }
75133 - atomic_inc(&n_rcu_torture_alloc);
75134 + atomic_inc_unchecked(&n_rcu_torture_alloc);
75135 p = rcu_torture_freelist.next;
75136 list_del_init(p);
75137 spin_unlock_bh(&rcu_torture_lock);
75138 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
75139 static void
75140 rcu_torture_free(struct rcu_torture *p)
75141 {
75142 - atomic_inc(&n_rcu_torture_free);
75143 + atomic_inc_unchecked(&n_rcu_torture_free);
75144 spin_lock_bh(&rcu_torture_lock);
75145 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75146 spin_unlock_bh(&rcu_torture_lock);
75147 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
75148 i = rp->rtort_pipe_count;
75149 if (i > RCU_TORTURE_PIPE_LEN)
75150 i = RCU_TORTURE_PIPE_LEN;
75151 - atomic_inc(&rcu_torture_wcount[i]);
75152 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75153 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75154 rp->rtort_mbtest = 0;
75155 rcu_torture_free(rp);
75156 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75157 i = rp->rtort_pipe_count;
75158 if (i > RCU_TORTURE_PIPE_LEN)
75159 i = RCU_TORTURE_PIPE_LEN;
75160 - atomic_inc(&rcu_torture_wcount[i]);
75161 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75162 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75163 rp->rtort_mbtest = 0;
75164 list_del(&rp->rtort_free);
75165 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
75166 i = old_rp->rtort_pipe_count;
75167 if (i > RCU_TORTURE_PIPE_LEN)
75168 i = RCU_TORTURE_PIPE_LEN;
75169 - atomic_inc(&rcu_torture_wcount[i]);
75170 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75171 old_rp->rtort_pipe_count++;
75172 cur_ops->deferred_free(old_rp);
75173 }
75174 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
75175 return;
75176 }
75177 if (p->rtort_mbtest == 0)
75178 - atomic_inc(&n_rcu_torture_mberror);
75179 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75180 spin_lock(&rand_lock);
75181 cur_ops->read_delay(&rand);
75182 n_rcu_torture_timers++;
75183 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
75184 continue;
75185 }
75186 if (p->rtort_mbtest == 0)
75187 - atomic_inc(&n_rcu_torture_mberror);
75188 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75189 cur_ops->read_delay(&rand);
75190 preempt_disable();
75191 pipe_count = p->rtort_pipe_count;
75192 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
75193 rcu_torture_current,
75194 rcu_torture_current_version,
75195 list_empty(&rcu_torture_freelist),
75196 - atomic_read(&n_rcu_torture_alloc),
75197 - atomic_read(&n_rcu_torture_alloc_fail),
75198 - atomic_read(&n_rcu_torture_free),
75199 - atomic_read(&n_rcu_torture_mberror),
75200 + atomic_read_unchecked(&n_rcu_torture_alloc),
75201 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75202 + atomic_read_unchecked(&n_rcu_torture_free),
75203 + atomic_read_unchecked(&n_rcu_torture_mberror),
75204 n_rcu_torture_timers);
75205 - if (atomic_read(&n_rcu_torture_mberror) != 0)
75206 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
75207 cnt += sprintf(&page[cnt], " !!!");
75208 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75209 if (i > 1) {
75210 cnt += sprintf(&page[cnt], "!!! ");
75211 - atomic_inc(&n_rcu_torture_error);
75212 + atomic_inc_unchecked(&n_rcu_torture_error);
75213 WARN_ON_ONCE(1);
75214 }
75215 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75216 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
75217 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75218 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75219 cnt += sprintf(&page[cnt], " %d",
75220 - atomic_read(&rcu_torture_wcount[i]));
75221 + atomic_read_unchecked(&rcu_torture_wcount[i]));
75222 }
75223 cnt += sprintf(&page[cnt], "\n");
75224 if (cur_ops->stats)
75225 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75226
75227 if (cur_ops->cleanup)
75228 cur_ops->cleanup();
75229 - if (atomic_read(&n_rcu_torture_error))
75230 + if (atomic_read_unchecked(&n_rcu_torture_error))
75231 rcu_torture_print_module_parms("End of test: FAILURE");
75232 else
75233 rcu_torture_print_module_parms("End of test: SUCCESS");
75234 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75235
75236 rcu_torture_current = NULL;
75237 rcu_torture_current_version = 0;
75238 - atomic_set(&n_rcu_torture_alloc, 0);
75239 - atomic_set(&n_rcu_torture_alloc_fail, 0);
75240 - atomic_set(&n_rcu_torture_free, 0);
75241 - atomic_set(&n_rcu_torture_mberror, 0);
75242 - atomic_set(&n_rcu_torture_error, 0);
75243 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75244 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75245 + atomic_set_unchecked(&n_rcu_torture_free, 0);
75246 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75247 + atomic_set_unchecked(&n_rcu_torture_error, 0);
75248 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75249 - atomic_set(&rcu_torture_wcount[i], 0);
75250 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75251 for_each_possible_cpu(cpu) {
75252 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75253 per_cpu(rcu_torture_count, cpu)[i] = 0;
75254 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75255 index 683c4f3..97f54c6 100644
75256 --- a/kernel/rcutree.c
75257 +++ b/kernel/rcutree.c
75258 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75259 /*
75260 * Do softirq processing for the current CPU.
75261 */
75262 -static void rcu_process_callbacks(struct softirq_action *unused)
75263 +static void rcu_process_callbacks(void)
75264 {
75265 /*
75266 * Memory references from any prior RCU read-side critical sections
75267 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75268 index c03edf7..ac1b341 100644
75269 --- a/kernel/rcutree_plugin.h
75270 +++ b/kernel/rcutree_plugin.h
75271 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75272 */
75273 void __rcu_read_lock(void)
75274 {
75275 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75276 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75277 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75278 }
75279 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75280 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75281 struct task_struct *t = current;
75282
75283 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75284 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75285 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75286 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75287 rcu_read_unlock_special(t);
75288 }
75289 diff --git a/kernel/relay.c b/kernel/relay.c
75290 index bf343f5..908e9ee 100644
75291 --- a/kernel/relay.c
75292 +++ b/kernel/relay.c
75293 @@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75294 unsigned int flags,
75295 int *nonpad_ret)
75296 {
75297 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75298 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75299 struct rchan_buf *rbuf = in->private_data;
75300 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75301 uint64_t pos = (uint64_t) *ppos;
75302 @@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75303 .ops = &relay_pipe_buf_ops,
75304 .spd_release = relay_page_release,
75305 };
75306 + ssize_t ret;
75307 +
75308 + pax_track_stack();
75309
75310 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75311 return 0;
75312 diff --git a/kernel/resource.c b/kernel/resource.c
75313 index fb11a58..4e61ae1 100644
75314 --- a/kernel/resource.c
75315 +++ b/kernel/resource.c
75316 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75317
75318 static int __init ioresources_init(void)
75319 {
75320 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75321 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75322 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75323 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75324 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75325 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75326 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75327 +#endif
75328 +#else
75329 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75330 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75331 +#endif
75332 return 0;
75333 }
75334 __initcall(ioresources_init);
75335 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75336 index a56f629..1fc4989 100644
75337 --- a/kernel/rtmutex-tester.c
75338 +++ b/kernel/rtmutex-tester.c
75339 @@ -21,7 +21,7 @@
75340 #define MAX_RT_TEST_MUTEXES 8
75341
75342 static spinlock_t rttest_lock;
75343 -static atomic_t rttest_event;
75344 +static atomic_unchecked_t rttest_event;
75345
75346 struct test_thread_data {
75347 int opcode;
75348 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75349
75350 case RTTEST_LOCKCONT:
75351 td->mutexes[td->opdata] = 1;
75352 - td->event = atomic_add_return(1, &rttest_event);
75353 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75354 return 0;
75355
75356 case RTTEST_RESET:
75357 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75358 return 0;
75359
75360 case RTTEST_RESETEVENT:
75361 - atomic_set(&rttest_event, 0);
75362 + atomic_set_unchecked(&rttest_event, 0);
75363 return 0;
75364
75365 default:
75366 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75367 return ret;
75368
75369 td->mutexes[id] = 1;
75370 - td->event = atomic_add_return(1, &rttest_event);
75371 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75372 rt_mutex_lock(&mutexes[id]);
75373 - td->event = atomic_add_return(1, &rttest_event);
75374 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75375 td->mutexes[id] = 4;
75376 return 0;
75377
75378 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75379 return ret;
75380
75381 td->mutexes[id] = 1;
75382 - td->event = atomic_add_return(1, &rttest_event);
75383 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75384 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75385 - td->event = atomic_add_return(1, &rttest_event);
75386 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75387 td->mutexes[id] = ret ? 0 : 4;
75388 return ret ? -EINTR : 0;
75389
75390 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75391 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75392 return ret;
75393
75394 - td->event = atomic_add_return(1, &rttest_event);
75395 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75396 rt_mutex_unlock(&mutexes[id]);
75397 - td->event = atomic_add_return(1, &rttest_event);
75398 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75399 td->mutexes[id] = 0;
75400 return 0;
75401
75402 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75403 break;
75404
75405 td->mutexes[dat] = 2;
75406 - td->event = atomic_add_return(1, &rttest_event);
75407 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75408 break;
75409
75410 case RTTEST_LOCKBKL:
75411 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75412 return;
75413
75414 td->mutexes[dat] = 3;
75415 - td->event = atomic_add_return(1, &rttest_event);
75416 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75417 break;
75418
75419 case RTTEST_LOCKNOWAIT:
75420 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75421 return;
75422
75423 td->mutexes[dat] = 1;
75424 - td->event = atomic_add_return(1, &rttest_event);
75425 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75426 return;
75427
75428 case RTTEST_LOCKBKL:
75429 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75430 index 29bd4ba..8c5de90 100644
75431 --- a/kernel/rtmutex.c
75432 +++ b/kernel/rtmutex.c
75433 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75434 */
75435 spin_lock_irqsave(&pendowner->pi_lock, flags);
75436
75437 - WARN_ON(!pendowner->pi_blocked_on);
75438 + BUG_ON(!pendowner->pi_blocked_on);
75439 WARN_ON(pendowner->pi_blocked_on != waiter);
75440 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75441
75442 diff --git a/kernel/sched.c b/kernel/sched.c
75443 index 0591df8..e3af3a4 100644
75444 --- a/kernel/sched.c
75445 +++ b/kernel/sched.c
75446 @@ -5043,7 +5043,7 @@ out:
75447 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75448 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75449 */
75450 -static void run_rebalance_domains(struct softirq_action *h)
75451 +static void run_rebalance_domains(void)
75452 {
75453 int this_cpu = smp_processor_id();
75454 struct rq *this_rq = cpu_rq(this_cpu);
75455 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75456 }
75457 }
75458
75459 +#ifdef CONFIG_GRKERNSEC_SETXID
75460 +extern void gr_delayed_cred_worker(void);
75461 +static inline void gr_cred_schedule(void)
75462 +{
75463 + if (unlikely(current->delayed_cred))
75464 + gr_delayed_cred_worker();
75465 +}
75466 +#else
75467 +static inline void gr_cred_schedule(void)
75468 +{
75469 +}
75470 +#endif
75471 +
75472 /*
75473 * schedule() is the main scheduler function.
75474 */
75475 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75476 struct rq *rq;
75477 int cpu;
75478
75479 + pax_track_stack();
75480 +
75481 need_resched:
75482 preempt_disable();
75483 cpu = smp_processor_id();
75484 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75485
75486 schedule_debug(prev);
75487
75488 + gr_cred_schedule();
75489 +
75490 if (sched_feat(HRTICK))
75491 hrtick_clear(rq);
75492
75493 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75494 * Look out! "owner" is an entirely speculative pointer
75495 * access and not reliable.
75496 */
75497 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75498 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75499 {
75500 unsigned int cpu;
75501 struct rq *rq;
75502 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75503 * DEBUG_PAGEALLOC could have unmapped it if
75504 * the mutex owner just released it and exited.
75505 */
75506 - if (probe_kernel_address(&owner->cpu, cpu))
75507 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75508 return 0;
75509 #else
75510 - cpu = owner->cpu;
75511 + cpu = task_thread_info(owner)->cpu;
75512 #endif
75513
75514 /*
75515 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75516 /*
75517 * Is that owner really running on that cpu?
75518 */
75519 - if (task_thread_info(rq->curr) != owner || need_resched())
75520 + if (rq->curr != owner || need_resched())
75521 return 0;
75522
75523 cpu_relax();
75524 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75525 /* convert nice value [19,-20] to rlimit style value [1,40] */
75526 int nice_rlim = 20 - nice;
75527
75528 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75529 +
75530 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75531 capable(CAP_SYS_NICE));
75532 }
75533 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75534 if (nice > 19)
75535 nice = 19;
75536
75537 - if (increment < 0 && !can_nice(current, nice))
75538 + if (increment < 0 && (!can_nice(current, nice) ||
75539 + gr_handle_chroot_nice()))
75540 return -EPERM;
75541
75542 retval = security_task_setnice(current, nice);
75543 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75544 long power;
75545 int weight;
75546
75547 - WARN_ON(!sd || !sd->groups);
75548 + BUG_ON(!sd || !sd->groups);
75549
75550 if (cpu != group_first_cpu(sd->groups))
75551 return;
75552 diff --git a/kernel/signal.c b/kernel/signal.c
75553 index 2494827..cda80a0 100644
75554 --- a/kernel/signal.c
75555 +++ b/kernel/signal.c
75556 @@ -41,12 +41,12 @@
75557
75558 static struct kmem_cache *sigqueue_cachep;
75559
75560 -static void __user *sig_handler(struct task_struct *t, int sig)
75561 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
75562 {
75563 return t->sighand->action[sig - 1].sa.sa_handler;
75564 }
75565
75566 -static int sig_handler_ignored(void __user *handler, int sig)
75567 +static int sig_handler_ignored(__sighandler_t handler, int sig)
75568 {
75569 /* Is it explicitly or implicitly ignored? */
75570 return handler == SIG_IGN ||
75571 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75572 static int sig_task_ignored(struct task_struct *t, int sig,
75573 int from_ancestor_ns)
75574 {
75575 - void __user *handler;
75576 + __sighandler_t handler;
75577
75578 handler = sig_handler(t, sig);
75579
75580 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75581 */
75582 user = get_uid(__task_cred(t)->user);
75583 atomic_inc(&user->sigpending);
75584 +
75585 + if (!override_rlimit)
75586 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75587 if (override_rlimit ||
75588 atomic_read(&user->sigpending) <=
75589 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75590 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75591
75592 int unhandled_signal(struct task_struct *tsk, int sig)
75593 {
75594 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75595 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75596 if (is_global_init(tsk))
75597 return 1;
75598 if (handler != SIG_IGN && handler != SIG_DFL)
75599 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75600 }
75601 }
75602
75603 + /* allow glibc communication via tgkill to other threads in our
75604 + thread group */
75605 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75606 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75607 + && gr_handle_signal(t, sig))
75608 + return -EPERM;
75609 +
75610 return security_task_kill(t, info, sig, 0);
75611 }
75612
75613 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75614 return send_signal(sig, info, p, 1);
75615 }
75616
75617 -static int
75618 +int
75619 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75620 {
75621 return send_signal(sig, info, t, 0);
75622 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75623 unsigned long int flags;
75624 int ret, blocked, ignored;
75625 struct k_sigaction *action;
75626 + int is_unhandled = 0;
75627
75628 spin_lock_irqsave(&t->sighand->siglock, flags);
75629 action = &t->sighand->action[sig-1];
75630 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75631 }
75632 if (action->sa.sa_handler == SIG_DFL)
75633 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75634 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75635 + is_unhandled = 1;
75636 ret = specific_send_sig_info(sig, info, t);
75637 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75638
75639 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
75640 + normal operation */
75641 + if (is_unhandled) {
75642 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75643 + gr_handle_crash(t, sig);
75644 + }
75645 +
75646 return ret;
75647 }
75648
75649 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75650 {
75651 int ret = check_kill_permission(sig, info, p);
75652
75653 - if (!ret && sig)
75654 + if (!ret && sig) {
75655 ret = do_send_sig_info(sig, info, p, true);
75656 + if (!ret)
75657 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75658 + }
75659
75660 return ret;
75661 }
75662 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75663 {
75664 siginfo_t info;
75665
75666 + pax_track_stack();
75667 +
75668 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75669
75670 memset(&info, 0, sizeof info);
75671 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75672 int error = -ESRCH;
75673
75674 rcu_read_lock();
75675 - p = find_task_by_vpid(pid);
75676 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75677 + /* allow glibc communication via tgkill to other threads in our
75678 + thread group */
75679 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75680 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
75681 + p = find_task_by_vpid_unrestricted(pid);
75682 + else
75683 +#endif
75684 + p = find_task_by_vpid(pid);
75685 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75686 error = check_kill_permission(sig, info, p);
75687 /*
75688 diff --git a/kernel/smp.c b/kernel/smp.c
75689 index aa9cff3..631a0de 100644
75690 --- a/kernel/smp.c
75691 +++ b/kernel/smp.c
75692 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75693 }
75694 EXPORT_SYMBOL(smp_call_function);
75695
75696 -void ipi_call_lock(void)
75697 +void ipi_call_lock(void) __acquires(call_function.lock)
75698 {
75699 spin_lock(&call_function.lock);
75700 }
75701
75702 -void ipi_call_unlock(void)
75703 +void ipi_call_unlock(void) __releases(call_function.lock)
75704 {
75705 spin_unlock(&call_function.lock);
75706 }
75707
75708 -void ipi_call_lock_irq(void)
75709 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
75710 {
75711 spin_lock_irq(&call_function.lock);
75712 }
75713
75714 -void ipi_call_unlock_irq(void)
75715 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
75716 {
75717 spin_unlock_irq(&call_function.lock);
75718 }
75719 diff --git a/kernel/softirq.c b/kernel/softirq.c
75720 index 04a0252..580c512 100644
75721 --- a/kernel/softirq.c
75722 +++ b/kernel/softirq.c
75723 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75724
75725 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75726
75727 -char *softirq_to_name[NR_SOFTIRQS] = {
75728 +const char * const softirq_to_name[NR_SOFTIRQS] = {
75729 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75730 "TASKLET", "SCHED", "HRTIMER", "RCU"
75731 };
75732 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75733
75734 asmlinkage void __do_softirq(void)
75735 {
75736 - struct softirq_action *h;
75737 + const struct softirq_action *h;
75738 __u32 pending;
75739 int max_restart = MAX_SOFTIRQ_RESTART;
75740 int cpu;
75741 @@ -233,7 +233,7 @@ restart:
75742 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75743
75744 trace_softirq_entry(h, softirq_vec);
75745 - h->action(h);
75746 + h->action();
75747 trace_softirq_exit(h, softirq_vec);
75748 if (unlikely(prev_count != preempt_count())) {
75749 printk(KERN_ERR "huh, entered softirq %td %s %p"
75750 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75751 local_irq_restore(flags);
75752 }
75753
75754 -void open_softirq(int nr, void (*action)(struct softirq_action *))
75755 +void open_softirq(int nr, void (*action)(void))
75756 {
75757 - softirq_vec[nr].action = action;
75758 + pax_open_kernel();
75759 + *(void **)&softirq_vec[nr].action = action;
75760 + pax_close_kernel();
75761 }
75762
75763 /*
75764 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75765
75766 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75767
75768 -static void tasklet_action(struct softirq_action *a)
75769 +static void tasklet_action(void)
75770 {
75771 struct tasklet_struct *list;
75772
75773 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75774 }
75775 }
75776
75777 -static void tasklet_hi_action(struct softirq_action *a)
75778 +static void tasklet_hi_action(void)
75779 {
75780 struct tasklet_struct *list;
75781
75782 diff --git a/kernel/sys.c b/kernel/sys.c
75783 index e9512b1..f07185f 100644
75784 --- a/kernel/sys.c
75785 +++ b/kernel/sys.c
75786 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75787 error = -EACCES;
75788 goto out;
75789 }
75790 +
75791 + if (gr_handle_chroot_setpriority(p, niceval)) {
75792 + error = -EACCES;
75793 + goto out;
75794 + }
75795 +
75796 no_nice = security_task_setnice(p, niceval);
75797 if (no_nice) {
75798 error = no_nice;
75799 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75800 !(user = find_user(who)))
75801 goto out_unlock; /* No processes for this user */
75802
75803 - do_each_thread(g, p)
75804 + do_each_thread(g, p) {
75805 if (__task_cred(p)->uid == who)
75806 error = set_one_prio(p, niceval, error);
75807 - while_each_thread(g, p);
75808 + } while_each_thread(g, p);
75809 if (who != cred->uid)
75810 free_uid(user); /* For find_user() */
75811 break;
75812 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75813 !(user = find_user(who)))
75814 goto out_unlock; /* No processes for this user */
75815
75816 - do_each_thread(g, p)
75817 + do_each_thread(g, p) {
75818 if (__task_cred(p)->uid == who) {
75819 niceval = 20 - task_nice(p);
75820 if (niceval > retval)
75821 retval = niceval;
75822 }
75823 - while_each_thread(g, p);
75824 + } while_each_thread(g, p);
75825 if (who != cred->uid)
75826 free_uid(user); /* for find_user() */
75827 break;
75828 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75829 goto error;
75830 }
75831
75832 + if (gr_check_group_change(new->gid, new->egid, -1))
75833 + goto error;
75834 +
75835 if (rgid != (gid_t) -1 ||
75836 (egid != (gid_t) -1 && egid != old->gid))
75837 new->sgid = new->egid;
75838 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75839 goto error;
75840
75841 retval = -EPERM;
75842 +
75843 + if (gr_check_group_change(gid, gid, gid))
75844 + goto error;
75845 +
75846 if (capable(CAP_SETGID))
75847 new->gid = new->egid = new->sgid = new->fsgid = gid;
75848 else if (gid == old->gid || gid == old->sgid)
75849 @@ -559,7 +572,7 @@ error:
75850 /*
75851 * change the user struct in a credentials set to match the new UID
75852 */
75853 -static int set_user(struct cred *new)
75854 +int set_user(struct cred *new)
75855 {
75856 struct user_struct *new_user;
75857
75858 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75859 if (!new_user)
75860 return -EAGAIN;
75861
75862 + /*
75863 + * We don't fail in case of NPROC limit excess here because too many
75864 + * poorly written programs don't check set*uid() return code, assuming
75865 + * it never fails if called by root. We may still enforce NPROC limit
75866 + * for programs doing set*uid()+execve() by harmlessly deferring the
75867 + * failure to the execve() stage.
75868 + */
75869 if (atomic_read(&new_user->processes) >=
75870 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75871 - new_user != INIT_USER) {
75872 - free_uid(new_user);
75873 - return -EAGAIN;
75874 - }
75875 + new_user != INIT_USER)
75876 + current->flags |= PF_NPROC_EXCEEDED;
75877 + else
75878 + current->flags &= ~PF_NPROC_EXCEEDED;
75879
75880 free_uid(new->user);
75881 new->user = new_user;
75882 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75883 goto error;
75884 }
75885
75886 + if (gr_check_user_change(new->uid, new->euid, -1))
75887 + goto error;
75888 +
75889 if (new->uid != old->uid) {
75890 retval = set_user(new);
75891 if (retval < 0)
75892 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75893 goto error;
75894
75895 retval = -EPERM;
75896 +
75897 + if (gr_check_crash_uid(uid))
75898 + goto error;
75899 + if (gr_check_user_change(uid, uid, uid))
75900 + goto error;
75901 +
75902 if (capable(CAP_SETUID)) {
75903 new->suid = new->uid = uid;
75904 if (uid != old->uid) {
75905 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75906 goto error;
75907 }
75908
75909 + if (gr_check_user_change(ruid, euid, -1))
75910 + goto error;
75911 +
75912 if (ruid != (uid_t) -1) {
75913 new->uid = ruid;
75914 if (ruid != old->uid) {
75915 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75916 goto error;
75917 }
75918
75919 + if (gr_check_group_change(rgid, egid, -1))
75920 + goto error;
75921 +
75922 if (rgid != (gid_t) -1)
75923 new->gid = rgid;
75924 if (egid != (gid_t) -1)
75925 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75926 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75927 goto error;
75928
75929 + if (gr_check_user_change(-1, -1, uid))
75930 + goto error;
75931 +
75932 if (uid == old->uid || uid == old->euid ||
75933 uid == old->suid || uid == old->fsuid ||
75934 capable(CAP_SETUID)) {
75935 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75936 if (gid == old->gid || gid == old->egid ||
75937 gid == old->sgid || gid == old->fsgid ||
75938 capable(CAP_SETGID)) {
75939 + if (gr_check_group_change(-1, -1, gid))
75940 + goto error;
75941 +
75942 if (gid != old_fsgid) {
75943 new->fsgid = gid;
75944 goto change_okay;
75945 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75946 error = get_dumpable(me->mm);
75947 break;
75948 case PR_SET_DUMPABLE:
75949 - if (arg2 < 0 || arg2 > 1) {
75950 + if (arg2 > 1) {
75951 error = -EINVAL;
75952 break;
75953 }
75954 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75955 index b8bd058..ab6a76be 100644
75956 --- a/kernel/sysctl.c
75957 +++ b/kernel/sysctl.c
75958 @@ -63,6 +63,13 @@
75959 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75960
75961 #if defined(CONFIG_SYSCTL)
75962 +#include <linux/grsecurity.h>
75963 +#include <linux/grinternal.h>
75964 +
75965 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75966 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75967 + const int op);
75968 +extern int gr_handle_chroot_sysctl(const int op);
75969
75970 /* External variables not in a header file. */
75971 extern int C_A_D;
75972 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75973 static int proc_taint(struct ctl_table *table, int write,
75974 void __user *buffer, size_t *lenp, loff_t *ppos);
75975 #endif
75976 +extern ctl_table grsecurity_table[];
75977
75978 static struct ctl_table root_table[];
75979 static struct ctl_table_root sysctl_table_root;
75980 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75981 int sysctl_legacy_va_layout;
75982 #endif
75983
75984 +#ifdef CONFIG_PAX_SOFTMODE
75985 +static ctl_table pax_table[] = {
75986 + {
75987 + .ctl_name = CTL_UNNUMBERED,
75988 + .procname = "softmode",
75989 + .data = &pax_softmode,
75990 + .maxlen = sizeof(unsigned int),
75991 + .mode = 0600,
75992 + .proc_handler = &proc_dointvec,
75993 + },
75994 +
75995 + { .ctl_name = 0 }
75996 +};
75997 +#endif
75998 +
75999 extern int prove_locking;
76000 extern int lock_stat;
76001
76002 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
76003 #endif
76004
76005 static struct ctl_table kern_table[] = {
76006 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
76007 + {
76008 + .ctl_name = CTL_UNNUMBERED,
76009 + .procname = "grsecurity",
76010 + .mode = 0500,
76011 + .child = grsecurity_table,
76012 + },
76013 +#endif
76014 +
76015 +#ifdef CONFIG_PAX_SOFTMODE
76016 + {
76017 + .ctl_name = CTL_UNNUMBERED,
76018 + .procname = "pax",
76019 + .mode = 0500,
76020 + .child = pax_table,
76021 + },
76022 +#endif
76023 +
76024 {
76025 .ctl_name = CTL_UNNUMBERED,
76026 .procname = "sched_child_runs_first",
76027 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
76028 .data = &modprobe_path,
76029 .maxlen = KMOD_PATH_LEN,
76030 .mode = 0644,
76031 - .proc_handler = &proc_dostring,
76032 - .strategy = &sysctl_string,
76033 + .proc_handler = &proc_dostring_modpriv,
76034 + .strategy = &sysctl_string_modpriv,
76035 },
76036 {
76037 .ctl_name = CTL_UNNUMBERED,
76038 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
76039 .mode = 0644,
76040 .proc_handler = &proc_dointvec
76041 },
76042 + {
76043 + .procname = "heap_stack_gap",
76044 + .data = &sysctl_heap_stack_gap,
76045 + .maxlen = sizeof(sysctl_heap_stack_gap),
76046 + .mode = 0644,
76047 + .proc_handler = proc_doulongvec_minmax,
76048 + },
76049 #else
76050 {
76051 .ctl_name = CTL_UNNUMBERED,
76052 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
76053 return 0;
76054 }
76055
76056 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
76057 +
76058 static int parse_table(int __user *name, int nlen,
76059 void __user *oldval, size_t __user *oldlenp,
76060 void __user *newval, size_t newlen,
76061 @@ -1821,7 +1871,7 @@ repeat:
76062 if (n == table->ctl_name) {
76063 int error;
76064 if (table->child) {
76065 - if (sysctl_perm(root, table, MAY_EXEC))
76066 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
76067 return -EPERM;
76068 name++;
76069 nlen--;
76070 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
76071 int error;
76072 int mode;
76073
76074 + if (table->parent != NULL && table->parent->procname != NULL &&
76075 + table->procname != NULL &&
76076 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
76077 + return -EACCES;
76078 + if (gr_handle_chroot_sysctl(op))
76079 + return -EACCES;
76080 + error = gr_handle_sysctl(table, op);
76081 + if (error)
76082 + return error;
76083 +
76084 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76085 + if (error)
76086 + return error;
76087 +
76088 + if (root->permissions)
76089 + mode = root->permissions(root, current->nsproxy, table);
76090 + else
76091 + mode = table->mode;
76092 +
76093 + return test_perm(mode, op);
76094 +}
76095 +
76096 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
76097 +{
76098 + int error;
76099 + int mode;
76100 +
76101 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76102 if (error)
76103 return error;
76104 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
76105 buffer, lenp, ppos);
76106 }
76107
76108 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76109 + void __user *buffer, size_t *lenp, loff_t *ppos)
76110 +{
76111 + if (write && !capable(CAP_SYS_MODULE))
76112 + return -EPERM;
76113 +
76114 + return _proc_do_string(table->data, table->maxlen, write,
76115 + buffer, lenp, ppos);
76116 +}
76117 +
76118
76119 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
76120 int *valp,
76121 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
76122 vleft = table->maxlen / sizeof(unsigned long);
76123 left = *lenp;
76124
76125 - for (; left && vleft--; i++, min++, max++, first=0) {
76126 + for (; left && vleft--; i++, first=0) {
76127 if (write) {
76128 while (left) {
76129 char c;
76130 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
76131 return -ENOSYS;
76132 }
76133
76134 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76135 + void __user *buffer, size_t *lenp, loff_t *ppos)
76136 +{
76137 + return -ENOSYS;
76138 +}
76139 +
76140 int proc_dointvec(struct ctl_table *table, int write,
76141 void __user *buffer, size_t *lenp, loff_t *ppos)
76142 {
76143 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
76144 return 1;
76145 }
76146
76147 +int sysctl_string_modpriv(struct ctl_table *table,
76148 + void __user *oldval, size_t __user *oldlenp,
76149 + void __user *newval, size_t newlen)
76150 +{
76151 + if (newval && newlen && !capable(CAP_SYS_MODULE))
76152 + return -EPERM;
76153 +
76154 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
76155 +}
76156 +
76157 /*
76158 * This function makes sure that all of the integers in the vector
76159 * are between the minimum and maximum values given in the arrays
76160 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
76161 return -ENOSYS;
76162 }
76163
76164 +int sysctl_string_modpriv(struct ctl_table *table,
76165 + void __user *oldval, size_t __user *oldlenp,
76166 + void __user *newval, size_t newlen)
76167 +{
76168 + return -ENOSYS;
76169 +}
76170 +
76171 int sysctl_intvec(struct ctl_table *table,
76172 void __user *oldval, size_t __user *oldlenp,
76173 void __user *newval, size_t newlen)
76174 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
76175 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
76176 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
76177 EXPORT_SYMBOL(proc_dostring);
76178 +EXPORT_SYMBOL(proc_dostring_modpriv);
76179 EXPORT_SYMBOL(proc_doulongvec_minmax);
76180 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
76181 EXPORT_SYMBOL(register_sysctl_table);
76182 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
76183 EXPORT_SYMBOL(sysctl_jiffies);
76184 EXPORT_SYMBOL(sysctl_ms_jiffies);
76185 EXPORT_SYMBOL(sysctl_string);
76186 +EXPORT_SYMBOL(sysctl_string_modpriv);
76187 EXPORT_SYMBOL(sysctl_data);
76188 EXPORT_SYMBOL(unregister_sysctl_table);
76189 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
76190 index 469193c..ea3ecb2 100644
76191 --- a/kernel/sysctl_check.c
76192 +++ b/kernel/sysctl_check.c
76193 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76194 } else {
76195 if ((table->strategy == sysctl_data) ||
76196 (table->strategy == sysctl_string) ||
76197 + (table->strategy == sysctl_string_modpriv) ||
76198 (table->strategy == sysctl_intvec) ||
76199 (table->strategy == sysctl_jiffies) ||
76200 (table->strategy == sysctl_ms_jiffies) ||
76201 (table->proc_handler == proc_dostring) ||
76202 + (table->proc_handler == proc_dostring_modpriv) ||
76203 (table->proc_handler == proc_dointvec) ||
76204 (table->proc_handler == proc_dointvec_minmax) ||
76205 (table->proc_handler == proc_dointvec_jiffies) ||
76206 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76207 index a4ef542..798bcd7 100644
76208 --- a/kernel/taskstats.c
76209 +++ b/kernel/taskstats.c
76210 @@ -26,9 +26,12 @@
76211 #include <linux/cgroup.h>
76212 #include <linux/fs.h>
76213 #include <linux/file.h>
76214 +#include <linux/grsecurity.h>
76215 #include <net/genetlink.h>
76216 #include <asm/atomic.h>
76217
76218 +extern int gr_is_taskstats_denied(int pid);
76219 +
76220 /*
76221 * Maximum length of a cpumask that can be specified in
76222 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76223 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76224 size_t size;
76225 cpumask_var_t mask;
76226
76227 + if (gr_is_taskstats_denied(current->pid))
76228 + return -EACCES;
76229 +
76230 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76231 return -ENOMEM;
76232
76233 diff --git a/kernel/time.c b/kernel/time.c
76234 index 33df60e..ca768bd 100644
76235 --- a/kernel/time.c
76236 +++ b/kernel/time.c
76237 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76238 return error;
76239
76240 if (tz) {
76241 + /* we log in do_settimeofday called below, so don't log twice
76242 + */
76243 + if (!tv)
76244 + gr_log_timechange();
76245 +
76246 /* SMP safe, global irq locking makes it work. */
76247 sys_tz = *tz;
76248 update_vsyscall_tz();
76249 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76250 * Avoid unnecessary multiplications/divisions in the
76251 * two most common HZ cases:
76252 */
76253 -unsigned int inline jiffies_to_msecs(const unsigned long j)
76254 +inline unsigned int jiffies_to_msecs(const unsigned long j)
76255 {
76256 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76257 return (MSEC_PER_SEC / HZ) * j;
76258 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76259 }
76260 EXPORT_SYMBOL(jiffies_to_msecs);
76261
76262 -unsigned int inline jiffies_to_usecs(const unsigned long j)
76263 +inline unsigned int jiffies_to_usecs(const unsigned long j)
76264 {
76265 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76266 return (USEC_PER_SEC / HZ) * j;
76267 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76268 index 57b953f..06f149f 100644
76269 --- a/kernel/time/tick-broadcast.c
76270 +++ b/kernel/time/tick-broadcast.c
76271 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76272 * then clear the broadcast bit.
76273 */
76274 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76275 - int cpu = smp_processor_id();
76276 + cpu = smp_processor_id();
76277
76278 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76279 tick_broadcast_clear_oneshot(cpu);
76280 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76281 index 4a71cff..ffb5548 100644
76282 --- a/kernel/time/timekeeping.c
76283 +++ b/kernel/time/timekeeping.c
76284 @@ -14,6 +14,7 @@
76285 #include <linux/init.h>
76286 #include <linux/mm.h>
76287 #include <linux/sched.h>
76288 +#include <linux/grsecurity.h>
76289 #include <linux/sysdev.h>
76290 #include <linux/clocksource.h>
76291 #include <linux/jiffies.h>
76292 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76293 */
76294 struct timespec ts = xtime;
76295 timespec_add_ns(&ts, nsec);
76296 - ACCESS_ONCE(xtime_cache) = ts;
76297 + ACCESS_ONCE_RW(xtime_cache) = ts;
76298 }
76299
76300 /* must hold xtime_lock */
76301 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76302 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76303 return -EINVAL;
76304
76305 + gr_log_timechange();
76306 +
76307 write_seqlock_irqsave(&xtime_lock, flags);
76308
76309 timekeeping_forward_now();
76310 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76311 index 54c0dda..e9095d9 100644
76312 --- a/kernel/time/timer_list.c
76313 +++ b/kernel/time/timer_list.c
76314 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76315
76316 static void print_name_offset(struct seq_file *m, void *sym)
76317 {
76318 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76319 + SEQ_printf(m, "<%p>", NULL);
76320 +#else
76321 char symname[KSYM_NAME_LEN];
76322
76323 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76324 SEQ_printf(m, "<%p>", sym);
76325 else
76326 SEQ_printf(m, "%s", symname);
76327 +#endif
76328 }
76329
76330 static void
76331 @@ -112,7 +116,11 @@ next_one:
76332 static void
76333 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76334 {
76335 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76336 + SEQ_printf(m, " .base: %p\n", NULL);
76337 +#else
76338 SEQ_printf(m, " .base: %p\n", base);
76339 +#endif
76340 SEQ_printf(m, " .index: %d\n",
76341 base->index);
76342 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76343 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76344 {
76345 struct proc_dir_entry *pe;
76346
76347 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76348 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76349 +#else
76350 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76351 +#endif
76352 if (!pe)
76353 return -ENOMEM;
76354 return 0;
76355 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76356 index ee5681f..634089b 100644
76357 --- a/kernel/time/timer_stats.c
76358 +++ b/kernel/time/timer_stats.c
76359 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76360 static unsigned long nr_entries;
76361 static struct entry entries[MAX_ENTRIES];
76362
76363 -static atomic_t overflow_count;
76364 +static atomic_unchecked_t overflow_count;
76365
76366 /*
76367 * The entries are in a hash-table, for fast lookup:
76368 @@ -140,7 +140,7 @@ static void reset_entries(void)
76369 nr_entries = 0;
76370 memset(entries, 0, sizeof(entries));
76371 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76372 - atomic_set(&overflow_count, 0);
76373 + atomic_set_unchecked(&overflow_count, 0);
76374 }
76375
76376 static struct entry *alloc_entry(void)
76377 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76378 if (likely(entry))
76379 entry->count++;
76380 else
76381 - atomic_inc(&overflow_count);
76382 + atomic_inc_unchecked(&overflow_count);
76383
76384 out_unlock:
76385 spin_unlock_irqrestore(lock, flags);
76386 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76387
76388 static void print_name_offset(struct seq_file *m, unsigned long addr)
76389 {
76390 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76391 + seq_printf(m, "<%p>", NULL);
76392 +#else
76393 char symname[KSYM_NAME_LEN];
76394
76395 if (lookup_symbol_name(addr, symname) < 0)
76396 seq_printf(m, "<%p>", (void *)addr);
76397 else
76398 seq_printf(m, "%s", symname);
76399 +#endif
76400 }
76401
76402 static int tstats_show(struct seq_file *m, void *v)
76403 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76404
76405 seq_puts(m, "Timer Stats Version: v0.2\n");
76406 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76407 - if (atomic_read(&overflow_count))
76408 + if (atomic_read_unchecked(&overflow_count))
76409 seq_printf(m, "Overflow: %d entries\n",
76410 - atomic_read(&overflow_count));
76411 + atomic_read_unchecked(&overflow_count));
76412
76413 for (i = 0; i < nr_entries; i++) {
76414 entry = entries + i;
76415 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76416 {
76417 struct proc_dir_entry *pe;
76418
76419 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76420 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76421 +#else
76422 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76423 +#endif
76424 if (!pe)
76425 return -ENOMEM;
76426 return 0;
76427 diff --git a/kernel/timer.c b/kernel/timer.c
76428 index cb3c1f1..8bf5526 100644
76429 --- a/kernel/timer.c
76430 +++ b/kernel/timer.c
76431 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76432 /*
76433 * This function runs timers and the timer-tq in bottom half context.
76434 */
76435 -static void run_timer_softirq(struct softirq_action *h)
76436 +static void run_timer_softirq(void)
76437 {
76438 struct tvec_base *base = __get_cpu_var(tvec_bases);
76439
76440 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76441 index d9d6206..f19467e 100644
76442 --- a/kernel/trace/blktrace.c
76443 +++ b/kernel/trace/blktrace.c
76444 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76445 struct blk_trace *bt = filp->private_data;
76446 char buf[16];
76447
76448 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76449 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76450
76451 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76452 }
76453 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76454 return 1;
76455
76456 bt = buf->chan->private_data;
76457 - atomic_inc(&bt->dropped);
76458 + atomic_inc_unchecked(&bt->dropped);
76459 return 0;
76460 }
76461
76462 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76463
76464 bt->dir = dir;
76465 bt->dev = dev;
76466 - atomic_set(&bt->dropped, 0);
76467 + atomic_set_unchecked(&bt->dropped, 0);
76468
76469 ret = -EIO;
76470 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76471 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76472 index 4872937..c794d40 100644
76473 --- a/kernel/trace/ftrace.c
76474 +++ b/kernel/trace/ftrace.c
76475 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76476
76477 ip = rec->ip;
76478
76479 + ret = ftrace_arch_code_modify_prepare();
76480 + FTRACE_WARN_ON(ret);
76481 + if (ret)
76482 + return 0;
76483 +
76484 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76485 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76486 if (ret) {
76487 ftrace_bug(ret, ip);
76488 rec->flags |= FTRACE_FL_FAILED;
76489 - return 0;
76490 }
76491 - return 1;
76492 + return ret ? 0 : 1;
76493 }
76494
76495 /*
76496 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76497 index e749a05..19c6e94 100644
76498 --- a/kernel/trace/ring_buffer.c
76499 +++ b/kernel/trace/ring_buffer.c
76500 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76501 * the reader page). But if the next page is a header page,
76502 * its flags will be non zero.
76503 */
76504 -static int inline
76505 +static inline int
76506 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76507 struct buffer_page *page, struct list_head *list)
76508 {
76509 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76510 index a2a2d1f..7f32b09 100644
76511 --- a/kernel/trace/trace.c
76512 +++ b/kernel/trace/trace.c
76513 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76514 size_t rem;
76515 unsigned int i;
76516
76517 + pax_track_stack();
76518 +
76519 /* copy the tracer to avoid using a global lock all around */
76520 mutex_lock(&trace_types_lock);
76521 if (unlikely(old_tracer != current_trace && current_trace)) {
76522 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76523 int entries, size, i;
76524 size_t ret;
76525
76526 + pax_track_stack();
76527 +
76528 if (*ppos & (PAGE_SIZE - 1)) {
76529 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76530 return -EINVAL;
76531 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76532 };
76533 #endif
76534
76535 -static struct dentry *d_tracer;
76536 -
76537 struct dentry *tracing_init_dentry(void)
76538 {
76539 + static struct dentry *d_tracer;
76540 static int once;
76541
76542 if (d_tracer)
76543 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76544 return d_tracer;
76545 }
76546
76547 -static struct dentry *d_percpu;
76548 -
76549 struct dentry *tracing_dentry_percpu(void)
76550 {
76551 + static struct dentry *d_percpu;
76552 static int once;
76553 struct dentry *d_tracer;
76554
76555 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76556 index d128f65..f37b4af 100644
76557 --- a/kernel/trace/trace_events.c
76558 +++ b/kernel/trace/trace_events.c
76559 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76560 * Modules must own their file_operations to keep up with
76561 * reference counting.
76562 */
76563 +
76564 struct ftrace_module_file_ops {
76565 struct list_head list;
76566 struct module *mod;
76567 - struct file_operations id;
76568 - struct file_operations enable;
76569 - struct file_operations format;
76570 - struct file_operations filter;
76571 };
76572
76573 static void remove_subsystem_dir(const char *name)
76574 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76575
76576 file_ops->mod = mod;
76577
76578 - file_ops->id = ftrace_event_id_fops;
76579 - file_ops->id.owner = mod;
76580 -
76581 - file_ops->enable = ftrace_enable_fops;
76582 - file_ops->enable.owner = mod;
76583 -
76584 - file_ops->filter = ftrace_event_filter_fops;
76585 - file_ops->filter.owner = mod;
76586 -
76587 - file_ops->format = ftrace_event_format_fops;
76588 - file_ops->format.owner = mod;
76589 + pax_open_kernel();
76590 + *(void **)&mod->trace_id.owner = mod;
76591 + *(void **)&mod->trace_enable.owner = mod;
76592 + *(void **)&mod->trace_filter.owner = mod;
76593 + *(void **)&mod->trace_format.owner = mod;
76594 + pax_close_kernel();
76595
76596 list_add(&file_ops->list, &ftrace_module_file_list);
76597
76598 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76599 call->mod = mod;
76600 list_add(&call->list, &ftrace_events);
76601 event_create_dir(call, d_events,
76602 - &file_ops->id, &file_ops->enable,
76603 - &file_ops->filter, &file_ops->format);
76604 + &mod->trace_id, &mod->trace_enable,
76605 + &mod->trace_filter, &mod->trace_format);
76606 }
76607 }
76608
76609 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76610 index 0acd834..b800b56 100644
76611 --- a/kernel/trace/trace_mmiotrace.c
76612 +++ b/kernel/trace/trace_mmiotrace.c
76613 @@ -23,7 +23,7 @@ struct header_iter {
76614 static struct trace_array *mmio_trace_array;
76615 static bool overrun_detected;
76616 static unsigned long prev_overruns;
76617 -static atomic_t dropped_count;
76618 +static atomic_unchecked_t dropped_count;
76619
76620 static void mmio_reset_data(struct trace_array *tr)
76621 {
76622 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76623
76624 static unsigned long count_overruns(struct trace_iterator *iter)
76625 {
76626 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
76627 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76628 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76629
76630 if (over > prev_overruns)
76631 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76632 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76633 sizeof(*entry), 0, pc);
76634 if (!event) {
76635 - atomic_inc(&dropped_count);
76636 + atomic_inc_unchecked(&dropped_count);
76637 return;
76638 }
76639 entry = ring_buffer_event_data(event);
76640 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76641 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76642 sizeof(*entry), 0, pc);
76643 if (!event) {
76644 - atomic_inc(&dropped_count);
76645 + atomic_inc_unchecked(&dropped_count);
76646 return;
76647 }
76648 entry = ring_buffer_event_data(event);
76649 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76650 index b6c12c6..41fdc53 100644
76651 --- a/kernel/trace/trace_output.c
76652 +++ b/kernel/trace/trace_output.c
76653 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76654 return 0;
76655 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76656 if (!IS_ERR(p)) {
76657 - p = mangle_path(s->buffer + s->len, p, "\n");
76658 + p = mangle_path(s->buffer + s->len, p, "\n\\");
76659 if (p) {
76660 s->len = p - s->buffer;
76661 return 1;
76662 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76663 index 8504ac7..ecf0adb 100644
76664 --- a/kernel/trace/trace_stack.c
76665 +++ b/kernel/trace/trace_stack.c
76666 @@ -50,7 +50,7 @@ static inline void check_stack(void)
76667 return;
76668
76669 /* we do not handle interrupt stacks yet */
76670 - if (!object_is_on_stack(&this_size))
76671 + if (!object_starts_on_stack(&this_size))
76672 return;
76673
76674 local_irq_save(flags);
76675 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76676 index 40cafb0..d5ead43 100644
76677 --- a/kernel/trace/trace_workqueue.c
76678 +++ b/kernel/trace/trace_workqueue.c
76679 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76680 int cpu;
76681 pid_t pid;
76682 /* Can be inserted from interrupt or user context, need to be atomic */
76683 - atomic_t inserted;
76684 + atomic_unchecked_t inserted;
76685 /*
76686 * Don't need to be atomic, works are serialized in a single workqueue thread
76687 * on a single CPU.
76688 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76689 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76690 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76691 if (node->pid == wq_thread->pid) {
76692 - atomic_inc(&node->inserted);
76693 + atomic_inc_unchecked(&node->inserted);
76694 goto found;
76695 }
76696 }
76697 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76698 tsk = get_pid_task(pid, PIDTYPE_PID);
76699 if (tsk) {
76700 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76701 - atomic_read(&cws->inserted), cws->executed,
76702 + atomic_read_unchecked(&cws->inserted), cws->executed,
76703 tsk->comm);
76704 put_task_struct(tsk);
76705 }
76706 diff --git a/kernel/user.c b/kernel/user.c
76707 index 1b91701..8795237 100644
76708 --- a/kernel/user.c
76709 +++ b/kernel/user.c
76710 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76711 spin_lock_irq(&uidhash_lock);
76712 up = uid_hash_find(uid, hashent);
76713 if (up) {
76714 + put_user_ns(ns);
76715 key_put(new->uid_keyring);
76716 key_put(new->session_keyring);
76717 kmem_cache_free(uid_cachep, new);
76718 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76719 index 234ceb1..ad74049 100644
76720 --- a/lib/Kconfig.debug
76721 +++ b/lib/Kconfig.debug
76722 @@ -905,7 +905,7 @@ config LATENCYTOP
76723 select STACKTRACE
76724 select SCHEDSTATS
76725 select SCHED_DEBUG
76726 - depends on HAVE_LATENCYTOP_SUPPORT
76727 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76728 help
76729 Enable this option if you want to use the LatencyTOP tool
76730 to find out which userspace is blocking on what kernel operations.
76731 diff --git a/lib/bitmap.c b/lib/bitmap.c
76732 index 7025658..8d14cab 100644
76733 --- a/lib/bitmap.c
76734 +++ b/lib/bitmap.c
76735 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76736 {
76737 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76738 u32 chunk;
76739 - const char __user *ubuf = buf;
76740 + const char __user *ubuf = (const char __force_user *)buf;
76741
76742 bitmap_zero(maskp, nmaskbits);
76743
76744 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76745 {
76746 if (!access_ok(VERIFY_READ, ubuf, ulen))
76747 return -EFAULT;
76748 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76749 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76750 }
76751 EXPORT_SYMBOL(bitmap_parse_user);
76752
76753 diff --git a/lib/bug.c b/lib/bug.c
76754 index 300e41a..2779eb0 100644
76755 --- a/lib/bug.c
76756 +++ b/lib/bug.c
76757 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76758 return BUG_TRAP_TYPE_NONE;
76759
76760 bug = find_bug(bugaddr);
76761 + if (!bug)
76762 + return BUG_TRAP_TYPE_NONE;
76763
76764 printk(KERN_EMERG "------------[ cut here ]------------\n");
76765
76766 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76767 index 2b413db..e21d207 100644
76768 --- a/lib/debugobjects.c
76769 +++ b/lib/debugobjects.c
76770 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76771 if (limit > 4)
76772 return;
76773
76774 - is_on_stack = object_is_on_stack(addr);
76775 + is_on_stack = object_starts_on_stack(addr);
76776 if (is_on_stack == onstack)
76777 return;
76778
76779 diff --git a/lib/devres.c b/lib/devres.c
76780 index 72c8909..7543868 100644
76781 --- a/lib/devres.c
76782 +++ b/lib/devres.c
76783 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76784 {
76785 iounmap(addr);
76786 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76787 - (void *)addr));
76788 + (void __force *)addr));
76789 }
76790 EXPORT_SYMBOL(devm_iounmap);
76791
76792 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76793 {
76794 ioport_unmap(addr);
76795 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76796 - devm_ioport_map_match, (void *)addr));
76797 + devm_ioport_map_match, (void __force *)addr));
76798 }
76799 EXPORT_SYMBOL(devm_ioport_unmap);
76800
76801 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76802 index 084e879..0674448 100644
76803 --- a/lib/dma-debug.c
76804 +++ b/lib/dma-debug.c
76805 @@ -861,7 +861,7 @@ out:
76806
76807 static void check_for_stack(struct device *dev, void *addr)
76808 {
76809 - if (object_is_on_stack(addr))
76810 + if (object_starts_on_stack(addr))
76811 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76812 "stack [addr=%p]\n", addr);
76813 }
76814 diff --git a/lib/idr.c b/lib/idr.c
76815 index eda7ba3..915dfae 100644
76816 --- a/lib/idr.c
76817 +++ b/lib/idr.c
76818 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76819 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76820
76821 /* if already at the top layer, we need to grow */
76822 - if (id >= 1 << (idp->layers * IDR_BITS)) {
76823 + if (id >= (1 << (idp->layers * IDR_BITS))) {
76824 *starting_id = id;
76825 return IDR_NEED_TO_GROW;
76826 }
76827 diff --git a/lib/inflate.c b/lib/inflate.c
76828 index d102559..4215f31 100644
76829 --- a/lib/inflate.c
76830 +++ b/lib/inflate.c
76831 @@ -266,7 +266,7 @@ static void free(void *where)
76832 malloc_ptr = free_mem_ptr;
76833 }
76834 #else
76835 -#define malloc(a) kmalloc(a, GFP_KERNEL)
76836 +#define malloc(a) kmalloc((a), GFP_KERNEL)
76837 #define free(a) kfree(a)
76838 #endif
76839
76840 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76841 index bd2bea9..6b3c95e 100644
76842 --- a/lib/is_single_threaded.c
76843 +++ b/lib/is_single_threaded.c
76844 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76845 struct task_struct *p, *t;
76846 bool ret;
76847
76848 + if (!mm)
76849 + return true;
76850 +
76851 if (atomic_read(&task->signal->live) != 1)
76852 return false;
76853
76854 diff --git a/lib/kobject.c b/lib/kobject.c
76855 index b512b74..8115eb1 100644
76856 --- a/lib/kobject.c
76857 +++ b/lib/kobject.c
76858 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76859 return ret;
76860 }
76861
76862 -struct sysfs_ops kobj_sysfs_ops = {
76863 +const struct sysfs_ops kobj_sysfs_ops = {
76864 .show = kobj_attr_show,
76865 .store = kobj_attr_store,
76866 };
76867 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76868 * If the kset was not able to be created, NULL will be returned.
76869 */
76870 static struct kset *kset_create(const char *name,
76871 - struct kset_uevent_ops *uevent_ops,
76872 + const struct kset_uevent_ops *uevent_ops,
76873 struct kobject *parent_kobj)
76874 {
76875 struct kset *kset;
76876 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76877 * If the kset was not able to be created, NULL will be returned.
76878 */
76879 struct kset *kset_create_and_add(const char *name,
76880 - struct kset_uevent_ops *uevent_ops,
76881 + const struct kset_uevent_ops *uevent_ops,
76882 struct kobject *parent_kobj)
76883 {
76884 struct kset *kset;
76885 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76886 index 507b821..0bf8ed0 100644
76887 --- a/lib/kobject_uevent.c
76888 +++ b/lib/kobject_uevent.c
76889 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76890 const char *subsystem;
76891 struct kobject *top_kobj;
76892 struct kset *kset;
76893 - struct kset_uevent_ops *uevent_ops;
76894 + const struct kset_uevent_ops *uevent_ops;
76895 u64 seq;
76896 int i = 0;
76897 int retval = 0;
76898 diff --git a/lib/kref.c b/lib/kref.c
76899 index 9ecd6e8..12c94c1 100644
76900 --- a/lib/kref.c
76901 +++ b/lib/kref.c
76902 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76903 */
76904 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76905 {
76906 - WARN_ON(release == NULL);
76907 + BUG_ON(release == NULL);
76908 WARN_ON(release == (void (*)(struct kref *))kfree);
76909
76910 if (atomic_dec_and_test(&kref->refcount)) {
76911 diff --git a/lib/parser.c b/lib/parser.c
76912 index b00d020..1b34325 100644
76913 --- a/lib/parser.c
76914 +++ b/lib/parser.c
76915 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76916 char *buf;
76917 int ret;
76918
76919 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76920 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76921 if (!buf)
76922 return -ENOMEM;
76923 memcpy(buf, s->from, s->to - s->from);
76924 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76925 index 92cdd99..a8149d7 100644
76926 --- a/lib/radix-tree.c
76927 +++ b/lib/radix-tree.c
76928 @@ -81,7 +81,7 @@ struct radix_tree_preload {
76929 int nr;
76930 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76931 };
76932 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76933 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76934
76935 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76936 {
76937 diff --git a/lib/random32.c b/lib/random32.c
76938 index 217d5c4..45aba8a 100644
76939 --- a/lib/random32.c
76940 +++ b/lib/random32.c
76941 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76942 */
76943 static inline u32 __seed(u32 x, u32 m)
76944 {
76945 - return (x < m) ? x + m : x;
76946 + return (x <= m) ? x + m + 1 : x;
76947 }
76948
76949 /**
76950 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76951 index 33bed5e..1477e46 100644
76952 --- a/lib/vsprintf.c
76953 +++ b/lib/vsprintf.c
76954 @@ -16,6 +16,9 @@
76955 * - scnprintf and vscnprintf
76956 */
76957
76958 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76959 +#define __INCLUDED_BY_HIDESYM 1
76960 +#endif
76961 #include <stdarg.h>
76962 #include <linux/module.h>
76963 #include <linux/types.h>
76964 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76965 return buf;
76966 }
76967
76968 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76969 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76970 {
76971 int len, i;
76972
76973 if ((unsigned long)s < PAGE_SIZE)
76974 - s = "<NULL>";
76975 + s = "(null)";
76976
76977 len = strnlen(s, spec.precision);
76978
76979 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76980 unsigned long value = (unsigned long) ptr;
76981 #ifdef CONFIG_KALLSYMS
76982 char sym[KSYM_SYMBOL_LEN];
76983 - if (ext != 'f' && ext != 's')
76984 + if (ext != 'f' && ext != 's' && ext != 'a')
76985 sprint_symbol(sym, value);
76986 else
76987 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76988 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76989 * - 'f' For simple symbolic function names without offset
76990 * - 'S' For symbolic direct pointers with offset
76991 * - 's' For symbolic direct pointers without offset
76992 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76993 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76994 * - 'R' For a struct resource pointer, it prints the range of
76995 * addresses (not the name nor the flags)
76996 * - 'M' For a 6-byte MAC address, it prints the address in the
76997 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76998 struct printf_spec spec)
76999 {
77000 if (!ptr)
77001 - return string(buf, end, "(null)", spec);
77002 + return string(buf, end, "(nil)", spec);
77003
77004 switch (*fmt) {
77005 case 'F':
77006 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77007 case 's':
77008 /* Fallthrough */
77009 case 'S':
77010 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77011 + break;
77012 +#else
77013 + return symbol_string(buf, end, ptr, spec, *fmt);
77014 +#endif
77015 + case 'a':
77016 + /* Fallthrough */
77017 + case 'A':
77018 return symbol_string(buf, end, ptr, spec, *fmt);
77019 case 'R':
77020 return resource_string(buf, end, ptr, spec);
77021 @@ -1445,7 +1458,7 @@ do { \
77022 size_t len;
77023 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
77024 || (unsigned long)save_str < PAGE_SIZE)
77025 - save_str = "<NULL>";
77026 + save_str = "(null)";
77027 len = strlen(save_str);
77028 if (str + len + 1 < end)
77029 memcpy(str, save_str, len + 1);
77030 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77031 typeof(type) value; \
77032 if (sizeof(type) == 8) { \
77033 args = PTR_ALIGN(args, sizeof(u32)); \
77034 - *(u32 *)&value = *(u32 *)args; \
77035 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
77036 + *(u32 *)&value = *(const u32 *)args; \
77037 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
77038 } else { \
77039 args = PTR_ALIGN(args, sizeof(type)); \
77040 - value = *(typeof(type) *)args; \
77041 + value = *(const typeof(type) *)args; \
77042 } \
77043 args += sizeof(type); \
77044 value; \
77045 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77046 const char *str_arg = args;
77047 size_t len = strlen(str_arg);
77048 args += len + 1;
77049 - str = string(str, end, (char *)str_arg, spec);
77050 + str = string(str, end, str_arg, spec);
77051 break;
77052 }
77053
77054 diff --git a/localversion-grsec b/localversion-grsec
77055 new file mode 100644
77056 index 0000000..7cd6065
77057 --- /dev/null
77058 +++ b/localversion-grsec
77059 @@ -0,0 +1 @@
77060 +-grsec
77061 diff --git a/mm/Kconfig b/mm/Kconfig
77062 index 2c19c0b..f3c3f83 100644
77063 --- a/mm/Kconfig
77064 +++ b/mm/Kconfig
77065 @@ -228,7 +228,7 @@ config KSM
77066 config DEFAULT_MMAP_MIN_ADDR
77067 int "Low address space to protect from user allocation"
77068 depends on MMU
77069 - default 4096
77070 + default 65536
77071 help
77072 This is the portion of low virtual memory which should be protected
77073 from userspace allocation. Keeping a user from writing to low pages
77074 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
77075 index 67a33a5..094dcf1 100644
77076 --- a/mm/backing-dev.c
77077 +++ b/mm/backing-dev.c
77078 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
77079 list_add_tail_rcu(&wb->list, &bdi->wb_list);
77080 spin_unlock(&bdi->wb_lock);
77081
77082 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
77083 + tsk->flags |= PF_SWAPWRITE;
77084 set_freezable();
77085
77086 /*
77087 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
77088 * Add the default flusher task that gets created for any bdi
77089 * that has dirty data pending writeout
77090 */
77091 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77092 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77093 {
77094 if (!bdi_cap_writeback_dirty(bdi))
77095 return;
77096 diff --git a/mm/filemap.c b/mm/filemap.c
77097 index a1fe378..e26702f 100644
77098 --- a/mm/filemap.c
77099 +++ b/mm/filemap.c
77100 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
77101 struct address_space *mapping = file->f_mapping;
77102
77103 if (!mapping->a_ops->readpage)
77104 - return -ENOEXEC;
77105 + return -ENODEV;
77106 file_accessed(file);
77107 vma->vm_ops = &generic_file_vm_ops;
77108 vma->vm_flags |= VM_CAN_NONLINEAR;
77109 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
77110 *pos = i_size_read(inode);
77111
77112 if (limit != RLIM_INFINITY) {
77113 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
77114 if (*pos >= limit) {
77115 send_sig(SIGXFSZ, current, 0);
77116 return -EFBIG;
77117 diff --git a/mm/fremap.c b/mm/fremap.c
77118 index b6ec85a..a24ac22 100644
77119 --- a/mm/fremap.c
77120 +++ b/mm/fremap.c
77121 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77122 retry:
77123 vma = find_vma(mm, start);
77124
77125 +#ifdef CONFIG_PAX_SEGMEXEC
77126 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
77127 + goto out;
77128 +#endif
77129 +
77130 /*
77131 * Make sure the vma is shared, that it supports prefaulting,
77132 * and that the remapped range is valid and fully within
77133 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77134 /*
77135 * drop PG_Mlocked flag for over-mapped range
77136 */
77137 - unsigned int saved_flags = vma->vm_flags;
77138 + unsigned long saved_flags = vma->vm_flags;
77139 munlock_vma_pages_range(vma, start, start + size);
77140 vma->vm_flags = saved_flags;
77141 }
77142 diff --git a/mm/highmem.c b/mm/highmem.c
77143 index 9c1e627..5ca9447 100644
77144 --- a/mm/highmem.c
77145 +++ b/mm/highmem.c
77146 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
77147 * So no dangers, even with speculative execution.
77148 */
77149 page = pte_page(pkmap_page_table[i]);
77150 + pax_open_kernel();
77151 pte_clear(&init_mm, (unsigned long)page_address(page),
77152 &pkmap_page_table[i]);
77153 -
77154 + pax_close_kernel();
77155 set_page_address(page, NULL);
77156 need_flush = 1;
77157 }
77158 @@ -177,9 +178,11 @@ start:
77159 }
77160 }
77161 vaddr = PKMAP_ADDR(last_pkmap_nr);
77162 +
77163 + pax_open_kernel();
77164 set_pte_at(&init_mm, vaddr,
77165 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
77166 -
77167 + pax_close_kernel();
77168 pkmap_count[last_pkmap_nr] = 1;
77169 set_page_address(page, (void *)vaddr);
77170
77171 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
77172 index 5e1e508..ac70275 100644
77173 --- a/mm/hugetlb.c
77174 +++ b/mm/hugetlb.c
77175 @@ -869,6 +869,7 @@ free:
77176 list_del(&page->lru);
77177 enqueue_huge_page(h, page);
77178 }
77179 + spin_unlock(&hugetlb_lock);
77180
77181 /* Free unnecessary surplus pages to the buddy allocator */
77182 if (!list_empty(&surplus_list)) {
77183 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
77184 return 1;
77185 }
77186
77187 +#ifdef CONFIG_PAX_SEGMEXEC
77188 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
77189 +{
77190 + struct mm_struct *mm = vma->vm_mm;
77191 + struct vm_area_struct *vma_m;
77192 + unsigned long address_m;
77193 + pte_t *ptep_m;
77194 +
77195 + vma_m = pax_find_mirror_vma(vma);
77196 + if (!vma_m)
77197 + return;
77198 +
77199 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77200 + address_m = address + SEGMEXEC_TASK_SIZE;
77201 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77202 + get_page(page_m);
77203 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77204 +}
77205 +#endif
77206 +
77207 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77208 unsigned long address, pte_t *ptep, pte_t pte,
77209 struct page *pagecache_page)
77210 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
77211 huge_ptep_clear_flush(vma, address, ptep);
77212 set_huge_pte_at(mm, address, ptep,
77213 make_huge_pte(vma, new_page, 1));
77214 +
77215 +#ifdef CONFIG_PAX_SEGMEXEC
77216 + pax_mirror_huge_pte(vma, address, new_page);
77217 +#endif
77218 +
77219 /* Make the old page be freed below */
77220 new_page = old_page;
77221 }
77222 @@ -2135,6 +2161,10 @@ retry:
77223 && (vma->vm_flags & VM_SHARED)));
77224 set_huge_pte_at(mm, address, ptep, new_pte);
77225
77226 +#ifdef CONFIG_PAX_SEGMEXEC
77227 + pax_mirror_huge_pte(vma, address, page);
77228 +#endif
77229 +
77230 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77231 /* Optimization, do the COW without a second fault */
77232 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77233 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77234 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77235 struct hstate *h = hstate_vma(vma);
77236
77237 +#ifdef CONFIG_PAX_SEGMEXEC
77238 + struct vm_area_struct *vma_m;
77239 +
77240 + vma_m = pax_find_mirror_vma(vma);
77241 + if (vma_m) {
77242 + unsigned long address_m;
77243 +
77244 + if (vma->vm_start > vma_m->vm_start) {
77245 + address_m = address;
77246 + address -= SEGMEXEC_TASK_SIZE;
77247 + vma = vma_m;
77248 + h = hstate_vma(vma);
77249 + } else
77250 + address_m = address + SEGMEXEC_TASK_SIZE;
77251 +
77252 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77253 + return VM_FAULT_OOM;
77254 + address_m &= HPAGE_MASK;
77255 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77256 + }
77257 +#endif
77258 +
77259 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77260 if (!ptep)
77261 return VM_FAULT_OOM;
77262 diff --git a/mm/internal.h b/mm/internal.h
77263 index f03e8e2..7354343 100644
77264 --- a/mm/internal.h
77265 +++ b/mm/internal.h
77266 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77267 * in mm/page_alloc.c
77268 */
77269 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77270 +extern void free_compound_page(struct page *page);
77271 extern void prep_compound_page(struct page *page, unsigned long order);
77272
77273
77274 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77275 index c346660..b47382f 100644
77276 --- a/mm/kmemleak.c
77277 +++ b/mm/kmemleak.c
77278 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77279
77280 for (i = 0; i < object->trace_len; i++) {
77281 void *ptr = (void *)object->trace[i];
77282 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77283 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77284 }
77285 }
77286
77287 diff --git a/mm/maccess.c b/mm/maccess.c
77288 index 9073695..1127f348 100644
77289 --- a/mm/maccess.c
77290 +++ b/mm/maccess.c
77291 @@ -14,7 +14,7 @@
77292 * Safely read from address @src to the buffer at @dst. If a kernel fault
77293 * happens, handle that and return -EFAULT.
77294 */
77295 -long probe_kernel_read(void *dst, void *src, size_t size)
77296 +long probe_kernel_read(void *dst, const void *src, size_t size)
77297 {
77298 long ret;
77299 mm_segment_t old_fs = get_fs();
77300 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77301 set_fs(KERNEL_DS);
77302 pagefault_disable();
77303 ret = __copy_from_user_inatomic(dst,
77304 - (__force const void __user *)src, size);
77305 + (const void __force_user *)src, size);
77306 pagefault_enable();
77307 set_fs(old_fs);
77308
77309 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77310 * Safely write to address @dst from the buffer at @src. If a kernel fault
77311 * happens, handle that and return -EFAULT.
77312 */
77313 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77314 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77315 {
77316 long ret;
77317 mm_segment_t old_fs = get_fs();
77318
77319 set_fs(KERNEL_DS);
77320 pagefault_disable();
77321 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77322 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77323 pagefault_enable();
77324 set_fs(old_fs);
77325
77326 diff --git a/mm/madvise.c b/mm/madvise.c
77327 index 35b1479..499f7d4 100644
77328 --- a/mm/madvise.c
77329 +++ b/mm/madvise.c
77330 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77331 pgoff_t pgoff;
77332 unsigned long new_flags = vma->vm_flags;
77333
77334 +#ifdef CONFIG_PAX_SEGMEXEC
77335 + struct vm_area_struct *vma_m;
77336 +#endif
77337 +
77338 switch (behavior) {
77339 case MADV_NORMAL:
77340 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77341 @@ -103,6 +107,13 @@ success:
77342 /*
77343 * vm_flags is protected by the mmap_sem held in write mode.
77344 */
77345 +
77346 +#ifdef CONFIG_PAX_SEGMEXEC
77347 + vma_m = pax_find_mirror_vma(vma);
77348 + if (vma_m)
77349 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77350 +#endif
77351 +
77352 vma->vm_flags = new_flags;
77353
77354 out:
77355 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77356 struct vm_area_struct ** prev,
77357 unsigned long start, unsigned long end)
77358 {
77359 +
77360 +#ifdef CONFIG_PAX_SEGMEXEC
77361 + struct vm_area_struct *vma_m;
77362 +#endif
77363 +
77364 *prev = vma;
77365 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77366 return -EINVAL;
77367 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77368 zap_page_range(vma, start, end - start, &details);
77369 } else
77370 zap_page_range(vma, start, end - start, NULL);
77371 +
77372 +#ifdef CONFIG_PAX_SEGMEXEC
77373 + vma_m = pax_find_mirror_vma(vma);
77374 + if (vma_m) {
77375 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77376 + struct zap_details details = {
77377 + .nonlinear_vma = vma_m,
77378 + .last_index = ULONG_MAX,
77379 + };
77380 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77381 + } else
77382 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77383 + }
77384 +#endif
77385 +
77386 return 0;
77387 }
77388
77389 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77390 if (end < start)
77391 goto out;
77392
77393 +#ifdef CONFIG_PAX_SEGMEXEC
77394 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77395 + if (end > SEGMEXEC_TASK_SIZE)
77396 + goto out;
77397 + } else
77398 +#endif
77399 +
77400 + if (end > TASK_SIZE)
77401 + goto out;
77402 +
77403 error = 0;
77404 if (end == start)
77405 goto out;
77406 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77407 index 8aeba53..b4a4198 100644
77408 --- a/mm/memory-failure.c
77409 +++ b/mm/memory-failure.c
77410 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77411
77412 int sysctl_memory_failure_recovery __read_mostly = 1;
77413
77414 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77415 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77416
77417 /*
77418 * Send all the processes who have the page mapped an ``action optional''
77419 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77420 si.si_signo = SIGBUS;
77421 si.si_errno = 0;
77422 si.si_code = BUS_MCEERR_AO;
77423 - si.si_addr = (void *)addr;
77424 + si.si_addr = (void __user *)addr;
77425 #ifdef __ARCH_SI_TRAPNO
77426 si.si_trapno = trapno;
77427 #endif
77428 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77429 return 0;
77430 }
77431
77432 - atomic_long_add(1, &mce_bad_pages);
77433 + atomic_long_add_unchecked(1, &mce_bad_pages);
77434
77435 /*
77436 * We need/can do nothing about count=0 pages.
77437 diff --git a/mm/memory.c b/mm/memory.c
77438 index 6c836d3..48f3264 100644
77439 --- a/mm/memory.c
77440 +++ b/mm/memory.c
77441 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77442 return;
77443
77444 pmd = pmd_offset(pud, start);
77445 +
77446 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77447 pud_clear(pud);
77448 pmd_free_tlb(tlb, pmd, start);
77449 +#endif
77450 +
77451 }
77452
77453 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77454 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77455 if (end - 1 > ceiling - 1)
77456 return;
77457
77458 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77459 pud = pud_offset(pgd, start);
77460 pgd_clear(pgd);
77461 pud_free_tlb(tlb, pud, start);
77462 +#endif
77463 +
77464 }
77465
77466 /*
77467 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77468 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77469 i = 0;
77470
77471 - do {
77472 + while (nr_pages) {
77473 struct vm_area_struct *vma;
77474
77475 - vma = find_extend_vma(mm, start);
77476 + vma = find_vma(mm, start);
77477 if (!vma && in_gate_area(tsk, start)) {
77478 unsigned long pg = start & PAGE_MASK;
77479 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77480 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77481 continue;
77482 }
77483
77484 - if (!vma ||
77485 + if (!vma || start < vma->vm_start ||
77486 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77487 !(vm_flags & vma->vm_flags))
77488 return i ? : -EFAULT;
77489 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77490 start += PAGE_SIZE;
77491 nr_pages--;
77492 } while (nr_pages && start < vma->vm_end);
77493 - } while (nr_pages);
77494 + }
77495 return i;
77496 }
77497
77498 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77499 page_add_file_rmap(page);
77500 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77501
77502 +#ifdef CONFIG_PAX_SEGMEXEC
77503 + pax_mirror_file_pte(vma, addr, page, ptl);
77504 +#endif
77505 +
77506 retval = 0;
77507 pte_unmap_unlock(pte, ptl);
77508 return retval;
77509 @@ -1560,10 +1571,22 @@ out:
77510 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77511 struct page *page)
77512 {
77513 +
77514 +#ifdef CONFIG_PAX_SEGMEXEC
77515 + struct vm_area_struct *vma_m;
77516 +#endif
77517 +
77518 if (addr < vma->vm_start || addr >= vma->vm_end)
77519 return -EFAULT;
77520 if (!page_count(page))
77521 return -EINVAL;
77522 +
77523 +#ifdef CONFIG_PAX_SEGMEXEC
77524 + vma_m = pax_find_mirror_vma(vma);
77525 + if (vma_m)
77526 + vma_m->vm_flags |= VM_INSERTPAGE;
77527 +#endif
77528 +
77529 vma->vm_flags |= VM_INSERTPAGE;
77530 return insert_page(vma, addr, page, vma->vm_page_prot);
77531 }
77532 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77533 unsigned long pfn)
77534 {
77535 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77536 + BUG_ON(vma->vm_mirror);
77537
77538 if (addr < vma->vm_start || addr >= vma->vm_end)
77539 return -EFAULT;
77540 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77541 copy_user_highpage(dst, src, va, vma);
77542 }
77543
77544 +#ifdef CONFIG_PAX_SEGMEXEC
77545 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77546 +{
77547 + struct mm_struct *mm = vma->vm_mm;
77548 + spinlock_t *ptl;
77549 + pte_t *pte, entry;
77550 +
77551 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77552 + entry = *pte;
77553 + if (!pte_present(entry)) {
77554 + if (!pte_none(entry)) {
77555 + BUG_ON(pte_file(entry));
77556 + free_swap_and_cache(pte_to_swp_entry(entry));
77557 + pte_clear_not_present_full(mm, address, pte, 0);
77558 + }
77559 + } else {
77560 + struct page *page;
77561 +
77562 + flush_cache_page(vma, address, pte_pfn(entry));
77563 + entry = ptep_clear_flush(vma, address, pte);
77564 + BUG_ON(pte_dirty(entry));
77565 + page = vm_normal_page(vma, address, entry);
77566 + if (page) {
77567 + update_hiwater_rss(mm);
77568 + if (PageAnon(page))
77569 + dec_mm_counter(mm, anon_rss);
77570 + else
77571 + dec_mm_counter(mm, file_rss);
77572 + page_remove_rmap(page);
77573 + page_cache_release(page);
77574 + }
77575 + }
77576 + pte_unmap_unlock(pte, ptl);
77577 +}
77578 +
77579 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
77580 + *
77581 + * the ptl of the lower mapped page is held on entry and is not released on exit
77582 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77583 + */
77584 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77585 +{
77586 + struct mm_struct *mm = vma->vm_mm;
77587 + unsigned long address_m;
77588 + spinlock_t *ptl_m;
77589 + struct vm_area_struct *vma_m;
77590 + pmd_t *pmd_m;
77591 + pte_t *pte_m, entry_m;
77592 +
77593 + BUG_ON(!page_m || !PageAnon(page_m));
77594 +
77595 + vma_m = pax_find_mirror_vma(vma);
77596 + if (!vma_m)
77597 + return;
77598 +
77599 + BUG_ON(!PageLocked(page_m));
77600 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77601 + address_m = address + SEGMEXEC_TASK_SIZE;
77602 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77603 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77604 + ptl_m = pte_lockptr(mm, pmd_m);
77605 + if (ptl != ptl_m) {
77606 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77607 + if (!pte_none(*pte_m))
77608 + goto out;
77609 + }
77610 +
77611 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77612 + page_cache_get(page_m);
77613 + page_add_anon_rmap(page_m, vma_m, address_m);
77614 + inc_mm_counter(mm, anon_rss);
77615 + set_pte_at(mm, address_m, pte_m, entry_m);
77616 + update_mmu_cache(vma_m, address_m, entry_m);
77617 +out:
77618 + if (ptl != ptl_m)
77619 + spin_unlock(ptl_m);
77620 + pte_unmap_nested(pte_m);
77621 + unlock_page(page_m);
77622 +}
77623 +
77624 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77625 +{
77626 + struct mm_struct *mm = vma->vm_mm;
77627 + unsigned long address_m;
77628 + spinlock_t *ptl_m;
77629 + struct vm_area_struct *vma_m;
77630 + pmd_t *pmd_m;
77631 + pte_t *pte_m, entry_m;
77632 +
77633 + BUG_ON(!page_m || PageAnon(page_m));
77634 +
77635 + vma_m = pax_find_mirror_vma(vma);
77636 + if (!vma_m)
77637 + return;
77638 +
77639 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77640 + address_m = address + SEGMEXEC_TASK_SIZE;
77641 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77642 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77643 + ptl_m = pte_lockptr(mm, pmd_m);
77644 + if (ptl != ptl_m) {
77645 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77646 + if (!pte_none(*pte_m))
77647 + goto out;
77648 + }
77649 +
77650 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77651 + page_cache_get(page_m);
77652 + page_add_file_rmap(page_m);
77653 + inc_mm_counter(mm, file_rss);
77654 + set_pte_at(mm, address_m, pte_m, entry_m);
77655 + update_mmu_cache(vma_m, address_m, entry_m);
77656 +out:
77657 + if (ptl != ptl_m)
77658 + spin_unlock(ptl_m);
77659 + pte_unmap_nested(pte_m);
77660 +}
77661 +
77662 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77663 +{
77664 + struct mm_struct *mm = vma->vm_mm;
77665 + unsigned long address_m;
77666 + spinlock_t *ptl_m;
77667 + struct vm_area_struct *vma_m;
77668 + pmd_t *pmd_m;
77669 + pte_t *pte_m, entry_m;
77670 +
77671 + vma_m = pax_find_mirror_vma(vma);
77672 + if (!vma_m)
77673 + return;
77674 +
77675 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77676 + address_m = address + SEGMEXEC_TASK_SIZE;
77677 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77678 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77679 + ptl_m = pte_lockptr(mm, pmd_m);
77680 + if (ptl != ptl_m) {
77681 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77682 + if (!pte_none(*pte_m))
77683 + goto out;
77684 + }
77685 +
77686 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77687 + set_pte_at(mm, address_m, pte_m, entry_m);
77688 +out:
77689 + if (ptl != ptl_m)
77690 + spin_unlock(ptl_m);
77691 + pte_unmap_nested(pte_m);
77692 +}
77693 +
77694 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77695 +{
77696 + struct page *page_m;
77697 + pte_t entry;
77698 +
77699 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77700 + goto out;
77701 +
77702 + entry = *pte;
77703 + page_m = vm_normal_page(vma, address, entry);
77704 + if (!page_m)
77705 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77706 + else if (PageAnon(page_m)) {
77707 + if (pax_find_mirror_vma(vma)) {
77708 + pte_unmap_unlock(pte, ptl);
77709 + lock_page(page_m);
77710 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77711 + if (pte_same(entry, *pte))
77712 + pax_mirror_anon_pte(vma, address, page_m, ptl);
77713 + else
77714 + unlock_page(page_m);
77715 + }
77716 + } else
77717 + pax_mirror_file_pte(vma, address, page_m, ptl);
77718 +
77719 +out:
77720 + pte_unmap_unlock(pte, ptl);
77721 +}
77722 +#endif
77723 +
77724 /*
77725 * This routine handles present pages, when users try to write
77726 * to a shared page. It is done by copying the page to a new address
77727 @@ -2156,6 +2360,12 @@ gotten:
77728 */
77729 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77730 if (likely(pte_same(*page_table, orig_pte))) {
77731 +
77732 +#ifdef CONFIG_PAX_SEGMEXEC
77733 + if (pax_find_mirror_vma(vma))
77734 + BUG_ON(!trylock_page(new_page));
77735 +#endif
77736 +
77737 if (old_page) {
77738 if (!PageAnon(old_page)) {
77739 dec_mm_counter(mm, file_rss);
77740 @@ -2207,6 +2417,10 @@ gotten:
77741 page_remove_rmap(old_page);
77742 }
77743
77744 +#ifdef CONFIG_PAX_SEGMEXEC
77745 + pax_mirror_anon_pte(vma, address, new_page, ptl);
77746 +#endif
77747 +
77748 /* Free the old page.. */
77749 new_page = old_page;
77750 ret |= VM_FAULT_WRITE;
77751 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77752 swap_free(entry);
77753 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77754 try_to_free_swap(page);
77755 +
77756 +#ifdef CONFIG_PAX_SEGMEXEC
77757 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77758 +#endif
77759 +
77760 unlock_page(page);
77761
77762 if (flags & FAULT_FLAG_WRITE) {
77763 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77764
77765 /* No need to invalidate - it was non-present before */
77766 update_mmu_cache(vma, address, pte);
77767 +
77768 +#ifdef CONFIG_PAX_SEGMEXEC
77769 + pax_mirror_anon_pte(vma, address, page, ptl);
77770 +#endif
77771 +
77772 unlock:
77773 pte_unmap_unlock(page_table, ptl);
77774 out:
77775 @@ -2632,40 +2856,6 @@ out_release:
77776 }
77777
77778 /*
77779 - * This is like a special single-page "expand_{down|up}wards()",
77780 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
77781 - * doesn't hit another vma.
77782 - */
77783 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77784 -{
77785 - address &= PAGE_MASK;
77786 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77787 - struct vm_area_struct *prev = vma->vm_prev;
77788 -
77789 - /*
77790 - * Is there a mapping abutting this one below?
77791 - *
77792 - * That's only ok if it's the same stack mapping
77793 - * that has gotten split..
77794 - */
77795 - if (prev && prev->vm_end == address)
77796 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77797 -
77798 - expand_stack(vma, address - PAGE_SIZE);
77799 - }
77800 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77801 - struct vm_area_struct *next = vma->vm_next;
77802 -
77803 - /* As VM_GROWSDOWN but s/below/above/ */
77804 - if (next && next->vm_start == address + PAGE_SIZE)
77805 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77806 -
77807 - expand_upwards(vma, address + PAGE_SIZE);
77808 - }
77809 - return 0;
77810 -}
77811 -
77812 -/*
77813 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77814 * but allow concurrent faults), and pte mapped but not yet locked.
77815 * We return with mmap_sem still held, but pte unmapped and unlocked.
77816 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77817 unsigned long address, pte_t *page_table, pmd_t *pmd,
77818 unsigned int flags)
77819 {
77820 - struct page *page;
77821 + struct page *page = NULL;
77822 spinlock_t *ptl;
77823 pte_t entry;
77824
77825 - pte_unmap(page_table);
77826 -
77827 - /* Check if we need to add a guard page to the stack */
77828 - if (check_stack_guard_page(vma, address) < 0)
77829 - return VM_FAULT_SIGBUS;
77830 -
77831 - /* Use the zero-page for reads */
77832 if (!(flags & FAULT_FLAG_WRITE)) {
77833 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77834 vma->vm_page_prot));
77835 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77836 + ptl = pte_lockptr(mm, pmd);
77837 + spin_lock(ptl);
77838 if (!pte_none(*page_table))
77839 goto unlock;
77840 goto setpte;
77841 }
77842
77843 /* Allocate our own private page. */
77844 + pte_unmap(page_table);
77845 +
77846 if (unlikely(anon_vma_prepare(vma)))
77847 goto oom;
77848 page = alloc_zeroed_user_highpage_movable(vma, address);
77849 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77850 if (!pte_none(*page_table))
77851 goto release;
77852
77853 +#ifdef CONFIG_PAX_SEGMEXEC
77854 + if (pax_find_mirror_vma(vma))
77855 + BUG_ON(!trylock_page(page));
77856 +#endif
77857 +
77858 inc_mm_counter(mm, anon_rss);
77859 page_add_new_anon_rmap(page, vma, address);
77860 setpte:
77861 @@ -2720,6 +2911,12 @@ setpte:
77862
77863 /* No need to invalidate - it was non-present before */
77864 update_mmu_cache(vma, address, entry);
77865 +
77866 +#ifdef CONFIG_PAX_SEGMEXEC
77867 + if (page)
77868 + pax_mirror_anon_pte(vma, address, page, ptl);
77869 +#endif
77870 +
77871 unlock:
77872 pte_unmap_unlock(page_table, ptl);
77873 return 0;
77874 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77875 */
77876 /* Only go through if we didn't race with anybody else... */
77877 if (likely(pte_same(*page_table, orig_pte))) {
77878 +
77879 +#ifdef CONFIG_PAX_SEGMEXEC
77880 + if (anon && pax_find_mirror_vma(vma))
77881 + BUG_ON(!trylock_page(page));
77882 +#endif
77883 +
77884 flush_icache_page(vma, page);
77885 entry = mk_pte(page, vma->vm_page_prot);
77886 if (flags & FAULT_FLAG_WRITE)
77887 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77888
77889 /* no need to invalidate: a not-present page won't be cached */
77890 update_mmu_cache(vma, address, entry);
77891 +
77892 +#ifdef CONFIG_PAX_SEGMEXEC
77893 + if (anon)
77894 + pax_mirror_anon_pte(vma, address, page, ptl);
77895 + else
77896 + pax_mirror_file_pte(vma, address, page, ptl);
77897 +#endif
77898 +
77899 } else {
77900 if (charged)
77901 mem_cgroup_uncharge_page(page);
77902 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77903 if (flags & FAULT_FLAG_WRITE)
77904 flush_tlb_page(vma, address);
77905 }
77906 +
77907 +#ifdef CONFIG_PAX_SEGMEXEC
77908 + pax_mirror_pte(vma, address, pte, pmd, ptl);
77909 + return 0;
77910 +#endif
77911 +
77912 unlock:
77913 pte_unmap_unlock(pte, ptl);
77914 return 0;
77915 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77916 pmd_t *pmd;
77917 pte_t *pte;
77918
77919 +#ifdef CONFIG_PAX_SEGMEXEC
77920 + struct vm_area_struct *vma_m;
77921 +#endif
77922 +
77923 __set_current_state(TASK_RUNNING);
77924
77925 count_vm_event(PGFAULT);
77926 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77927 if (unlikely(is_vm_hugetlb_page(vma)))
77928 return hugetlb_fault(mm, vma, address, flags);
77929
77930 +#ifdef CONFIG_PAX_SEGMEXEC
77931 + vma_m = pax_find_mirror_vma(vma);
77932 + if (vma_m) {
77933 + unsigned long address_m;
77934 + pgd_t *pgd_m;
77935 + pud_t *pud_m;
77936 + pmd_t *pmd_m;
77937 +
77938 + if (vma->vm_start > vma_m->vm_start) {
77939 + address_m = address;
77940 + address -= SEGMEXEC_TASK_SIZE;
77941 + vma = vma_m;
77942 + } else
77943 + address_m = address + SEGMEXEC_TASK_SIZE;
77944 +
77945 + pgd_m = pgd_offset(mm, address_m);
77946 + pud_m = pud_alloc(mm, pgd_m, address_m);
77947 + if (!pud_m)
77948 + return VM_FAULT_OOM;
77949 + pmd_m = pmd_alloc(mm, pud_m, address_m);
77950 + if (!pmd_m)
77951 + return VM_FAULT_OOM;
77952 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77953 + return VM_FAULT_OOM;
77954 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77955 + }
77956 +#endif
77957 +
77958 pgd = pgd_offset(mm, address);
77959 pud = pud_alloc(mm, pgd, address);
77960 if (!pud)
77961 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77962 gate_vma.vm_start = FIXADDR_USER_START;
77963 gate_vma.vm_end = FIXADDR_USER_END;
77964 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77965 - gate_vma.vm_page_prot = __P101;
77966 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77967 /*
77968 * Make sure the vDSO gets into every core dump.
77969 * Dumping its contents makes post-mortem fully interpretable later
77970 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77971 index 3c6e3e2..b1ddbb8 100644
77972 --- a/mm/mempolicy.c
77973 +++ b/mm/mempolicy.c
77974 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77975 struct vm_area_struct *next;
77976 int err;
77977
77978 +#ifdef CONFIG_PAX_SEGMEXEC
77979 + struct vm_area_struct *vma_m;
77980 +#endif
77981 +
77982 err = 0;
77983 for (; vma && vma->vm_start < end; vma = next) {
77984 next = vma->vm_next;
77985 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77986 err = policy_vma(vma, new);
77987 if (err)
77988 break;
77989 +
77990 +#ifdef CONFIG_PAX_SEGMEXEC
77991 + vma_m = pax_find_mirror_vma(vma);
77992 + if (vma_m) {
77993 + err = policy_vma(vma_m, new);
77994 + if (err)
77995 + break;
77996 + }
77997 +#endif
77998 +
77999 }
78000 return err;
78001 }
78002 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
78003
78004 if (end < start)
78005 return -EINVAL;
78006 +
78007 +#ifdef CONFIG_PAX_SEGMEXEC
78008 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78009 + if (end > SEGMEXEC_TASK_SIZE)
78010 + return -EINVAL;
78011 + } else
78012 +#endif
78013 +
78014 + if (end > TASK_SIZE)
78015 + return -EINVAL;
78016 +
78017 if (end == start)
78018 return 0;
78019
78020 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78021 if (!mm)
78022 return -EINVAL;
78023
78024 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78025 + if (mm != current->mm &&
78026 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78027 + err = -EPERM;
78028 + goto out;
78029 + }
78030 +#endif
78031 +
78032 /*
78033 * Check if this process has the right to modify the specified
78034 * process. The right exists if the process has administrative
78035 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78036 rcu_read_lock();
78037 tcred = __task_cred(task);
78038 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78039 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78040 - !capable(CAP_SYS_NICE)) {
78041 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78042 rcu_read_unlock();
78043 err = -EPERM;
78044 goto out;
78045 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
78046 }
78047 #endif
78048
78049 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78050 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
78051 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
78052 + _mm->pax_flags & MF_PAX_SEGMEXEC))
78053 +#endif
78054 +
78055 /*
78056 * Display pages allocated per node and memory policy via /proc.
78057 */
78058 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
78059 int n;
78060 char buffer[50];
78061
78062 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78063 + if (current->exec_id != m->exec_id) {
78064 + gr_log_badprocpid("numa_maps");
78065 + return 0;
78066 + }
78067 +#endif
78068 +
78069 if (!mm)
78070 return 0;
78071
78072 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
78073 mpol_to_str(buffer, sizeof(buffer), pol, 0);
78074 mpol_cond_put(pol);
78075
78076 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78077 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
78078 +#else
78079 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
78080 +#endif
78081
78082 if (file) {
78083 seq_printf(m, " file=");
78084 - seq_path(m, &file->f_path, "\n\t= ");
78085 + seq_path(m, &file->f_path, "\n\t\\= ");
78086 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
78087 seq_printf(m, " heap");
78088 } else if (vma->vm_start <= mm->start_stack &&
78089 diff --git a/mm/migrate.c b/mm/migrate.c
78090 index aaca868..2ebecdc 100644
78091 --- a/mm/migrate.c
78092 +++ b/mm/migrate.c
78093 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
78094 unsigned long chunk_start;
78095 int err;
78096
78097 + pax_track_stack();
78098 +
78099 task_nodes = cpuset_mems_allowed(task);
78100
78101 err = -ENOMEM;
78102 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78103 if (!mm)
78104 return -EINVAL;
78105
78106 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78107 + if (mm != current->mm &&
78108 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78109 + err = -EPERM;
78110 + goto out;
78111 + }
78112 +#endif
78113 +
78114 /*
78115 * Check if this process has the right to modify the specified
78116 * process. The right exists if the process has administrative
78117 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78118 rcu_read_lock();
78119 tcred = __task_cred(task);
78120 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78121 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78122 - !capable(CAP_SYS_NICE)) {
78123 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78124 rcu_read_unlock();
78125 err = -EPERM;
78126 goto out;
78127 diff --git a/mm/mlock.c b/mm/mlock.c
78128 index 2d846cf..98134d2 100644
78129 --- a/mm/mlock.c
78130 +++ b/mm/mlock.c
78131 @@ -13,6 +13,7 @@
78132 #include <linux/pagemap.h>
78133 #include <linux/mempolicy.h>
78134 #include <linux/syscalls.h>
78135 +#include <linux/security.h>
78136 #include <linux/sched.h>
78137 #include <linux/module.h>
78138 #include <linux/rmap.h>
78139 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
78140 }
78141 }
78142
78143 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
78144 -{
78145 - return (vma->vm_flags & VM_GROWSDOWN) &&
78146 - (vma->vm_start == addr) &&
78147 - !vma_stack_continue(vma->vm_prev, addr);
78148 -}
78149 -
78150 /**
78151 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
78152 * @vma: target vma
78153 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
78154 if (vma->vm_flags & VM_WRITE)
78155 gup_flags |= FOLL_WRITE;
78156
78157 - /* We don't try to access the guard page of a stack vma */
78158 - if (stack_guard_page(vma, start)) {
78159 - addr += PAGE_SIZE;
78160 - nr_pages--;
78161 - }
78162 -
78163 while (nr_pages > 0) {
78164 int i;
78165
78166 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
78167 {
78168 unsigned long nstart, end, tmp;
78169 struct vm_area_struct * vma, * prev;
78170 - int error;
78171 + int error = -EINVAL;
78172
78173 len = PAGE_ALIGN(len);
78174 end = start + len;
78175 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
78176 return -EINVAL;
78177 if (end == start)
78178 return 0;
78179 + if (end > TASK_SIZE)
78180 + return -EINVAL;
78181 +
78182 vma = find_vma_prev(current->mm, start, &prev);
78183 if (!vma || vma->vm_start > start)
78184 return -ENOMEM;
78185 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
78186 for (nstart = start ; ; ) {
78187 unsigned int newflags;
78188
78189 +#ifdef CONFIG_PAX_SEGMEXEC
78190 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78191 + break;
78192 +#endif
78193 +
78194 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78195
78196 newflags = vma->vm_flags | VM_LOCKED;
78197 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78198 lock_limit >>= PAGE_SHIFT;
78199
78200 /* check against resource limits */
78201 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78202 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78203 error = do_mlock(start, len, 1);
78204 up_write(&current->mm->mmap_sem);
78205 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78206 static int do_mlockall(int flags)
78207 {
78208 struct vm_area_struct * vma, * prev = NULL;
78209 - unsigned int def_flags = 0;
78210
78211 if (flags & MCL_FUTURE)
78212 - def_flags = VM_LOCKED;
78213 - current->mm->def_flags = def_flags;
78214 + current->mm->def_flags |= VM_LOCKED;
78215 + else
78216 + current->mm->def_flags &= ~VM_LOCKED;
78217 if (flags == MCL_FUTURE)
78218 goto out;
78219
78220 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78221 - unsigned int newflags;
78222 + unsigned long newflags;
78223
78224 +#ifdef CONFIG_PAX_SEGMEXEC
78225 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78226 + break;
78227 +#endif
78228 +
78229 + BUG_ON(vma->vm_end > TASK_SIZE);
78230 newflags = vma->vm_flags | VM_LOCKED;
78231 if (!(flags & MCL_CURRENT))
78232 newflags &= ~VM_LOCKED;
78233 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78234 lock_limit >>= PAGE_SHIFT;
78235
78236 ret = -ENOMEM;
78237 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78238 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78239 capable(CAP_IPC_LOCK))
78240 ret = do_mlockall(flags);
78241 diff --git a/mm/mmap.c b/mm/mmap.c
78242 index 4b80cbf..cd3731c 100644
78243 --- a/mm/mmap.c
78244 +++ b/mm/mmap.c
78245 @@ -45,6 +45,16 @@
78246 #define arch_rebalance_pgtables(addr, len) (addr)
78247 #endif
78248
78249 +static inline void verify_mm_writelocked(struct mm_struct *mm)
78250 +{
78251 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78252 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78253 + up_read(&mm->mmap_sem);
78254 + BUG();
78255 + }
78256 +#endif
78257 +}
78258 +
78259 static void unmap_region(struct mm_struct *mm,
78260 struct vm_area_struct *vma, struct vm_area_struct *prev,
78261 unsigned long start, unsigned long end);
78262 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78263 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78264 *
78265 */
78266 -pgprot_t protection_map[16] = {
78267 +pgprot_t protection_map[16] __read_only = {
78268 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78269 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78270 };
78271
78272 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78273 {
78274 - return __pgprot(pgprot_val(protection_map[vm_flags &
78275 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78276 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78277 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78278 +
78279 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78280 + if (!nx_enabled &&
78281 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78282 + (vm_flags & (VM_READ | VM_WRITE)))
78283 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78284 +#endif
78285 +
78286 + return prot;
78287 }
78288 EXPORT_SYMBOL(vm_get_page_prot);
78289
78290 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78291 int sysctl_overcommit_ratio = 50; /* default is 50% */
78292 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78293 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78294 struct percpu_counter vm_committed_as;
78295
78296 /*
78297 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78298 struct vm_area_struct *next = vma->vm_next;
78299
78300 might_sleep();
78301 + BUG_ON(vma->vm_mirror);
78302 if (vma->vm_ops && vma->vm_ops->close)
78303 vma->vm_ops->close(vma);
78304 if (vma->vm_file) {
78305 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78306 * not page aligned -Ram Gupta
78307 */
78308 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78309 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78310 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78311 (mm->end_data - mm->start_data) > rlim)
78312 goto out;
78313 @@ -704,6 +726,12 @@ static int
78314 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78315 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78316 {
78317 +
78318 +#ifdef CONFIG_PAX_SEGMEXEC
78319 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78320 + return 0;
78321 +#endif
78322 +
78323 if (is_mergeable_vma(vma, file, vm_flags) &&
78324 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78325 if (vma->vm_pgoff == vm_pgoff)
78326 @@ -723,6 +751,12 @@ static int
78327 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78328 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78329 {
78330 +
78331 +#ifdef CONFIG_PAX_SEGMEXEC
78332 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78333 + return 0;
78334 +#endif
78335 +
78336 if (is_mergeable_vma(vma, file, vm_flags) &&
78337 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78338 pgoff_t vm_pglen;
78339 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78340 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78341 struct vm_area_struct *prev, unsigned long addr,
78342 unsigned long end, unsigned long vm_flags,
78343 - struct anon_vma *anon_vma, struct file *file,
78344 + struct anon_vma *anon_vma, struct file *file,
78345 pgoff_t pgoff, struct mempolicy *policy)
78346 {
78347 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78348 struct vm_area_struct *area, *next;
78349
78350 +#ifdef CONFIG_PAX_SEGMEXEC
78351 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78352 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78353 +
78354 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78355 +#endif
78356 +
78357 /*
78358 * We later require that vma->vm_flags == vm_flags,
78359 * so this tests vma->vm_flags & VM_SPECIAL, too.
78360 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78361 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78362 next = next->vm_next;
78363
78364 +#ifdef CONFIG_PAX_SEGMEXEC
78365 + if (prev)
78366 + prev_m = pax_find_mirror_vma(prev);
78367 + if (area)
78368 + area_m = pax_find_mirror_vma(area);
78369 + if (next)
78370 + next_m = pax_find_mirror_vma(next);
78371 +#endif
78372 +
78373 /*
78374 * Can it merge with the predecessor?
78375 */
78376 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78377 /* cases 1, 6 */
78378 vma_adjust(prev, prev->vm_start,
78379 next->vm_end, prev->vm_pgoff, NULL);
78380 - } else /* cases 2, 5, 7 */
78381 +
78382 +#ifdef CONFIG_PAX_SEGMEXEC
78383 + if (prev_m)
78384 + vma_adjust(prev_m, prev_m->vm_start,
78385 + next_m->vm_end, prev_m->vm_pgoff, NULL);
78386 +#endif
78387 +
78388 + } else { /* cases 2, 5, 7 */
78389 vma_adjust(prev, prev->vm_start,
78390 end, prev->vm_pgoff, NULL);
78391 +
78392 +#ifdef CONFIG_PAX_SEGMEXEC
78393 + if (prev_m)
78394 + vma_adjust(prev_m, prev_m->vm_start,
78395 + end_m, prev_m->vm_pgoff, NULL);
78396 +#endif
78397 +
78398 + }
78399 return prev;
78400 }
78401
78402 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78403 mpol_equal(policy, vma_policy(next)) &&
78404 can_vma_merge_before(next, vm_flags,
78405 anon_vma, file, pgoff+pglen)) {
78406 - if (prev && addr < prev->vm_end) /* case 4 */
78407 + if (prev && addr < prev->vm_end) { /* case 4 */
78408 vma_adjust(prev, prev->vm_start,
78409 addr, prev->vm_pgoff, NULL);
78410 - else /* cases 3, 8 */
78411 +
78412 +#ifdef CONFIG_PAX_SEGMEXEC
78413 + if (prev_m)
78414 + vma_adjust(prev_m, prev_m->vm_start,
78415 + addr_m, prev_m->vm_pgoff, NULL);
78416 +#endif
78417 +
78418 + } else { /* cases 3, 8 */
78419 vma_adjust(area, addr, next->vm_end,
78420 next->vm_pgoff - pglen, NULL);
78421 +
78422 +#ifdef CONFIG_PAX_SEGMEXEC
78423 + if (area_m)
78424 + vma_adjust(area_m, addr_m, next_m->vm_end,
78425 + next_m->vm_pgoff - pglen, NULL);
78426 +#endif
78427 +
78428 + }
78429 return area;
78430 }
78431
78432 @@ -898,14 +978,11 @@ none:
78433 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78434 struct file *file, long pages)
78435 {
78436 - const unsigned long stack_flags
78437 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78438 -
78439 if (file) {
78440 mm->shared_vm += pages;
78441 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78442 mm->exec_vm += pages;
78443 - } else if (flags & stack_flags)
78444 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78445 mm->stack_vm += pages;
78446 if (flags & (VM_RESERVED|VM_IO))
78447 mm->reserved_vm += pages;
78448 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78449 * (the exception is when the underlying filesystem is noexec
78450 * mounted, in which case we dont add PROT_EXEC.)
78451 */
78452 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78453 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78454 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78455 prot |= PROT_EXEC;
78456
78457 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78458 /* Obtain the address to map to. we verify (or select) it and ensure
78459 * that it represents a valid section of the address space.
78460 */
78461 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
78462 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78463 if (addr & ~PAGE_MASK)
78464 return addr;
78465
78466 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78467 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78468 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78469
78470 +#ifdef CONFIG_PAX_MPROTECT
78471 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78472 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78473 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78474 + gr_log_rwxmmap(file);
78475 +
78476 +#ifdef CONFIG_PAX_EMUPLT
78477 + vm_flags &= ~VM_EXEC;
78478 +#else
78479 + return -EPERM;
78480 +#endif
78481 +
78482 + }
78483 +
78484 + if (!(vm_flags & VM_EXEC))
78485 + vm_flags &= ~VM_MAYEXEC;
78486 +#else
78487 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78488 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78489 +#endif
78490 + else
78491 + vm_flags &= ~VM_MAYWRITE;
78492 + }
78493 +#endif
78494 +
78495 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78496 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78497 + vm_flags &= ~VM_PAGEEXEC;
78498 +#endif
78499 +
78500 if (flags & MAP_LOCKED)
78501 if (!can_do_mlock())
78502 return -EPERM;
78503 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78504 locked += mm->locked_vm;
78505 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78506 lock_limit >>= PAGE_SHIFT;
78507 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78508 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78509 return -EAGAIN;
78510 }
78511 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78512 if (error)
78513 return error;
78514
78515 + if (!gr_acl_handle_mmap(file, prot))
78516 + return -EACCES;
78517 +
78518 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78519 }
78520 EXPORT_SYMBOL(do_mmap_pgoff);
78521 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78522 */
78523 int vma_wants_writenotify(struct vm_area_struct *vma)
78524 {
78525 - unsigned int vm_flags = vma->vm_flags;
78526 + unsigned long vm_flags = vma->vm_flags;
78527
78528 /* If it was private or non-writable, the write bit is already clear */
78529 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78530 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78531 return 0;
78532
78533 /* The backer wishes to know when pages are first written to? */
78534 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78535 unsigned long charged = 0;
78536 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78537
78538 +#ifdef CONFIG_PAX_SEGMEXEC
78539 + struct vm_area_struct *vma_m = NULL;
78540 +#endif
78541 +
78542 + /*
78543 + * mm->mmap_sem is required to protect against another thread
78544 + * changing the mappings in case we sleep.
78545 + */
78546 + verify_mm_writelocked(mm);
78547 +
78548 /* Clear old maps */
78549 error = -ENOMEM;
78550 -munmap_back:
78551 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78552 if (vma && vma->vm_start < addr + len) {
78553 if (do_munmap(mm, addr, len))
78554 return -ENOMEM;
78555 - goto munmap_back;
78556 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78557 + BUG_ON(vma && vma->vm_start < addr + len);
78558 }
78559
78560 /* Check against address space limit. */
78561 @@ -1173,6 +1294,16 @@ munmap_back:
78562 goto unacct_error;
78563 }
78564
78565 +#ifdef CONFIG_PAX_SEGMEXEC
78566 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78567 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78568 + if (!vma_m) {
78569 + error = -ENOMEM;
78570 + goto free_vma;
78571 + }
78572 + }
78573 +#endif
78574 +
78575 vma->vm_mm = mm;
78576 vma->vm_start = addr;
78577 vma->vm_end = addr + len;
78578 @@ -1195,6 +1326,19 @@ munmap_back:
78579 error = file->f_op->mmap(file, vma);
78580 if (error)
78581 goto unmap_and_free_vma;
78582 +
78583 +#ifdef CONFIG_PAX_SEGMEXEC
78584 + if (vma_m && (vm_flags & VM_EXECUTABLE))
78585 + added_exe_file_vma(mm);
78586 +#endif
78587 +
78588 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78589 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78590 + vma->vm_flags |= VM_PAGEEXEC;
78591 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78592 + }
78593 +#endif
78594 +
78595 if (vm_flags & VM_EXECUTABLE)
78596 added_exe_file_vma(mm);
78597
78598 @@ -1218,6 +1362,11 @@ munmap_back:
78599 vma_link(mm, vma, prev, rb_link, rb_parent);
78600 file = vma->vm_file;
78601
78602 +#ifdef CONFIG_PAX_SEGMEXEC
78603 + if (vma_m)
78604 + pax_mirror_vma(vma_m, vma);
78605 +#endif
78606 +
78607 /* Once vma denies write, undo our temporary denial count */
78608 if (correct_wcount)
78609 atomic_inc(&inode->i_writecount);
78610 @@ -1226,6 +1375,7 @@ out:
78611
78612 mm->total_vm += len >> PAGE_SHIFT;
78613 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78614 + track_exec_limit(mm, addr, addr + len, vm_flags);
78615 if (vm_flags & VM_LOCKED) {
78616 /*
78617 * makes pages present; downgrades, drops, reacquires mmap_sem
78618 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78619 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78620 charged = 0;
78621 free_vma:
78622 +
78623 +#ifdef CONFIG_PAX_SEGMEXEC
78624 + if (vma_m)
78625 + kmem_cache_free(vm_area_cachep, vma_m);
78626 +#endif
78627 +
78628 kmem_cache_free(vm_area_cachep, vma);
78629 unacct_error:
78630 if (charged)
78631 @@ -1255,6 +1411,44 @@ unacct_error:
78632 return error;
78633 }
78634
78635 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78636 +{
78637 + if (!vma) {
78638 +#ifdef CONFIG_STACK_GROWSUP
78639 + if (addr > sysctl_heap_stack_gap)
78640 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78641 + else
78642 + vma = find_vma(current->mm, 0);
78643 + if (vma && (vma->vm_flags & VM_GROWSUP))
78644 + return false;
78645 +#endif
78646 + return true;
78647 + }
78648 +
78649 + if (addr + len > vma->vm_start)
78650 + return false;
78651 +
78652 + if (vma->vm_flags & VM_GROWSDOWN)
78653 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78654 +#ifdef CONFIG_STACK_GROWSUP
78655 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78656 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78657 +#endif
78658 +
78659 + return true;
78660 +}
78661 +
78662 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78663 +{
78664 + if (vma->vm_start < len)
78665 + return -ENOMEM;
78666 + if (!(vma->vm_flags & VM_GROWSDOWN))
78667 + return vma->vm_start - len;
78668 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
78669 + return vma->vm_start - len - sysctl_heap_stack_gap;
78670 + return -ENOMEM;
78671 +}
78672 +
78673 /* Get an address range which is currently unmapped.
78674 * For shmat() with addr=0.
78675 *
78676 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78677 if (flags & MAP_FIXED)
78678 return addr;
78679
78680 +#ifdef CONFIG_PAX_RANDMMAP
78681 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78682 +#endif
78683 +
78684 if (addr) {
78685 addr = PAGE_ALIGN(addr);
78686 - vma = find_vma(mm, addr);
78687 - if (TASK_SIZE - len >= addr &&
78688 - (!vma || addr + len <= vma->vm_start))
78689 - return addr;
78690 + if (TASK_SIZE - len >= addr) {
78691 + vma = find_vma(mm, addr);
78692 + if (check_heap_stack_gap(vma, addr, len))
78693 + return addr;
78694 + }
78695 }
78696 if (len > mm->cached_hole_size) {
78697 - start_addr = addr = mm->free_area_cache;
78698 + start_addr = addr = mm->free_area_cache;
78699 } else {
78700 - start_addr = addr = TASK_UNMAPPED_BASE;
78701 - mm->cached_hole_size = 0;
78702 + start_addr = addr = mm->mmap_base;
78703 + mm->cached_hole_size = 0;
78704 }
78705
78706 full_search:
78707 @@ -1303,34 +1502,40 @@ full_search:
78708 * Start a new search - just in case we missed
78709 * some holes.
78710 */
78711 - if (start_addr != TASK_UNMAPPED_BASE) {
78712 - addr = TASK_UNMAPPED_BASE;
78713 - start_addr = addr;
78714 + if (start_addr != mm->mmap_base) {
78715 + start_addr = addr = mm->mmap_base;
78716 mm->cached_hole_size = 0;
78717 goto full_search;
78718 }
78719 return -ENOMEM;
78720 }
78721 - if (!vma || addr + len <= vma->vm_start) {
78722 - /*
78723 - * Remember the place where we stopped the search:
78724 - */
78725 - mm->free_area_cache = addr + len;
78726 - return addr;
78727 - }
78728 + if (check_heap_stack_gap(vma, addr, len))
78729 + break;
78730 if (addr + mm->cached_hole_size < vma->vm_start)
78731 mm->cached_hole_size = vma->vm_start - addr;
78732 addr = vma->vm_end;
78733 }
78734 +
78735 + /*
78736 + * Remember the place where we stopped the search:
78737 + */
78738 + mm->free_area_cache = addr + len;
78739 + return addr;
78740 }
78741 #endif
78742
78743 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78744 {
78745 +
78746 +#ifdef CONFIG_PAX_SEGMEXEC
78747 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78748 + return;
78749 +#endif
78750 +
78751 /*
78752 * Is this a new hole at the lowest possible address?
78753 */
78754 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78755 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78756 mm->free_area_cache = addr;
78757 mm->cached_hole_size = ~0UL;
78758 }
78759 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78760 {
78761 struct vm_area_struct *vma;
78762 struct mm_struct *mm = current->mm;
78763 - unsigned long addr = addr0;
78764 + unsigned long base = mm->mmap_base, addr = addr0;
78765
78766 /* requested length too big for entire address space */
78767 if (len > TASK_SIZE)
78768 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78769 if (flags & MAP_FIXED)
78770 return addr;
78771
78772 +#ifdef CONFIG_PAX_RANDMMAP
78773 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78774 +#endif
78775 +
78776 /* requesting a specific address */
78777 if (addr) {
78778 addr = PAGE_ALIGN(addr);
78779 - vma = find_vma(mm, addr);
78780 - if (TASK_SIZE - len >= addr &&
78781 - (!vma || addr + len <= vma->vm_start))
78782 - return addr;
78783 + if (TASK_SIZE - len >= addr) {
78784 + vma = find_vma(mm, addr);
78785 + if (check_heap_stack_gap(vma, addr, len))
78786 + return addr;
78787 + }
78788 }
78789
78790 /* check if free_area_cache is useful for us */
78791 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78792 /* make sure it can fit in the remaining address space */
78793 if (addr > len) {
78794 vma = find_vma(mm, addr-len);
78795 - if (!vma || addr <= vma->vm_start)
78796 + if (check_heap_stack_gap(vma, addr - len, len))
78797 /* remember the address as a hint for next time */
78798 return (mm->free_area_cache = addr-len);
78799 }
78800 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78801 * return with success:
78802 */
78803 vma = find_vma(mm, addr);
78804 - if (!vma || addr+len <= vma->vm_start)
78805 + if (check_heap_stack_gap(vma, addr, len))
78806 /* remember the address as a hint for next time */
78807 return (mm->free_area_cache = addr);
78808
78809 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78810 mm->cached_hole_size = vma->vm_start - addr;
78811
78812 /* try just below the current vma->vm_start */
78813 - addr = vma->vm_start-len;
78814 - } while (len < vma->vm_start);
78815 + addr = skip_heap_stack_gap(vma, len);
78816 + } while (!IS_ERR_VALUE(addr));
78817
78818 bottomup:
78819 /*
78820 @@ -1414,13 +1624,21 @@ bottomup:
78821 * can happen with large stack limits and large mmap()
78822 * allocations.
78823 */
78824 + mm->mmap_base = TASK_UNMAPPED_BASE;
78825 +
78826 +#ifdef CONFIG_PAX_RANDMMAP
78827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78828 + mm->mmap_base += mm->delta_mmap;
78829 +#endif
78830 +
78831 + mm->free_area_cache = mm->mmap_base;
78832 mm->cached_hole_size = ~0UL;
78833 - mm->free_area_cache = TASK_UNMAPPED_BASE;
78834 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78835 /*
78836 * Restore the topdown base:
78837 */
78838 - mm->free_area_cache = mm->mmap_base;
78839 + mm->mmap_base = base;
78840 + mm->free_area_cache = base;
78841 mm->cached_hole_size = ~0UL;
78842
78843 return addr;
78844 @@ -1429,6 +1647,12 @@ bottomup:
78845
78846 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78847 {
78848 +
78849 +#ifdef CONFIG_PAX_SEGMEXEC
78850 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78851 + return;
78852 +#endif
78853 +
78854 /*
78855 * Is this a new hole at the highest possible address?
78856 */
78857 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78858 mm->free_area_cache = addr;
78859
78860 /* dont allow allocations above current base */
78861 - if (mm->free_area_cache > mm->mmap_base)
78862 + if (mm->free_area_cache > mm->mmap_base) {
78863 mm->free_area_cache = mm->mmap_base;
78864 + mm->cached_hole_size = ~0UL;
78865 + }
78866 }
78867
78868 unsigned long
78869 @@ -1510,40 +1736,41 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78870
78871 EXPORT_SYMBOL(find_vma);
78872
78873 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78874 +/*
78875 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78876 + * Note: pprev is set to NULL when return value is NULL.
78877 + */
78878 struct vm_area_struct *
78879 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78880 struct vm_area_struct **pprev)
78881 {
78882 - struct vm_area_struct *vma = NULL, *prev = NULL;
78883 - struct rb_node *rb_node;
78884 - if (!mm)
78885 - goto out;
78886 + struct vm_area_struct *vma;
78887
78888 - /* Guard against addr being lower than the first VMA */
78889 - vma = mm->mmap;
78890 + vma = find_vma(mm, addr);
78891 + *pprev = vma ? vma->vm_prev : NULL;
78892 + return vma;
78893 +}
78894
78895 - /* Go through the RB tree quickly. */
78896 - rb_node = mm->mm_rb.rb_node;
78897 +#ifdef CONFIG_PAX_SEGMEXEC
78898 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78899 +{
78900 + struct vm_area_struct *vma_m;
78901
78902 - while (rb_node) {
78903 - struct vm_area_struct *vma_tmp;
78904 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78905 -
78906 - if (addr < vma_tmp->vm_end) {
78907 - rb_node = rb_node->rb_left;
78908 - } else {
78909 - prev = vma_tmp;
78910 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
78911 - break;
78912 - rb_node = rb_node->rb_right;
78913 - }
78914 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78915 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78916 + BUG_ON(vma->vm_mirror);
78917 + return NULL;
78918 }
78919 -
78920 -out:
78921 - *pprev = prev;
78922 - return prev ? prev->vm_next : vma;
78923 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78924 + vma_m = vma->vm_mirror;
78925 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78926 + BUG_ON(vma->vm_file != vma_m->vm_file);
78927 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78928 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78929 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78930 + return vma_m;
78931 }
78932 +#endif
78933
78934 /*
78935 * Verify that the stack growth is acceptable and
78936 @@ -1561,6 +1788,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78937 return -ENOMEM;
78938
78939 /* Stack limit test */
78940 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
78941 if (size > rlim[RLIMIT_STACK].rlim_cur)
78942 return -ENOMEM;
78943
78944 @@ -1570,6 +1798,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78945 unsigned long limit;
78946 locked = mm->locked_vm + grow;
78947 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78948 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78949 if (locked > limit && !capable(CAP_IPC_LOCK))
78950 return -ENOMEM;
78951 }
78952 @@ -1600,37 +1829,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78953 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78954 * vma is the last one with address > vma->vm_end. Have to extend vma.
78955 */
78956 +#ifndef CONFIG_IA64
78957 +static
78958 +#endif
78959 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78960 {
78961 int error;
78962 + bool locknext;
78963
78964 if (!(vma->vm_flags & VM_GROWSUP))
78965 return -EFAULT;
78966
78967 + /* Also guard against wrapping around to address 0. */
78968 + if (address < PAGE_ALIGN(address+1))
78969 + address = PAGE_ALIGN(address+1);
78970 + else
78971 + return -ENOMEM;
78972 +
78973 /*
78974 * We must make sure the anon_vma is allocated
78975 * so that the anon_vma locking is not a noop.
78976 */
78977 if (unlikely(anon_vma_prepare(vma)))
78978 return -ENOMEM;
78979 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78980 + if (locknext && anon_vma_prepare(vma->vm_next))
78981 + return -ENOMEM;
78982 anon_vma_lock(vma);
78983 + if (locknext)
78984 + anon_vma_lock(vma->vm_next);
78985
78986 /*
78987 * vma->vm_start/vm_end cannot change under us because the caller
78988 * is required to hold the mmap_sem in read mode. We need the
78989 - * anon_vma lock to serialize against concurrent expand_stacks.
78990 - * Also guard against wrapping around to address 0.
78991 + * anon_vma locks to serialize against concurrent expand_stacks
78992 + * and expand_upwards.
78993 */
78994 - if (address < PAGE_ALIGN(address+4))
78995 - address = PAGE_ALIGN(address+4);
78996 - else {
78997 - anon_vma_unlock(vma);
78998 - return -ENOMEM;
78999 - }
79000 error = 0;
79001
79002 /* Somebody else might have raced and expanded it already */
79003 - if (address > vma->vm_end) {
79004 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
79005 + error = -ENOMEM;
79006 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
79007 unsigned long size, grow;
79008
79009 size = address - vma->vm_start;
79010 @@ -1643,6 +1883,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79011 vma->vm_end = address;
79012 }
79013 }
79014 + if (locknext)
79015 + anon_vma_unlock(vma->vm_next);
79016 anon_vma_unlock(vma);
79017 return error;
79018 }
79019 @@ -1655,6 +1897,8 @@ static int expand_downwards(struct vm_area_struct *vma,
79020 unsigned long address)
79021 {
79022 int error;
79023 + bool lockprev = false;
79024 + struct vm_area_struct *prev;
79025
79026 /*
79027 * We must make sure the anon_vma is allocated
79028 @@ -1668,6 +1912,15 @@ static int expand_downwards(struct vm_area_struct *vma,
79029 if (error)
79030 return error;
79031
79032 + prev = vma->vm_prev;
79033 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
79034 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
79035 +#endif
79036 + if (lockprev && anon_vma_prepare(prev))
79037 + return -ENOMEM;
79038 + if (lockprev)
79039 + anon_vma_lock(prev);
79040 +
79041 anon_vma_lock(vma);
79042
79043 /*
79044 @@ -1677,9 +1930,17 @@ static int expand_downwards(struct vm_area_struct *vma,
79045 */
79046
79047 /* Somebody else might have raced and expanded it already */
79048 - if (address < vma->vm_start) {
79049 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
79050 + error = -ENOMEM;
79051 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
79052 unsigned long size, grow;
79053
79054 +#ifdef CONFIG_PAX_SEGMEXEC
79055 + struct vm_area_struct *vma_m;
79056 +
79057 + vma_m = pax_find_mirror_vma(vma);
79058 +#endif
79059 +
79060 size = vma->vm_end - address;
79061 grow = (vma->vm_start - address) >> PAGE_SHIFT;
79062
79063 @@ -1689,10 +1950,22 @@ static int expand_downwards(struct vm_area_struct *vma,
79064 if (!error) {
79065 vma->vm_start = address;
79066 vma->vm_pgoff -= grow;
79067 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
79068 +
79069 +#ifdef CONFIG_PAX_SEGMEXEC
79070 + if (vma_m) {
79071 + vma_m->vm_start -= grow << PAGE_SHIFT;
79072 + vma_m->vm_pgoff -= grow;
79073 + }
79074 +#endif
79075 +
79076 +
79077 }
79078 }
79079 }
79080 anon_vma_unlock(vma);
79081 + if (lockprev)
79082 + anon_vma_unlock(prev);
79083 return error;
79084 }
79085
79086 @@ -1768,6 +2041,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
79087 do {
79088 long nrpages = vma_pages(vma);
79089
79090 +#ifdef CONFIG_PAX_SEGMEXEC
79091 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
79092 + vma = remove_vma(vma);
79093 + continue;
79094 + }
79095 +#endif
79096 +
79097 mm->total_vm -= nrpages;
79098 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
79099 vma = remove_vma(vma);
79100 @@ -1813,6 +2093,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
79101 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
79102 vma->vm_prev = NULL;
79103 do {
79104 +
79105 +#ifdef CONFIG_PAX_SEGMEXEC
79106 + if (vma->vm_mirror) {
79107 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
79108 + vma->vm_mirror->vm_mirror = NULL;
79109 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
79110 + vma->vm_mirror = NULL;
79111 + }
79112 +#endif
79113 +
79114 rb_erase(&vma->vm_rb, &mm->mm_rb);
79115 mm->map_count--;
79116 tail_vma = vma;
79117 @@ -1840,10 +2130,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79118 struct mempolicy *pol;
79119 struct vm_area_struct *new;
79120
79121 +#ifdef CONFIG_PAX_SEGMEXEC
79122 + struct vm_area_struct *vma_m, *new_m = NULL;
79123 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
79124 +#endif
79125 +
79126 if (is_vm_hugetlb_page(vma) && (addr &
79127 ~(huge_page_mask(hstate_vma(vma)))))
79128 return -EINVAL;
79129
79130 +#ifdef CONFIG_PAX_SEGMEXEC
79131 + vma_m = pax_find_mirror_vma(vma);
79132 +
79133 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
79134 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
79135 + if (mm->map_count >= sysctl_max_map_count-1)
79136 + return -ENOMEM;
79137 + } else
79138 +#endif
79139 +
79140 if (mm->map_count >= sysctl_max_map_count)
79141 return -ENOMEM;
79142
79143 @@ -1851,6 +2156,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79144 if (!new)
79145 return -ENOMEM;
79146
79147 +#ifdef CONFIG_PAX_SEGMEXEC
79148 + if (vma_m) {
79149 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
79150 + if (!new_m) {
79151 + kmem_cache_free(vm_area_cachep, new);
79152 + return -ENOMEM;
79153 + }
79154 + }
79155 +#endif
79156 +
79157 /* most fields are the same, copy all, and then fixup */
79158 *new = *vma;
79159
79160 @@ -1861,8 +2176,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79161 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
79162 }
79163
79164 +#ifdef CONFIG_PAX_SEGMEXEC
79165 + if (vma_m) {
79166 + *new_m = *vma_m;
79167 + new_m->vm_mirror = new;
79168 + new->vm_mirror = new_m;
79169 +
79170 + if (new_below)
79171 + new_m->vm_end = addr_m;
79172 + else {
79173 + new_m->vm_start = addr_m;
79174 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
79175 + }
79176 + }
79177 +#endif
79178 +
79179 pol = mpol_dup(vma_policy(vma));
79180 if (IS_ERR(pol)) {
79181 +
79182 +#ifdef CONFIG_PAX_SEGMEXEC
79183 + if (new_m)
79184 + kmem_cache_free(vm_area_cachep, new_m);
79185 +#endif
79186 +
79187 kmem_cache_free(vm_area_cachep, new);
79188 return PTR_ERR(pol);
79189 }
79190 @@ -1883,6 +2219,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79191 else
79192 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
79193
79194 +#ifdef CONFIG_PAX_SEGMEXEC
79195 + if (vma_m) {
79196 + mpol_get(pol);
79197 + vma_set_policy(new_m, pol);
79198 +
79199 + if (new_m->vm_file) {
79200 + get_file(new_m->vm_file);
79201 + if (vma_m->vm_flags & VM_EXECUTABLE)
79202 + added_exe_file_vma(mm);
79203 + }
79204 +
79205 + if (new_m->vm_ops && new_m->vm_ops->open)
79206 + new_m->vm_ops->open(new_m);
79207 +
79208 + if (new_below)
79209 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79210 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79211 + else
79212 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79213 + }
79214 +#endif
79215 +
79216 return 0;
79217 }
79218
79219 @@ -1891,11 +2249,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79220 * work. This now handles partial unmappings.
79221 * Jeremy Fitzhardinge <jeremy@goop.org>
79222 */
79223 +#ifdef CONFIG_PAX_SEGMEXEC
79224 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79225 {
79226 + int ret = __do_munmap(mm, start, len);
79227 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79228 + return ret;
79229 +
79230 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79231 +}
79232 +
79233 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79234 +#else
79235 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79236 +#endif
79237 +{
79238 unsigned long end;
79239 struct vm_area_struct *vma, *prev, *last;
79240
79241 + /*
79242 + * mm->mmap_sem is required to protect against another thread
79243 + * changing the mappings in case we sleep.
79244 + */
79245 + verify_mm_writelocked(mm);
79246 +
79247 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79248 return -EINVAL;
79249
79250 @@ -1959,6 +2336,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79251 /* Fix up all other VM information */
79252 remove_vma_list(mm, vma);
79253
79254 + track_exec_limit(mm, start, end, 0UL);
79255 +
79256 return 0;
79257 }
79258
79259 @@ -1971,22 +2350,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79260
79261 profile_munmap(addr);
79262
79263 +#ifdef CONFIG_PAX_SEGMEXEC
79264 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79265 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79266 + return -EINVAL;
79267 +#endif
79268 +
79269 down_write(&mm->mmap_sem);
79270 ret = do_munmap(mm, addr, len);
79271 up_write(&mm->mmap_sem);
79272 return ret;
79273 }
79274
79275 -static inline void verify_mm_writelocked(struct mm_struct *mm)
79276 -{
79277 -#ifdef CONFIG_DEBUG_VM
79278 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79279 - WARN_ON(1);
79280 - up_read(&mm->mmap_sem);
79281 - }
79282 -#endif
79283 -}
79284 -
79285 /*
79286 * this is really a simplified "do_mmap". it only handles
79287 * anonymous maps. eventually we may be able to do some
79288 @@ -2000,6 +2375,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79289 struct rb_node ** rb_link, * rb_parent;
79290 pgoff_t pgoff = addr >> PAGE_SHIFT;
79291 int error;
79292 + unsigned long charged;
79293
79294 len = PAGE_ALIGN(len);
79295 if (!len)
79296 @@ -2011,16 +2387,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79297
79298 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79299
79300 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79301 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79302 + flags &= ~VM_EXEC;
79303 +
79304 +#ifdef CONFIG_PAX_MPROTECT
79305 + if (mm->pax_flags & MF_PAX_MPROTECT)
79306 + flags &= ~VM_MAYEXEC;
79307 +#endif
79308 +
79309 + }
79310 +#endif
79311 +
79312 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79313 if (error & ~PAGE_MASK)
79314 return error;
79315
79316 + charged = len >> PAGE_SHIFT;
79317 +
79318 /*
79319 * mlock MCL_FUTURE?
79320 */
79321 if (mm->def_flags & VM_LOCKED) {
79322 unsigned long locked, lock_limit;
79323 - locked = len >> PAGE_SHIFT;
79324 + locked = charged;
79325 locked += mm->locked_vm;
79326 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79327 lock_limit >>= PAGE_SHIFT;
79328 @@ -2037,22 +2427,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79329 /*
79330 * Clear old maps. this also does some error checking for us
79331 */
79332 - munmap_back:
79333 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79334 if (vma && vma->vm_start < addr + len) {
79335 if (do_munmap(mm, addr, len))
79336 return -ENOMEM;
79337 - goto munmap_back;
79338 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79339 + BUG_ON(vma && vma->vm_start < addr + len);
79340 }
79341
79342 /* Check against address space limits *after* clearing old maps... */
79343 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79344 + if (!may_expand_vm(mm, charged))
79345 return -ENOMEM;
79346
79347 if (mm->map_count > sysctl_max_map_count)
79348 return -ENOMEM;
79349
79350 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
79351 + if (security_vm_enough_memory(charged))
79352 return -ENOMEM;
79353
79354 /* Can we just expand an old private anonymous mapping? */
79355 @@ -2066,7 +2456,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79356 */
79357 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79358 if (!vma) {
79359 - vm_unacct_memory(len >> PAGE_SHIFT);
79360 + vm_unacct_memory(charged);
79361 return -ENOMEM;
79362 }
79363
79364 @@ -2078,11 +2468,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79365 vma->vm_page_prot = vm_get_page_prot(flags);
79366 vma_link(mm, vma, prev, rb_link, rb_parent);
79367 out:
79368 - mm->total_vm += len >> PAGE_SHIFT;
79369 + mm->total_vm += charged;
79370 if (flags & VM_LOCKED) {
79371 if (!mlock_vma_pages_range(vma, addr, addr + len))
79372 - mm->locked_vm += (len >> PAGE_SHIFT);
79373 + mm->locked_vm += charged;
79374 }
79375 + track_exec_limit(mm, addr, addr + len, flags);
79376 return addr;
79377 }
79378
79379 @@ -2129,8 +2520,10 @@ void exit_mmap(struct mm_struct *mm)
79380 * Walk the list again, actually closing and freeing it,
79381 * with preemption enabled, without holding any MM locks.
79382 */
79383 - while (vma)
79384 + while (vma) {
79385 + vma->vm_mirror = NULL;
79386 vma = remove_vma(vma);
79387 + }
79388
79389 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79390 }
79391 @@ -2144,6 +2537,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79392 struct vm_area_struct * __vma, * prev;
79393 struct rb_node ** rb_link, * rb_parent;
79394
79395 +#ifdef CONFIG_PAX_SEGMEXEC
79396 + struct vm_area_struct *vma_m = NULL;
79397 +#endif
79398 +
79399 /*
79400 * The vm_pgoff of a purely anonymous vma should be irrelevant
79401 * until its first write fault, when page's anon_vma and index
79402 @@ -2166,7 +2563,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79403 if ((vma->vm_flags & VM_ACCOUNT) &&
79404 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79405 return -ENOMEM;
79406 +
79407 +#ifdef CONFIG_PAX_SEGMEXEC
79408 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79409 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79410 + if (!vma_m)
79411 + return -ENOMEM;
79412 + }
79413 +#endif
79414 +
79415 vma_link(mm, vma, prev, rb_link, rb_parent);
79416 +
79417 +#ifdef CONFIG_PAX_SEGMEXEC
79418 + if (vma_m)
79419 + pax_mirror_vma(vma_m, vma);
79420 +#endif
79421 +
79422 return 0;
79423 }
79424
79425 @@ -2184,6 +2596,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79426 struct rb_node **rb_link, *rb_parent;
79427 struct mempolicy *pol;
79428
79429 + BUG_ON(vma->vm_mirror);
79430 +
79431 /*
79432 * If anonymous vma has not yet been faulted, update new pgoff
79433 * to match new location, to increase its chance of merging.
79434 @@ -2227,6 +2641,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79435 return new_vma;
79436 }
79437
79438 +#ifdef CONFIG_PAX_SEGMEXEC
79439 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79440 +{
79441 + struct vm_area_struct *prev_m;
79442 + struct rb_node **rb_link_m, *rb_parent_m;
79443 + struct mempolicy *pol_m;
79444 +
79445 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79446 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79447 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79448 + *vma_m = *vma;
79449 + pol_m = vma_policy(vma_m);
79450 + mpol_get(pol_m);
79451 + vma_set_policy(vma_m, pol_m);
79452 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79453 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79454 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79455 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79456 + if (vma_m->vm_file)
79457 + get_file(vma_m->vm_file);
79458 + if (vma_m->vm_ops && vma_m->vm_ops->open)
79459 + vma_m->vm_ops->open(vma_m);
79460 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79461 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79462 + vma_m->vm_mirror = vma;
79463 + vma->vm_mirror = vma_m;
79464 +}
79465 +#endif
79466 +
79467 /*
79468 * Return true if the calling process may expand its vm space by the passed
79469 * number of pages
79470 @@ -2237,7 +2680,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79471 unsigned long lim;
79472
79473 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79474 -
79475 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79476 if (cur + npages > lim)
79477 return 0;
79478 return 1;
79479 @@ -2307,6 +2750,22 @@ int install_special_mapping(struct mm_struct *mm,
79480 vma->vm_start = addr;
79481 vma->vm_end = addr + len;
79482
79483 +#ifdef CONFIG_PAX_MPROTECT
79484 + if (mm->pax_flags & MF_PAX_MPROTECT) {
79485 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
79486 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79487 + return -EPERM;
79488 + if (!(vm_flags & VM_EXEC))
79489 + vm_flags &= ~VM_MAYEXEC;
79490 +#else
79491 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79492 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79493 +#endif
79494 + else
79495 + vm_flags &= ~VM_MAYWRITE;
79496 + }
79497 +#endif
79498 +
79499 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79500 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79501
79502 diff --git a/mm/mprotect.c b/mm/mprotect.c
79503 index 1737c7e..c7faeb4 100644
79504 --- a/mm/mprotect.c
79505 +++ b/mm/mprotect.c
79506 @@ -24,10 +24,16 @@
79507 #include <linux/mmu_notifier.h>
79508 #include <linux/migrate.h>
79509 #include <linux/perf_event.h>
79510 +
79511 +#ifdef CONFIG_PAX_MPROTECT
79512 +#include <linux/elf.h>
79513 +#endif
79514 +
79515 #include <asm/uaccess.h>
79516 #include <asm/pgtable.h>
79517 #include <asm/cacheflush.h>
79518 #include <asm/tlbflush.h>
79519 +#include <asm/mmu_context.h>
79520
79521 #ifndef pgprot_modify
79522 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79523 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79524 flush_tlb_range(vma, start, end);
79525 }
79526
79527 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79528 +/* called while holding the mmap semaphor for writing except stack expansion */
79529 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79530 +{
79531 + unsigned long oldlimit, newlimit = 0UL;
79532 +
79533 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79534 + return;
79535 +
79536 + spin_lock(&mm->page_table_lock);
79537 + oldlimit = mm->context.user_cs_limit;
79538 + if ((prot & VM_EXEC) && oldlimit < end)
79539 + /* USER_CS limit moved up */
79540 + newlimit = end;
79541 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79542 + /* USER_CS limit moved down */
79543 + newlimit = start;
79544 +
79545 + if (newlimit) {
79546 + mm->context.user_cs_limit = newlimit;
79547 +
79548 +#ifdef CONFIG_SMP
79549 + wmb();
79550 + cpus_clear(mm->context.cpu_user_cs_mask);
79551 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79552 +#endif
79553 +
79554 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79555 + }
79556 + spin_unlock(&mm->page_table_lock);
79557 + if (newlimit == end) {
79558 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
79559 +
79560 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
79561 + if (is_vm_hugetlb_page(vma))
79562 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79563 + else
79564 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79565 + }
79566 +}
79567 +#endif
79568 +
79569 int
79570 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79571 unsigned long start, unsigned long end, unsigned long newflags)
79572 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79573 int error;
79574 int dirty_accountable = 0;
79575
79576 +#ifdef CONFIG_PAX_SEGMEXEC
79577 + struct vm_area_struct *vma_m = NULL;
79578 + unsigned long start_m, end_m;
79579 +
79580 + start_m = start + SEGMEXEC_TASK_SIZE;
79581 + end_m = end + SEGMEXEC_TASK_SIZE;
79582 +#endif
79583 +
79584 if (newflags == oldflags) {
79585 *pprev = vma;
79586 return 0;
79587 }
79588
79589 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79590 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79591 +
79592 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79593 + return -ENOMEM;
79594 +
79595 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79596 + return -ENOMEM;
79597 + }
79598 +
79599 /*
79600 * If we make a private mapping writable we increase our commit;
79601 * but (without finer accounting) cannot reduce our commit if we
79602 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79603 }
79604 }
79605
79606 +#ifdef CONFIG_PAX_SEGMEXEC
79607 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79608 + if (start != vma->vm_start) {
79609 + error = split_vma(mm, vma, start, 1);
79610 + if (error)
79611 + goto fail;
79612 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79613 + *pprev = (*pprev)->vm_next;
79614 + }
79615 +
79616 + if (end != vma->vm_end) {
79617 + error = split_vma(mm, vma, end, 0);
79618 + if (error)
79619 + goto fail;
79620 + }
79621 +
79622 + if (pax_find_mirror_vma(vma)) {
79623 + error = __do_munmap(mm, start_m, end_m - start_m);
79624 + if (error)
79625 + goto fail;
79626 + } else {
79627 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79628 + if (!vma_m) {
79629 + error = -ENOMEM;
79630 + goto fail;
79631 + }
79632 + vma->vm_flags = newflags;
79633 + pax_mirror_vma(vma_m, vma);
79634 + }
79635 + }
79636 +#endif
79637 +
79638 /*
79639 * First try to merge with previous and/or next vma.
79640 */
79641 @@ -195,9 +293,21 @@ success:
79642 * vm_flags and vm_page_prot are protected by the mmap_sem
79643 * held in write mode.
79644 */
79645 +
79646 +#ifdef CONFIG_PAX_SEGMEXEC
79647 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79648 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79649 +#endif
79650 +
79651 vma->vm_flags = newflags;
79652 +
79653 +#ifdef CONFIG_PAX_MPROTECT
79654 + if (mm->binfmt && mm->binfmt->handle_mprotect)
79655 + mm->binfmt->handle_mprotect(vma, newflags);
79656 +#endif
79657 +
79658 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79659 - vm_get_page_prot(newflags));
79660 + vm_get_page_prot(vma->vm_flags));
79661
79662 if (vma_wants_writenotify(vma)) {
79663 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79664 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79665 end = start + len;
79666 if (end <= start)
79667 return -ENOMEM;
79668 +
79669 +#ifdef CONFIG_PAX_SEGMEXEC
79670 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79671 + if (end > SEGMEXEC_TASK_SIZE)
79672 + return -EINVAL;
79673 + } else
79674 +#endif
79675 +
79676 + if (end > TASK_SIZE)
79677 + return -EINVAL;
79678 +
79679 if (!arch_validate_prot(prot))
79680 return -EINVAL;
79681
79682 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79683 /*
79684 * Does the application expect PROT_READ to imply PROT_EXEC:
79685 */
79686 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79687 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79688 prot |= PROT_EXEC;
79689
79690 vm_flags = calc_vm_prot_bits(prot);
79691 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79692 if (start > vma->vm_start)
79693 prev = vma;
79694
79695 +#ifdef CONFIG_PAX_MPROTECT
79696 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79697 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
79698 +#endif
79699 +
79700 for (nstart = start ; ; ) {
79701 unsigned long newflags;
79702
79703 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79704
79705 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79706 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79707 + if (prot & (PROT_WRITE | PROT_EXEC))
79708 + gr_log_rwxmprotect(vma->vm_file);
79709 +
79710 + error = -EACCES;
79711 + goto out;
79712 + }
79713 +
79714 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79715 error = -EACCES;
79716 goto out;
79717 }
79718 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79719 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79720 if (error)
79721 goto out;
79722 +
79723 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
79724 +
79725 nstart = tmp;
79726
79727 if (nstart < prev->vm_end)
79728 diff --git a/mm/mremap.c b/mm/mremap.c
79729 index 3e98d79..1706cec 100644
79730 --- a/mm/mremap.c
79731 +++ b/mm/mremap.c
79732 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79733 continue;
79734 pte = ptep_clear_flush(vma, old_addr, old_pte);
79735 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79736 +
79737 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79738 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79739 + pte = pte_exprotect(pte);
79740 +#endif
79741 +
79742 set_pte_at(mm, new_addr, new_pte, pte);
79743 }
79744
79745 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79746 if (is_vm_hugetlb_page(vma))
79747 goto Einval;
79748
79749 +#ifdef CONFIG_PAX_SEGMEXEC
79750 + if (pax_find_mirror_vma(vma))
79751 + goto Einval;
79752 +#endif
79753 +
79754 /* We can't remap across vm area boundaries */
79755 if (old_len > vma->vm_end - addr)
79756 goto Efault;
79757 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79758 unsigned long ret = -EINVAL;
79759 unsigned long charged = 0;
79760 unsigned long map_flags;
79761 + unsigned long pax_task_size = TASK_SIZE;
79762
79763 if (new_addr & ~PAGE_MASK)
79764 goto out;
79765
79766 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79767 +#ifdef CONFIG_PAX_SEGMEXEC
79768 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79769 + pax_task_size = SEGMEXEC_TASK_SIZE;
79770 +#endif
79771 +
79772 + pax_task_size -= PAGE_SIZE;
79773 +
79774 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79775 goto out;
79776
79777 /* Check if the location we're moving into overlaps the
79778 * old location at all, and fail if it does.
79779 */
79780 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
79781 - goto out;
79782 -
79783 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
79784 + if (addr + old_len > new_addr && new_addr + new_len > addr)
79785 goto out;
79786
79787 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79788 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79789 struct vm_area_struct *vma;
79790 unsigned long ret = -EINVAL;
79791 unsigned long charged = 0;
79792 + unsigned long pax_task_size = TASK_SIZE;
79793
79794 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79795 goto out;
79796 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79797 if (!new_len)
79798 goto out;
79799
79800 +#ifdef CONFIG_PAX_SEGMEXEC
79801 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79802 + pax_task_size = SEGMEXEC_TASK_SIZE;
79803 +#endif
79804 +
79805 + pax_task_size -= PAGE_SIZE;
79806 +
79807 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79808 + old_len > pax_task_size || addr > pax_task_size-old_len)
79809 + goto out;
79810 +
79811 if (flags & MREMAP_FIXED) {
79812 if (flags & MREMAP_MAYMOVE)
79813 ret = mremap_to(addr, old_len, new_addr, new_len);
79814 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79815 addr + new_len);
79816 }
79817 ret = addr;
79818 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79819 goto out;
79820 }
79821 }
79822 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79823 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79824 if (ret)
79825 goto out;
79826 +
79827 + map_flags = vma->vm_flags;
79828 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79829 + if (!(ret & ~PAGE_MASK)) {
79830 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79831 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79832 + }
79833 }
79834 out:
79835 if (ret & ~PAGE_MASK)
79836 diff --git a/mm/nommu.c b/mm/nommu.c
79837 index 406e8d4..53970d3 100644
79838 --- a/mm/nommu.c
79839 +++ b/mm/nommu.c
79840 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79841 int sysctl_overcommit_ratio = 50; /* default is 50% */
79842 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79843 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79844 -int heap_stack_gap = 0;
79845
79846 atomic_long_t mmap_pages_allocated;
79847
79848 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79849 EXPORT_SYMBOL(find_vma);
79850
79851 /*
79852 - * find a VMA
79853 - * - we don't extend stack VMAs under NOMMU conditions
79854 - */
79855 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79856 -{
79857 - return find_vma(mm, addr);
79858 -}
79859 -
79860 -/*
79861 * expand a stack to a given address
79862 * - not supported under NOMMU conditions
79863 */
79864 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79865 index 3ecab7e..594a471 100644
79866 --- a/mm/page_alloc.c
79867 +++ b/mm/page_alloc.c
79868 @@ -289,7 +289,7 @@ out:
79869 * This usage means that zero-order pages may not be compound.
79870 */
79871
79872 -static void free_compound_page(struct page *page)
79873 +void free_compound_page(struct page *page)
79874 {
79875 __free_pages_ok(page, compound_order(page));
79876 }
79877 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79878 int bad = 0;
79879 int wasMlocked = __TestClearPageMlocked(page);
79880
79881 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79882 + unsigned long index = 1UL << order;
79883 +#endif
79884 +
79885 kmemcheck_free_shadow(page, order);
79886
79887 for (i = 0 ; i < (1 << order) ; ++i)
79888 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79889 debug_check_no_obj_freed(page_address(page),
79890 PAGE_SIZE << order);
79891 }
79892 +
79893 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79894 + for (; index; --index)
79895 + sanitize_highpage(page + index - 1);
79896 +#endif
79897 +
79898 arch_free_page(page, order);
79899 kernel_map_pages(page, 1 << order, 0);
79900
79901 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79902 arch_alloc_page(page, order);
79903 kernel_map_pages(page, 1 << order, 1);
79904
79905 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
79906 if (gfp_flags & __GFP_ZERO)
79907 prep_zero_page(page, order, gfp_flags);
79908 +#endif
79909
79910 if (order && (gfp_flags & __GFP_COMP))
79911 prep_compound_page(page, order);
79912 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79913 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79914 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79915 }
79916 +
79917 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79918 + sanitize_highpage(page);
79919 +#endif
79920 +
79921 arch_free_page(page, 0);
79922 kernel_map_pages(page, 1, 0);
79923
79924 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
79925 int cpu;
79926 struct zone *zone;
79927
79928 + pax_track_stack();
79929 +
79930 for_each_populated_zone(zone) {
79931 show_node(zone);
79932 printk("%s per-cpu:\n", zone->name);
79933 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79934 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79935 }
79936 #else
79937 -static void inline setup_usemap(struct pglist_data *pgdat,
79938 +static inline void setup_usemap(struct pglist_data *pgdat,
79939 struct zone *zone, unsigned long zonesize) {}
79940 #endif /* CONFIG_SPARSEMEM */
79941
79942 diff --git a/mm/percpu.c b/mm/percpu.c
79943 index c90614a..5f7b7b8 100644
79944 --- a/mm/percpu.c
79945 +++ b/mm/percpu.c
79946 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79947 static unsigned int pcpu_high_unit_cpu __read_mostly;
79948
79949 /* the address of the first chunk which starts with the kernel static area */
79950 -void *pcpu_base_addr __read_mostly;
79951 +void *pcpu_base_addr __read_only;
79952 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79953
79954 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79955 diff --git a/mm/rmap.c b/mm/rmap.c
79956 index dd43373..d848cd7 100644
79957 --- a/mm/rmap.c
79958 +++ b/mm/rmap.c
79959 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79960 /* page_table_lock to protect against threads */
79961 spin_lock(&mm->page_table_lock);
79962 if (likely(!vma->anon_vma)) {
79963 +
79964 +#ifdef CONFIG_PAX_SEGMEXEC
79965 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79966 +
79967 + if (vma_m) {
79968 + BUG_ON(vma_m->anon_vma);
79969 + vma_m->anon_vma = anon_vma;
79970 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79971 + }
79972 +#endif
79973 +
79974 vma->anon_vma = anon_vma;
79975 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79976 allocated = NULL;
79977 diff --git a/mm/shmem.c b/mm/shmem.c
79978 index 3e0005b..1d659a8 100644
79979 --- a/mm/shmem.c
79980 +++ b/mm/shmem.c
79981 @@ -31,7 +31,7 @@
79982 #include <linux/swap.h>
79983 #include <linux/ima.h>
79984
79985 -static struct vfsmount *shm_mnt;
79986 +struct vfsmount *shm_mnt;
79987
79988 #ifdef CONFIG_SHMEM
79989 /*
79990 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79991 goto unlock;
79992 }
79993 entry = shmem_swp_entry(info, index, NULL);
79994 + if (!entry)
79995 + goto unlock;
79996 if (entry->val) {
79997 /*
79998 * The more uptodate page coming down from a stacked
79999 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
80000 struct vm_area_struct pvma;
80001 struct page *page;
80002
80003 + pax_track_stack();
80004 +
80005 spol = mpol_cond_copy(&mpol,
80006 mpol_shared_policy_lookup(&info->policy, idx));
80007
80008 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
80009
80010 info = SHMEM_I(inode);
80011 inode->i_size = len-1;
80012 - if (len <= (char *)inode - (char *)info) {
80013 + if (len <= (char *)inode - (char *)info && len <= 64) {
80014 /* do it inline */
80015 memcpy(info, symname, len);
80016 inode->i_op = &shmem_symlink_inline_operations;
80017 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
80018 int err = -ENOMEM;
80019
80020 /* Round up to L1_CACHE_BYTES to resist false sharing */
80021 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
80022 - L1_CACHE_BYTES), GFP_KERNEL);
80023 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
80024 if (!sbinfo)
80025 return -ENOMEM;
80026
80027 diff --git a/mm/slab.c b/mm/slab.c
80028 index c8d466a..909e01e 100644
80029 --- a/mm/slab.c
80030 +++ b/mm/slab.c
80031 @@ -174,7 +174,7 @@
80032
80033 /* Legal flag mask for kmem_cache_create(). */
80034 #if DEBUG
80035 -# define CREATE_MASK (SLAB_RED_ZONE | \
80036 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
80037 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
80038 SLAB_CACHE_DMA | \
80039 SLAB_STORE_USER | \
80040 @@ -182,7 +182,7 @@
80041 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80042 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
80043 #else
80044 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
80045 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
80046 SLAB_CACHE_DMA | \
80047 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
80048 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80049 @@ -308,7 +308,7 @@ struct kmem_list3 {
80050 * Need this for bootstrapping a per node allocator.
80051 */
80052 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
80053 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
80054 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
80055 #define CACHE_CACHE 0
80056 #define SIZE_AC MAX_NUMNODES
80057 #define SIZE_L3 (2 * MAX_NUMNODES)
80058 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
80059 if ((x)->max_freeable < i) \
80060 (x)->max_freeable = i; \
80061 } while (0)
80062 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
80063 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
80064 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
80065 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
80066 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
80067 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
80068 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
80069 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
80070 #else
80071 #define STATS_INC_ACTIVE(x) do { } while (0)
80072 #define STATS_DEC_ACTIVE(x) do { } while (0)
80073 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
80074 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
80075 */
80076 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
80077 - const struct slab *slab, void *obj)
80078 + const struct slab *slab, const void *obj)
80079 {
80080 u32 offset = (obj - slab->s_mem);
80081 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
80082 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
80083 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
80084 sizes[INDEX_AC].cs_size,
80085 ARCH_KMALLOC_MINALIGN,
80086 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80087 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80088 NULL);
80089
80090 if (INDEX_AC != INDEX_L3) {
80091 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
80092 kmem_cache_create(names[INDEX_L3].name,
80093 sizes[INDEX_L3].cs_size,
80094 ARCH_KMALLOC_MINALIGN,
80095 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80096 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80097 NULL);
80098 }
80099
80100 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
80101 sizes->cs_cachep = kmem_cache_create(names->name,
80102 sizes->cs_size,
80103 ARCH_KMALLOC_MINALIGN,
80104 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80105 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80106 NULL);
80107 }
80108 #ifdef CONFIG_ZONE_DMA
80109 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
80110 }
80111 /* cpu stats */
80112 {
80113 - unsigned long allochit = atomic_read(&cachep->allochit);
80114 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
80115 - unsigned long freehit = atomic_read(&cachep->freehit);
80116 - unsigned long freemiss = atomic_read(&cachep->freemiss);
80117 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
80118 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
80119 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
80120 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
80121
80122 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
80123 allochit, allocmiss, freehit, freemiss);
80124 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
80125
80126 static int __init slab_proc_init(void)
80127 {
80128 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
80129 + mode_t gr_mode = S_IRUGO;
80130 +
80131 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80132 + gr_mode = S_IRUSR;
80133 +#endif
80134 +
80135 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
80136 #ifdef CONFIG_DEBUG_SLAB_LEAK
80137 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
80138 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
80139 #endif
80140 return 0;
80141 }
80142 module_init(slab_proc_init);
80143 #endif
80144
80145 +void check_object_size(const void *ptr, unsigned long n, bool to)
80146 +{
80147 +
80148 +#ifdef CONFIG_PAX_USERCOPY
80149 + struct page *page;
80150 + struct kmem_cache *cachep = NULL;
80151 + struct slab *slabp;
80152 + unsigned int objnr;
80153 + unsigned long offset;
80154 + const char *type;
80155 +
80156 + if (!n)
80157 + return;
80158 +
80159 + type = "<null>";
80160 + if (ZERO_OR_NULL_PTR(ptr))
80161 + goto report;
80162 +
80163 + if (!virt_addr_valid(ptr))
80164 + return;
80165 +
80166 + page = virt_to_head_page(ptr);
80167 +
80168 + type = "<process stack>";
80169 + if (!PageSlab(page)) {
80170 + if (object_is_on_stack(ptr, n) == -1)
80171 + goto report;
80172 + return;
80173 + }
80174 +
80175 + cachep = page_get_cache(page);
80176 + type = cachep->name;
80177 + if (!(cachep->flags & SLAB_USERCOPY))
80178 + goto report;
80179 +
80180 + slabp = page_get_slab(page);
80181 + objnr = obj_to_index(cachep, slabp, ptr);
80182 + BUG_ON(objnr >= cachep->num);
80183 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
80184 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
80185 + return;
80186 +
80187 +report:
80188 + pax_report_usercopy(ptr, n, to, type);
80189 +#endif
80190 +
80191 +}
80192 +EXPORT_SYMBOL(check_object_size);
80193 +
80194 /**
80195 * ksize - get the actual amount of memory allocated for a given object
80196 * @objp: Pointer to the object
80197 diff --git a/mm/slob.c b/mm/slob.c
80198 index 837ebd6..0bd23bc 100644
80199 --- a/mm/slob.c
80200 +++ b/mm/slob.c
80201 @@ -29,7 +29,7 @@
80202 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80203 * alloc_pages() directly, allocating compound pages so the page order
80204 * does not have to be separately tracked, and also stores the exact
80205 - * allocation size in page->private so that it can be used to accurately
80206 + * allocation size in slob_page->size so that it can be used to accurately
80207 * provide ksize(). These objects are detected in kfree() because slob_page()
80208 * is false for them.
80209 *
80210 @@ -58,6 +58,7 @@
80211 */
80212
80213 #include <linux/kernel.h>
80214 +#include <linux/sched.h>
80215 #include <linux/slab.h>
80216 #include <linux/mm.h>
80217 #include <linux/swap.h> /* struct reclaim_state */
80218 @@ -100,7 +101,8 @@ struct slob_page {
80219 unsigned long flags; /* mandatory */
80220 atomic_t _count; /* mandatory */
80221 slobidx_t units; /* free units left in page */
80222 - unsigned long pad[2];
80223 + unsigned long pad[1];
80224 + unsigned long size; /* size when >=PAGE_SIZE */
80225 slob_t *free; /* first free slob_t in page */
80226 struct list_head list; /* linked list of free pages */
80227 };
80228 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80229 */
80230 static inline int is_slob_page(struct slob_page *sp)
80231 {
80232 - return PageSlab((struct page *)sp);
80233 + return PageSlab((struct page *)sp) && !sp->size;
80234 }
80235
80236 static inline void set_slob_page(struct slob_page *sp)
80237 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80238
80239 static inline struct slob_page *slob_page(const void *addr)
80240 {
80241 - return (struct slob_page *)virt_to_page(addr);
80242 + return (struct slob_page *)virt_to_head_page(addr);
80243 }
80244
80245 /*
80246 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80247 /*
80248 * Return the size of a slob block.
80249 */
80250 -static slobidx_t slob_units(slob_t *s)
80251 +static slobidx_t slob_units(const slob_t *s)
80252 {
80253 if (s->units > 0)
80254 return s->units;
80255 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80256 /*
80257 * Return the next free slob block pointer after this one.
80258 */
80259 -static slob_t *slob_next(slob_t *s)
80260 +static slob_t *slob_next(const slob_t *s)
80261 {
80262 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80263 slobidx_t next;
80264 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80265 /*
80266 * Returns true if s is the last free block in its page.
80267 */
80268 -static int slob_last(slob_t *s)
80269 +static int slob_last(const slob_t *s)
80270 {
80271 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80272 }
80273 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80274 if (!page)
80275 return NULL;
80276
80277 + set_slob_page(page);
80278 return page_address(page);
80279 }
80280
80281 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80282 if (!b)
80283 return NULL;
80284 sp = slob_page(b);
80285 - set_slob_page(sp);
80286
80287 spin_lock_irqsave(&slob_lock, flags);
80288 sp->units = SLOB_UNITS(PAGE_SIZE);
80289 sp->free = b;
80290 + sp->size = 0;
80291 INIT_LIST_HEAD(&sp->list);
80292 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80293 set_slob_page_free(sp, slob_list);
80294 @@ -475,10 +478,9 @@ out:
80295 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80296 #endif
80297
80298 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80299 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80300 {
80301 - unsigned int *m;
80302 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80303 + slob_t *m;
80304 void *ret;
80305
80306 lockdep_trace_alloc(gfp);
80307 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80308
80309 if (!m)
80310 return NULL;
80311 - *m = size;
80312 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80313 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80314 + m[0].units = size;
80315 + m[1].units = align;
80316 ret = (void *)m + align;
80317
80318 trace_kmalloc_node(_RET_IP_, ret,
80319 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80320
80321 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80322 if (ret) {
80323 - struct page *page;
80324 - page = virt_to_page(ret);
80325 - page->private = size;
80326 + struct slob_page *sp;
80327 + sp = slob_page(ret);
80328 + sp->size = size;
80329 }
80330
80331 trace_kmalloc_node(_RET_IP_, ret,
80332 size, PAGE_SIZE << order, gfp, node);
80333 }
80334
80335 - kmemleak_alloc(ret, size, 1, gfp);
80336 + return ret;
80337 +}
80338 +
80339 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80340 +{
80341 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80342 + void *ret = __kmalloc_node_align(size, gfp, node, align);
80343 +
80344 + if (!ZERO_OR_NULL_PTR(ret))
80345 + kmemleak_alloc(ret, size, 1, gfp);
80346 return ret;
80347 }
80348 EXPORT_SYMBOL(__kmalloc_node);
80349 @@ -528,13 +542,92 @@ void kfree(const void *block)
80350 sp = slob_page(block);
80351 if (is_slob_page(sp)) {
80352 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80353 - unsigned int *m = (unsigned int *)(block - align);
80354 - slob_free(m, *m + align);
80355 - } else
80356 + slob_t *m = (slob_t *)(block - align);
80357 + slob_free(m, m[0].units + align);
80358 + } else {
80359 + clear_slob_page(sp);
80360 + free_slob_page(sp);
80361 + sp->size = 0;
80362 put_page(&sp->page);
80363 + }
80364 }
80365 EXPORT_SYMBOL(kfree);
80366
80367 +void check_object_size(const void *ptr, unsigned long n, bool to)
80368 +{
80369 +
80370 +#ifdef CONFIG_PAX_USERCOPY
80371 + struct slob_page *sp;
80372 + const slob_t *free;
80373 + const void *base;
80374 + unsigned long flags;
80375 + const char *type;
80376 +
80377 + if (!n)
80378 + return;
80379 +
80380 + type = "<null>";
80381 + if (ZERO_OR_NULL_PTR(ptr))
80382 + goto report;
80383 +
80384 + if (!virt_addr_valid(ptr))
80385 + return;
80386 +
80387 + type = "<process stack>";
80388 + sp = slob_page(ptr);
80389 + if (!PageSlab((struct page *)sp)) {
80390 + if (object_is_on_stack(ptr, n) == -1)
80391 + goto report;
80392 + return;
80393 + }
80394 +
80395 + type = "<slob>";
80396 + if (sp->size) {
80397 + base = page_address(&sp->page);
80398 + if (base <= ptr && n <= sp->size - (ptr - base))
80399 + return;
80400 + goto report;
80401 + }
80402 +
80403 + /* some tricky double walking to find the chunk */
80404 + spin_lock_irqsave(&slob_lock, flags);
80405 + base = (void *)((unsigned long)ptr & PAGE_MASK);
80406 + free = sp->free;
80407 +
80408 + while (!slob_last(free) && (void *)free <= ptr) {
80409 + base = free + slob_units(free);
80410 + free = slob_next(free);
80411 + }
80412 +
80413 + while (base < (void *)free) {
80414 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80415 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
80416 + int offset;
80417 +
80418 + if (ptr < base + align)
80419 + break;
80420 +
80421 + offset = ptr - base - align;
80422 + if (offset >= m) {
80423 + base += size;
80424 + continue;
80425 + }
80426 +
80427 + if (n > m - offset)
80428 + break;
80429 +
80430 + spin_unlock_irqrestore(&slob_lock, flags);
80431 + return;
80432 + }
80433 +
80434 + spin_unlock_irqrestore(&slob_lock, flags);
80435 +report:
80436 + pax_report_usercopy(ptr, n, to, type);
80437 +#endif
80438 +
80439 +}
80440 +EXPORT_SYMBOL(check_object_size);
80441 +
80442 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80443 size_t ksize(const void *block)
80444 {
80445 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
80446 sp = slob_page(block);
80447 if (is_slob_page(sp)) {
80448 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80449 - unsigned int *m = (unsigned int *)(block - align);
80450 - return SLOB_UNITS(*m) * SLOB_UNIT;
80451 + slob_t *m = (slob_t *)(block - align);
80452 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80453 } else
80454 - return sp->page.private;
80455 + return sp->size;
80456 }
80457 EXPORT_SYMBOL(ksize);
80458
80459 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80460 {
80461 struct kmem_cache *c;
80462
80463 +#ifdef CONFIG_PAX_USERCOPY
80464 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
80465 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80466 +#else
80467 c = slob_alloc(sizeof(struct kmem_cache),
80468 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80469 +#endif
80470
80471 if (c) {
80472 c->name = name;
80473 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80474 {
80475 void *b;
80476
80477 +#ifdef CONFIG_PAX_USERCOPY
80478 + b = __kmalloc_node_align(c->size, flags, node, c->align);
80479 +#else
80480 if (c->size < PAGE_SIZE) {
80481 b = slob_alloc(c->size, flags, c->align, node);
80482 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80483 SLOB_UNITS(c->size) * SLOB_UNIT,
80484 flags, node);
80485 } else {
80486 + struct slob_page *sp;
80487 +
80488 b = slob_new_pages(flags, get_order(c->size), node);
80489 + sp = slob_page(b);
80490 + sp->size = c->size;
80491 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80492 PAGE_SIZE << get_order(c->size),
80493 flags, node);
80494 }
80495 +#endif
80496
80497 if (c->ctor)
80498 c->ctor(b);
80499 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80500
80501 static void __kmem_cache_free(void *b, int size)
80502 {
80503 - if (size < PAGE_SIZE)
80504 + struct slob_page *sp = slob_page(b);
80505 +
80506 + if (is_slob_page(sp))
80507 slob_free(b, size);
80508 - else
80509 + else {
80510 + clear_slob_page(sp);
80511 + free_slob_page(sp);
80512 + sp->size = 0;
80513 slob_free_pages(b, get_order(size));
80514 + }
80515 }
80516
80517 static void kmem_rcu_free(struct rcu_head *head)
80518 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80519
80520 void kmem_cache_free(struct kmem_cache *c, void *b)
80521 {
80522 + int size = c->size;
80523 +
80524 +#ifdef CONFIG_PAX_USERCOPY
80525 + if (size + c->align < PAGE_SIZE) {
80526 + size += c->align;
80527 + b -= c->align;
80528 + }
80529 +#endif
80530 +
80531 kmemleak_free_recursive(b, c->flags);
80532 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80533 struct slob_rcu *slob_rcu;
80534 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80535 + slob_rcu = b + (size - sizeof(struct slob_rcu));
80536 INIT_RCU_HEAD(&slob_rcu->head);
80537 - slob_rcu->size = c->size;
80538 + slob_rcu->size = size;
80539 call_rcu(&slob_rcu->head, kmem_rcu_free);
80540 } else {
80541 - __kmem_cache_free(b, c->size);
80542 + __kmem_cache_free(b, size);
80543 }
80544
80545 +#ifdef CONFIG_PAX_USERCOPY
80546 + trace_kfree(_RET_IP_, b);
80547 +#else
80548 trace_kmem_cache_free(_RET_IP_, b);
80549 +#endif
80550 +
80551 }
80552 EXPORT_SYMBOL(kmem_cache_free);
80553
80554 diff --git a/mm/slub.c b/mm/slub.c
80555 index 4996fc7..87e01d0 100644
80556 --- a/mm/slub.c
80557 +++ b/mm/slub.c
80558 @@ -201,7 +201,7 @@ struct track {
80559
80560 enum track_item { TRACK_ALLOC, TRACK_FREE };
80561
80562 -#ifdef CONFIG_SLUB_DEBUG
80563 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80564 static int sysfs_slab_add(struct kmem_cache *);
80565 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80566 static void sysfs_slab_remove(struct kmem_cache *);
80567 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80568 if (!t->addr)
80569 return;
80570
80571 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80572 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80573 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80574 }
80575
80576 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80577
80578 page = virt_to_head_page(x);
80579
80580 + BUG_ON(!PageSlab(page));
80581 +
80582 slab_free(s, page, x, _RET_IP_);
80583
80584 trace_kmem_cache_free(_RET_IP_, x);
80585 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
80586 * Merge control. If this is set then no merging of slab caches will occur.
80587 * (Could be removed. This was introduced to pacify the merge skeptics.)
80588 */
80589 -static int slub_nomerge;
80590 +static int slub_nomerge = 1;
80591
80592 /*
80593 * Calculate the order of allocation given an slab object size.
80594 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80595 * list to avoid pounding the page allocator excessively.
80596 */
80597 set_min_partial(s, ilog2(s->size));
80598 - s->refcount = 1;
80599 + atomic_set(&s->refcount, 1);
80600 #ifdef CONFIG_NUMA
80601 s->remote_node_defrag_ratio = 1000;
80602 #endif
80603 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80604 void kmem_cache_destroy(struct kmem_cache *s)
80605 {
80606 down_write(&slub_lock);
80607 - s->refcount--;
80608 - if (!s->refcount) {
80609 + if (atomic_dec_and_test(&s->refcount)) {
80610 list_del(&s->list);
80611 up_write(&slub_lock);
80612 if (kmem_cache_close(s)) {
80613 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80614 __setup("slub_nomerge", setup_slub_nomerge);
80615
80616 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80617 - const char *name, int size, gfp_t gfp_flags)
80618 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80619 {
80620 - unsigned int flags = 0;
80621 -
80622 if (gfp_flags & SLUB_DMA)
80623 - flags = SLAB_CACHE_DMA;
80624 + flags |= SLAB_CACHE_DMA;
80625
80626 /*
80627 * This function is called with IRQs disabled during early-boot on
80628 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80629 EXPORT_SYMBOL(__kmalloc_node);
80630 #endif
80631
80632 +void check_object_size(const void *ptr, unsigned long n, bool to)
80633 +{
80634 +
80635 +#ifdef CONFIG_PAX_USERCOPY
80636 + struct page *page;
80637 + struct kmem_cache *s = NULL;
80638 + unsigned long offset;
80639 + const char *type;
80640 +
80641 + if (!n)
80642 + return;
80643 +
80644 + type = "<null>";
80645 + if (ZERO_OR_NULL_PTR(ptr))
80646 + goto report;
80647 +
80648 + if (!virt_addr_valid(ptr))
80649 + return;
80650 +
80651 + page = get_object_page(ptr);
80652 +
80653 + type = "<process stack>";
80654 + if (!page) {
80655 + if (object_is_on_stack(ptr, n) == -1)
80656 + goto report;
80657 + return;
80658 + }
80659 +
80660 + s = page->slab;
80661 + type = s->name;
80662 + if (!(s->flags & SLAB_USERCOPY))
80663 + goto report;
80664 +
80665 + offset = (ptr - page_address(page)) % s->size;
80666 + if (offset <= s->objsize && n <= s->objsize - offset)
80667 + return;
80668 +
80669 +report:
80670 + pax_report_usercopy(ptr, n, to, type);
80671 +#endif
80672 +
80673 +}
80674 +EXPORT_SYMBOL(check_object_size);
80675 +
80676 size_t ksize(const void *object)
80677 {
80678 struct page *page;
80679 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80680 * kmem_cache_open for slab_state == DOWN.
80681 */
80682 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80683 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
80684 - kmalloc_caches[0].refcount = -1;
80685 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80686 + atomic_set(&kmalloc_caches[0].refcount, -1);
80687 caches++;
80688
80689 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80690 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80691 /* Caches that are not of the two-to-the-power-of size */
80692 if (KMALLOC_MIN_SIZE <= 32) {
80693 create_kmalloc_cache(&kmalloc_caches[1],
80694 - "kmalloc-96", 96, GFP_NOWAIT);
80695 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80696 caches++;
80697 }
80698 if (KMALLOC_MIN_SIZE <= 64) {
80699 create_kmalloc_cache(&kmalloc_caches[2],
80700 - "kmalloc-192", 192, GFP_NOWAIT);
80701 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80702 caches++;
80703 }
80704
80705 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80706 create_kmalloc_cache(&kmalloc_caches[i],
80707 - "kmalloc", 1 << i, GFP_NOWAIT);
80708 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80709 caches++;
80710 }
80711
80712 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80713 /*
80714 * We may have set a slab to be unmergeable during bootstrap.
80715 */
80716 - if (s->refcount < 0)
80717 + if (atomic_read(&s->refcount) < 0)
80718 return 1;
80719
80720 return 0;
80721 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80722 if (s) {
80723 int cpu;
80724
80725 - s->refcount++;
80726 + atomic_inc(&s->refcount);
80727 /*
80728 * Adjust the object sizes so that we clear
80729 * the complete object on kzalloc.
80730 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80731
80732 if (sysfs_slab_alias(s, name)) {
80733 down_write(&slub_lock);
80734 - s->refcount--;
80735 + atomic_dec(&s->refcount);
80736 up_write(&slub_lock);
80737 goto err;
80738 }
80739 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80740
80741 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80742 {
80743 - return sprintf(buf, "%d\n", s->refcount - 1);
80744 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80745 }
80746 SLAB_ATTR_RO(aliases);
80747
80748 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80749 kfree(s);
80750 }
80751
80752 -static struct sysfs_ops slab_sysfs_ops = {
80753 +static const struct sysfs_ops slab_sysfs_ops = {
80754 .show = slab_attr_show,
80755 .store = slab_attr_store,
80756 };
80757 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80758 return 0;
80759 }
80760
80761 -static struct kset_uevent_ops slab_uevent_ops = {
80762 +static const struct kset_uevent_ops slab_uevent_ops = {
80763 .filter = uevent_filter,
80764 };
80765
80766 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80767 return name;
80768 }
80769
80770 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80771 static int sysfs_slab_add(struct kmem_cache *s)
80772 {
80773 int err;
80774 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80775 kobject_del(&s->kobj);
80776 kobject_put(&s->kobj);
80777 }
80778 +#endif
80779
80780 /*
80781 * Need to buffer aliases during bootup until sysfs becomes
80782 @@ -4632,6 +4677,7 @@ struct saved_alias {
80783
80784 static struct saved_alias *alias_list;
80785
80786 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80787 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80788 {
80789 struct saved_alias *al;
80790 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80791 alias_list = al;
80792 return 0;
80793 }
80794 +#endif
80795
80796 static int __init slab_sysfs_init(void)
80797 {
80798 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80799
80800 static int __init slab_proc_init(void)
80801 {
80802 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80803 + mode_t gr_mode = S_IRUGO;
80804 +
80805 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80806 + gr_mode = S_IRUSR;
80807 +#endif
80808 +
80809 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80810 return 0;
80811 }
80812 module_init(slab_proc_init);
80813 diff --git a/mm/swap.c b/mm/swap.c
80814 index 308e57d..5de19c0 100644
80815 --- a/mm/swap.c
80816 +++ b/mm/swap.c
80817 @@ -30,6 +30,7 @@
80818 #include <linux/notifier.h>
80819 #include <linux/backing-dev.h>
80820 #include <linux/memcontrol.h>
80821 +#include <linux/hugetlb.h>
80822
80823 #include "internal.h"
80824
80825 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80826 compound_page_dtor *dtor;
80827
80828 dtor = get_compound_page_dtor(page);
80829 + if (!PageHuge(page))
80830 + BUG_ON(dtor != free_compound_page);
80831 (*dtor)(page);
80832 }
80833 }
80834 diff --git a/mm/util.c b/mm/util.c
80835 index e48b493..24a601d 100644
80836 --- a/mm/util.c
80837 +++ b/mm/util.c
80838 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80839 void arch_pick_mmap_layout(struct mm_struct *mm)
80840 {
80841 mm->mmap_base = TASK_UNMAPPED_BASE;
80842 +
80843 +#ifdef CONFIG_PAX_RANDMMAP
80844 + if (mm->pax_flags & MF_PAX_RANDMMAP)
80845 + mm->mmap_base += mm->delta_mmap;
80846 +#endif
80847 +
80848 mm->get_unmapped_area = arch_get_unmapped_area;
80849 mm->unmap_area = arch_unmap_area;
80850 }
80851 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80852 index f34ffd0..e60c44f 100644
80853 --- a/mm/vmalloc.c
80854 +++ b/mm/vmalloc.c
80855 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80856
80857 pte = pte_offset_kernel(pmd, addr);
80858 do {
80859 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80860 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80861 +
80862 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80863 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80864 + BUG_ON(!pte_exec(*pte));
80865 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80866 + continue;
80867 + }
80868 +#endif
80869 +
80870 + {
80871 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80872 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80873 + }
80874 } while (pte++, addr += PAGE_SIZE, addr != end);
80875 }
80876
80877 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80878 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80879 {
80880 pte_t *pte;
80881 + int ret = -ENOMEM;
80882
80883 /*
80884 * nr is a running index into the array which helps higher level
80885 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80886 pte = pte_alloc_kernel(pmd, addr);
80887 if (!pte)
80888 return -ENOMEM;
80889 +
80890 + pax_open_kernel();
80891 do {
80892 struct page *page = pages[*nr];
80893
80894 - if (WARN_ON(!pte_none(*pte)))
80895 - return -EBUSY;
80896 - if (WARN_ON(!page))
80897 - return -ENOMEM;
80898 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80899 + if (!(pgprot_val(prot) & _PAGE_NX))
80900 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80901 + else
80902 +#endif
80903 +
80904 + if (WARN_ON(!pte_none(*pte))) {
80905 + ret = -EBUSY;
80906 + goto out;
80907 + }
80908 + if (WARN_ON(!page)) {
80909 + ret = -ENOMEM;
80910 + goto out;
80911 + }
80912 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80913 (*nr)++;
80914 } while (pte++, addr += PAGE_SIZE, addr != end);
80915 - return 0;
80916 + ret = 0;
80917 +out:
80918 + pax_close_kernel();
80919 + return ret;
80920 }
80921
80922 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80923 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80924 * and fall back on vmalloc() if that fails. Others
80925 * just put it in the vmalloc space.
80926 */
80927 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80928 +#ifdef CONFIG_MODULES
80929 +#ifdef MODULES_VADDR
80930 unsigned long addr = (unsigned long)x;
80931 if (addr >= MODULES_VADDR && addr < MODULES_END)
80932 return 1;
80933 #endif
80934 +
80935 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80936 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80937 + return 1;
80938 +#endif
80939 +
80940 +#endif
80941 +
80942 return is_vmalloc_addr(x);
80943 }
80944
80945 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80946
80947 if (!pgd_none(*pgd)) {
80948 pud_t *pud = pud_offset(pgd, addr);
80949 +#ifdef CONFIG_X86
80950 + if (!pud_large(*pud))
80951 +#endif
80952 if (!pud_none(*pud)) {
80953 pmd_t *pmd = pmd_offset(pud, addr);
80954 +#ifdef CONFIG_X86
80955 + if (!pmd_large(*pmd))
80956 +#endif
80957 if (!pmd_none(*pmd)) {
80958 pte_t *ptep, pte;
80959
80960 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80961 struct rb_node *tmp;
80962
80963 while (*p) {
80964 - struct vmap_area *tmp;
80965 + struct vmap_area *varea;
80966
80967 parent = *p;
80968 - tmp = rb_entry(parent, struct vmap_area, rb_node);
80969 - if (va->va_start < tmp->va_end)
80970 + varea = rb_entry(parent, struct vmap_area, rb_node);
80971 + if (va->va_start < varea->va_end)
80972 p = &(*p)->rb_left;
80973 - else if (va->va_end > tmp->va_start)
80974 + else if (va->va_end > varea->va_start)
80975 p = &(*p)->rb_right;
80976 else
80977 BUG();
80978 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80979 struct vm_struct *area;
80980
80981 BUG_ON(in_interrupt());
80982 +
80983 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80984 + if (flags & VM_KERNEXEC) {
80985 + if (start != VMALLOC_START || end != VMALLOC_END)
80986 + return NULL;
80987 + start = (unsigned long)MODULES_EXEC_VADDR;
80988 + end = (unsigned long)MODULES_EXEC_END;
80989 + }
80990 +#endif
80991 +
80992 if (flags & VM_IOREMAP) {
80993 int bit = fls(size);
80994
80995 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80996 if (count > totalram_pages)
80997 return NULL;
80998
80999 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81000 + if (!(pgprot_val(prot) & _PAGE_NX))
81001 + flags |= VM_KERNEXEC;
81002 +#endif
81003 +
81004 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
81005 __builtin_return_address(0));
81006 if (!area)
81007 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81008 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
81009 return NULL;
81010
81011 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81012 + if (!(pgprot_val(prot) & _PAGE_NX))
81013 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
81014 + VMALLOC_START, VMALLOC_END, node,
81015 + gfp_mask, caller);
81016 + else
81017 +#endif
81018 +
81019 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
81020 VMALLOC_START, VMALLOC_END, node,
81021 gfp_mask, caller);
81022 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81023 return addr;
81024 }
81025
81026 +#undef __vmalloc
81027 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
81028 {
81029 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
81030 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
81031 * For tight control over page level allocator and protection flags
81032 * use __vmalloc() instead.
81033 */
81034 +#undef vmalloc
81035 void *vmalloc(unsigned long size)
81036 {
81037 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81038 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
81039 * The resulting memory area is zeroed so it can be mapped to userspace
81040 * without leaking data.
81041 */
81042 +#undef vmalloc_user
81043 void *vmalloc_user(unsigned long size)
81044 {
81045 struct vm_struct *area;
81046 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
81047 * For tight control over page level allocator and protection flags
81048 * use __vmalloc() instead.
81049 */
81050 +#undef vmalloc_node
81051 void *vmalloc_node(unsigned long size, int node)
81052 {
81053 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81054 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
81055 * For tight control over page level allocator and protection flags
81056 * use __vmalloc() instead.
81057 */
81058 -
81059 +#undef vmalloc_exec
81060 void *vmalloc_exec(unsigned long size)
81061 {
81062 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
81063 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
81064 -1, __builtin_return_address(0));
81065 }
81066
81067 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
81068 * Allocate enough 32bit PA addressable pages to cover @size from the
81069 * page level allocator and map them into contiguous kernel virtual space.
81070 */
81071 +#undef vmalloc_32
81072 void *vmalloc_32(unsigned long size)
81073 {
81074 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
81075 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
81076 * The resulting memory area is 32bit addressable and zeroed so it can be
81077 * mapped to userspace without leaking data.
81078 */
81079 +#undef vmalloc_32_user
81080 void *vmalloc_32_user(unsigned long size)
81081 {
81082 struct vm_struct *area;
81083 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
81084 unsigned long uaddr = vma->vm_start;
81085 unsigned long usize = vma->vm_end - vma->vm_start;
81086
81087 + BUG_ON(vma->vm_mirror);
81088 +
81089 if ((PAGE_SIZE-1) & (unsigned long)addr)
81090 return -EINVAL;
81091
81092 diff --git a/mm/vmstat.c b/mm/vmstat.c
81093 index 42d76c6..5643dc4 100644
81094 --- a/mm/vmstat.c
81095 +++ b/mm/vmstat.c
81096 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
81097 *
81098 * vm_stat contains the global counters
81099 */
81100 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81101 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81102 EXPORT_SYMBOL(vm_stat);
81103
81104 #ifdef CONFIG_SMP
81105 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
81106 v = p->vm_stat_diff[i];
81107 p->vm_stat_diff[i] = 0;
81108 local_irq_restore(flags);
81109 - atomic_long_add(v, &zone->vm_stat[i]);
81110 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
81111 global_diff[i] += v;
81112 #ifdef CONFIG_NUMA
81113 /* 3 seconds idle till flush */
81114 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
81115
81116 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
81117 if (global_diff[i])
81118 - atomic_long_add(global_diff[i], &vm_stat[i]);
81119 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
81120 }
81121
81122 #endif
81123 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
81124 start_cpu_timer(cpu);
81125 #endif
81126 #ifdef CONFIG_PROC_FS
81127 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
81128 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
81129 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
81130 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
81131 + {
81132 + mode_t gr_mode = S_IRUGO;
81133 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
81134 + gr_mode = S_IRUSR;
81135 +#endif
81136 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
81137 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
81138 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81139 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
81140 +#else
81141 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
81142 +#endif
81143 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
81144 + }
81145 #endif
81146 return 0;
81147 }
81148 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
81149 index a29c5ab..6143f20 100644
81150 --- a/net/8021q/vlan.c
81151 +++ b/net/8021q/vlan.c
81152 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
81153 err = -EPERM;
81154 if (!capable(CAP_NET_ADMIN))
81155 break;
81156 - if ((args.u.name_type >= 0) &&
81157 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
81158 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
81159 struct vlan_net *vn;
81160
81161 vn = net_generic(net, vlan_net_id);
81162 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
81163 index a2d2984..f9eb711 100644
81164 --- a/net/9p/trans_fd.c
81165 +++ b/net/9p/trans_fd.c
81166 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
81167 oldfs = get_fs();
81168 set_fs(get_ds());
81169 /* The cast to a user pointer is valid due to the set_fs() */
81170 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
81171 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
81172 set_fs(oldfs);
81173
81174 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
81175 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
81176 index 02cc7e7..4514f1b 100644
81177 --- a/net/atm/atm_misc.c
81178 +++ b/net/atm/atm_misc.c
81179 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
81180 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
81181 return 1;
81182 atm_return(vcc,truesize);
81183 - atomic_inc(&vcc->stats->rx_drop);
81184 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81185 return 0;
81186 }
81187
81188 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
81189 }
81190 }
81191 atm_return(vcc,guess);
81192 - atomic_inc(&vcc->stats->rx_drop);
81193 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81194 return NULL;
81195 }
81196
81197 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81198
81199 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81200 {
81201 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81202 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81203 __SONET_ITEMS
81204 #undef __HANDLE_ITEM
81205 }
81206 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81207
81208 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81209 {
81210 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81211 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81212 __SONET_ITEMS
81213 #undef __HANDLE_ITEM
81214 }
81215 diff --git a/net/atm/lec.h b/net/atm/lec.h
81216 index 9d14d19..5c145f3 100644
81217 --- a/net/atm/lec.h
81218 +++ b/net/atm/lec.h
81219 @@ -48,7 +48,7 @@ struct lane2_ops {
81220 const u8 *tlvs, u32 sizeoftlvs);
81221 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81222 const u8 *tlvs, u32 sizeoftlvs);
81223 -};
81224 +} __no_const;
81225
81226 /*
81227 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81228 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81229 index 0919a88..a23d54e 100644
81230 --- a/net/atm/mpc.h
81231 +++ b/net/atm/mpc.h
81232 @@ -33,7 +33,7 @@ struct mpoa_client {
81233 struct mpc_parameters parameters; /* parameters for this client */
81234
81235 const struct net_device_ops *old_ops;
81236 - struct net_device_ops new_ops;
81237 + net_device_ops_no_const new_ops;
81238 };
81239
81240
81241 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81242 index 4504a4b..1733f1e 100644
81243 --- a/net/atm/mpoa_caches.c
81244 +++ b/net/atm/mpoa_caches.c
81245 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81246 struct timeval now;
81247 struct k_message msg;
81248
81249 + pax_track_stack();
81250 +
81251 do_gettimeofday(&now);
81252
81253 write_lock_irq(&client->egress_lock);
81254 diff --git a/net/atm/proc.c b/net/atm/proc.c
81255 index ab8419a..aa91497 100644
81256 --- a/net/atm/proc.c
81257 +++ b/net/atm/proc.c
81258 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81259 const struct k_atm_aal_stats *stats)
81260 {
81261 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81262 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81263 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81264 - atomic_read(&stats->rx_drop));
81265 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81266 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81267 + atomic_read_unchecked(&stats->rx_drop));
81268 }
81269
81270 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81271 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81272 {
81273 struct sock *sk = sk_atm(vcc);
81274
81275 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81276 + seq_printf(seq, "%p ", NULL);
81277 +#else
81278 seq_printf(seq, "%p ", vcc);
81279 +#endif
81280 +
81281 if (!vcc->dev)
81282 seq_printf(seq, "Unassigned ");
81283 else
81284 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81285 {
81286 if (!vcc->dev)
81287 seq_printf(seq, sizeof(void *) == 4 ?
81288 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81289 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81290 +#else
81291 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81292 +#endif
81293 else
81294 seq_printf(seq, "%3d %3d %5d ",
81295 vcc->dev->number, vcc->vpi, vcc->vci);
81296 diff --git a/net/atm/resources.c b/net/atm/resources.c
81297 index 56b7322..c48b84e 100644
81298 --- a/net/atm/resources.c
81299 +++ b/net/atm/resources.c
81300 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81301 static void copy_aal_stats(struct k_atm_aal_stats *from,
81302 struct atm_aal_stats *to)
81303 {
81304 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81305 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81306 __AAL_STAT_ITEMS
81307 #undef __HANDLE_ITEM
81308 }
81309 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81310 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81311 struct atm_aal_stats *to)
81312 {
81313 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81314 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81315 __AAL_STAT_ITEMS
81316 #undef __HANDLE_ITEM
81317 }
81318 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81319 index 8567d47..bba2292 100644
81320 --- a/net/bridge/br_private.h
81321 +++ b/net/bridge/br_private.h
81322 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81323
81324 #ifdef CONFIG_SYSFS
81325 /* br_sysfs_if.c */
81326 -extern struct sysfs_ops brport_sysfs_ops;
81327 +extern const struct sysfs_ops brport_sysfs_ops;
81328 extern int br_sysfs_addif(struct net_bridge_port *p);
81329
81330 /* br_sysfs_br.c */
81331 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81332 index 9a52ac5..c97538e 100644
81333 --- a/net/bridge/br_stp_if.c
81334 +++ b/net/bridge/br_stp_if.c
81335 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81336 char *envp[] = { NULL };
81337
81338 if (br->stp_enabled == BR_USER_STP) {
81339 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81340 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81341 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81342 br->dev->name, r);
81343
81344 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81345 index 820643a..ce77fb3 100644
81346 --- a/net/bridge/br_sysfs_if.c
81347 +++ b/net/bridge/br_sysfs_if.c
81348 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81349 return ret;
81350 }
81351
81352 -struct sysfs_ops brport_sysfs_ops = {
81353 +const struct sysfs_ops brport_sysfs_ops = {
81354 .show = brport_show,
81355 .store = brport_store,
81356 };
81357 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81358 index d73d47f..72df42a 100644
81359 --- a/net/bridge/netfilter/ebtables.c
81360 +++ b/net/bridge/netfilter/ebtables.c
81361 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81362 unsigned int entries_size, nentries;
81363 char *entries;
81364
81365 + pax_track_stack();
81366 +
81367 if (cmd == EBT_SO_GET_ENTRIES) {
81368 entries_size = t->private->entries_size;
81369 nentries = t->private->nentries;
81370 diff --git a/net/can/bcm.c b/net/can/bcm.c
81371 index 2ffd2e0..72a7486 100644
81372 --- a/net/can/bcm.c
81373 +++ b/net/can/bcm.c
81374 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81375 struct bcm_sock *bo = bcm_sk(sk);
81376 struct bcm_op *op;
81377
81378 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81379 + seq_printf(m, ">>> socket %p", NULL);
81380 + seq_printf(m, " / sk %p", NULL);
81381 + seq_printf(m, " / bo %p", NULL);
81382 +#else
81383 seq_printf(m, ">>> socket %p", sk->sk_socket);
81384 seq_printf(m, " / sk %p", sk);
81385 seq_printf(m, " / bo %p", bo);
81386 +#endif
81387 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81388 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81389 seq_printf(m, " <<<\n");
81390 diff --git a/net/compat.c b/net/compat.c
81391 index 9559afc..ccd74e1 100644
81392 --- a/net/compat.c
81393 +++ b/net/compat.c
81394 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81395 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81396 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81397 return -EFAULT;
81398 - kmsg->msg_name = compat_ptr(tmp1);
81399 - kmsg->msg_iov = compat_ptr(tmp2);
81400 - kmsg->msg_control = compat_ptr(tmp3);
81401 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81402 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81403 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81404 return 0;
81405 }
81406
81407 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81408 kern_msg->msg_name = NULL;
81409
81410 tot_len = iov_from_user_compat_to_kern(kern_iov,
81411 - (struct compat_iovec __user *)kern_msg->msg_iov,
81412 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
81413 kern_msg->msg_iovlen);
81414 if (tot_len >= 0)
81415 kern_msg->msg_iov = kern_iov;
81416 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81417
81418 #define CMSG_COMPAT_FIRSTHDR(msg) \
81419 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81420 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81421 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81422 (struct compat_cmsghdr __user *)NULL)
81423
81424 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81425 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81426 (ucmlen) <= (unsigned long) \
81427 ((mhdr)->msg_controllen - \
81428 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81429 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81430
81431 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81432 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81433 {
81434 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81435 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81436 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81437 msg->msg_controllen)
81438 return NULL;
81439 return (struct compat_cmsghdr __user *)ptr;
81440 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81441 {
81442 struct compat_timeval ctv;
81443 struct compat_timespec cts[3];
81444 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81445 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81446 struct compat_cmsghdr cmhdr;
81447 int cmlen;
81448
81449 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81450
81451 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81452 {
81453 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81454 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81455 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81456 int fdnum = scm->fp->count;
81457 struct file **fp = scm->fp->fp;
81458 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81459 len = sizeof(ktime);
81460 old_fs = get_fs();
81461 set_fs(KERNEL_DS);
81462 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81463 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81464 set_fs(old_fs);
81465
81466 if (!err) {
81467 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81468 case MCAST_JOIN_GROUP:
81469 case MCAST_LEAVE_GROUP:
81470 {
81471 - struct compat_group_req __user *gr32 = (void *)optval;
81472 + struct compat_group_req __user *gr32 = (void __user *)optval;
81473 struct group_req __user *kgr =
81474 compat_alloc_user_space(sizeof(struct group_req));
81475 u32 interface;
81476 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81477 case MCAST_BLOCK_SOURCE:
81478 case MCAST_UNBLOCK_SOURCE:
81479 {
81480 - struct compat_group_source_req __user *gsr32 = (void *)optval;
81481 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81482 struct group_source_req __user *kgsr = compat_alloc_user_space(
81483 sizeof(struct group_source_req));
81484 u32 interface;
81485 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81486 }
81487 case MCAST_MSFILTER:
81488 {
81489 - struct compat_group_filter __user *gf32 = (void *)optval;
81490 + struct compat_group_filter __user *gf32 = (void __user *)optval;
81491 struct group_filter __user *kgf;
81492 u32 interface, fmode, numsrc;
81493
81494 diff --git a/net/core/dev.c b/net/core/dev.c
81495 index 84a0705..575db4c 100644
81496 --- a/net/core/dev.c
81497 +++ b/net/core/dev.c
81498 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81499 if (no_module && capable(CAP_NET_ADMIN))
81500 no_module = request_module("netdev-%s", name);
81501 if (no_module && capable(CAP_SYS_MODULE)) {
81502 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
81503 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
81504 +#else
81505 if (!request_module("%s", name))
81506 pr_err("Loading kernel module for a network device "
81507 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81508 "instead\n", name);
81509 +#endif
81510 }
81511 }
81512 EXPORT_SYMBOL(dev_load);
81513 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81514
81515 struct dev_gso_cb {
81516 void (*destructor)(struct sk_buff *skb);
81517 -};
81518 +} __no_const;
81519
81520 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81521
81522 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81523 }
81524 EXPORT_SYMBOL(netif_rx_ni);
81525
81526 -static void net_tx_action(struct softirq_action *h)
81527 +static void net_tx_action(void)
81528 {
81529 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81530
81531 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81532 EXPORT_SYMBOL(netif_napi_del);
81533
81534
81535 -static void net_rx_action(struct softirq_action *h)
81536 +static void net_rx_action(void)
81537 {
81538 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81539 unsigned long time_limit = jiffies + 2;
81540 diff --git a/net/core/flow.c b/net/core/flow.c
81541 index 9601587..8c4824e 100644
81542 --- a/net/core/flow.c
81543 +++ b/net/core/flow.c
81544 @@ -35,11 +35,11 @@ struct flow_cache_entry {
81545 atomic_t *object_ref;
81546 };
81547
81548 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
81549 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81550
81551 static u32 flow_hash_shift;
81552 #define flow_hash_size (1 << flow_hash_shift)
81553 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81554 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81555
81556 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81557
81558 @@ -52,7 +52,7 @@ struct flow_percpu_info {
81559 u32 hash_rnd;
81560 int count;
81561 };
81562 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81563 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81564
81565 #define flow_hash_rnd_recalc(cpu) \
81566 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81567 @@ -69,7 +69,7 @@ struct flow_flush_info {
81568 atomic_t cpuleft;
81569 struct completion completion;
81570 };
81571 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81572 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81573
81574 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81575
81576 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81577 if (fle->family == family &&
81578 fle->dir == dir &&
81579 flow_key_compare(key, &fle->key) == 0) {
81580 - if (fle->genid == atomic_read(&flow_cache_genid)) {
81581 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81582 void *ret = fle->object;
81583
81584 if (ret)
81585 @@ -228,7 +228,7 @@ nocache:
81586 err = resolver(net, key, family, dir, &obj, &obj_ref);
81587
81588 if (fle && !err) {
81589 - fle->genid = atomic_read(&flow_cache_genid);
81590 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
81591
81592 if (fle->object)
81593 atomic_dec(fle->object_ref);
81594 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81595
81596 fle = flow_table(cpu)[i];
81597 for (; fle; fle = fle->next) {
81598 - unsigned genid = atomic_read(&flow_cache_genid);
81599 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81600
81601 if (!fle->object || fle->genid == genid)
81602 continue;
81603 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81604 index d4fd895..ac9b1e6 100644
81605 --- a/net/core/rtnetlink.c
81606 +++ b/net/core/rtnetlink.c
81607 @@ -57,7 +57,7 @@ struct rtnl_link
81608 {
81609 rtnl_doit_func doit;
81610 rtnl_dumpit_func dumpit;
81611 -};
81612 +} __no_const;
81613
81614 static DEFINE_MUTEX(rtnl_mutex);
81615
81616 diff --git a/net/core/scm.c b/net/core/scm.c
81617 index d98eafc..1a190a9 100644
81618 --- a/net/core/scm.c
81619 +++ b/net/core/scm.c
81620 @@ -191,7 +191,7 @@ error:
81621 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81622 {
81623 struct cmsghdr __user *cm
81624 - = (__force struct cmsghdr __user *)msg->msg_control;
81625 + = (struct cmsghdr __force_user *)msg->msg_control;
81626 struct cmsghdr cmhdr;
81627 int cmlen = CMSG_LEN(len);
81628 int err;
81629 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81630 err = -EFAULT;
81631 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81632 goto out;
81633 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81634 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81635 goto out;
81636 cmlen = CMSG_SPACE(len);
81637 if (msg->msg_controllen < cmlen)
81638 @@ -229,7 +229,7 @@ out:
81639 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81640 {
81641 struct cmsghdr __user *cm
81642 - = (__force struct cmsghdr __user*)msg->msg_control;
81643 + = (struct cmsghdr __force_user *)msg->msg_control;
81644
81645 int fdmax = 0;
81646 int fdnum = scm->fp->count;
81647 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81648 if (fdnum < fdmax)
81649 fdmax = fdnum;
81650
81651 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81652 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81653 i++, cmfptr++)
81654 {
81655 int new_fd;
81656 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81657 index 45329d7..626aaa6 100644
81658 --- a/net/core/secure_seq.c
81659 +++ b/net/core/secure_seq.c
81660 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81661 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81662
81663 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81664 - __be16 dport)
81665 + __be16 dport)
81666 {
81667 u32 secret[MD5_MESSAGE_BYTES / 4];
81668 u32 hash[MD5_DIGEST_WORDS];
81669 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81670 secret[i] = net_secret[i];
81671
81672 md5_transform(hash, secret);
81673 -
81674 return hash[0];
81675 }
81676 #endif
81677 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81678 index 025f924..70a71c4 100644
81679 --- a/net/core/skbuff.c
81680 +++ b/net/core/skbuff.c
81681 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81682 struct sk_buff *frag_iter;
81683 struct sock *sk = skb->sk;
81684
81685 + pax_track_stack();
81686 +
81687 /*
81688 * __skb_splice_bits() only fails if the output has no room left,
81689 * so no point in going over the frag_list for the error case.
81690 diff --git a/net/core/sock.c b/net/core/sock.c
81691 index 6605e75..3acebda 100644
81692 --- a/net/core/sock.c
81693 +++ b/net/core/sock.c
81694 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81695 break;
81696
81697 case SO_PEERCRED:
81698 + {
81699 + struct ucred peercred;
81700 if (len > sizeof(sk->sk_peercred))
81701 len = sizeof(sk->sk_peercred);
81702 - if (copy_to_user(optval, &sk->sk_peercred, len))
81703 + peercred = sk->sk_peercred;
81704 + if (copy_to_user(optval, &peercred, len))
81705 return -EFAULT;
81706 goto lenout;
81707 + }
81708
81709 case SO_PEERNAME:
81710 {
81711 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81712 */
81713 smp_wmb();
81714 atomic_set(&sk->sk_refcnt, 1);
81715 - atomic_set(&sk->sk_drops, 0);
81716 + atomic_set_unchecked(&sk->sk_drops, 0);
81717 }
81718 EXPORT_SYMBOL(sock_init_data);
81719
81720 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81721 index 2036568..c55883d 100644
81722 --- a/net/decnet/sysctl_net_decnet.c
81723 +++ b/net/decnet/sysctl_net_decnet.c
81724 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81725
81726 if (len > *lenp) len = *lenp;
81727
81728 - if (copy_to_user(buffer, addr, len))
81729 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
81730 return -EFAULT;
81731
81732 *lenp = len;
81733 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81734
81735 if (len > *lenp) len = *lenp;
81736
81737 - if (copy_to_user(buffer, devname, len))
81738 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
81739 return -EFAULT;
81740
81741 *lenp = len;
81742 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81743 index 39a2d29..f39c0fe 100644
81744 --- a/net/econet/Kconfig
81745 +++ b/net/econet/Kconfig
81746 @@ -4,7 +4,7 @@
81747
81748 config ECONET
81749 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81750 - depends on EXPERIMENTAL && INET
81751 + depends on EXPERIMENTAL && INET && BROKEN
81752 ---help---
81753 Econet is a fairly old and slow networking protocol mainly used by
81754 Acorn computers to access file and print servers. It uses native
81755 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81756 index a413b1b..380849c 100644
81757 --- a/net/ieee802154/dgram.c
81758 +++ b/net/ieee802154/dgram.c
81759 @@ -318,7 +318,7 @@ out:
81760 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81761 {
81762 if (sock_queue_rcv_skb(sk, skb) < 0) {
81763 - atomic_inc(&sk->sk_drops);
81764 + atomic_inc_unchecked(&sk->sk_drops);
81765 kfree_skb(skb);
81766 return NET_RX_DROP;
81767 }
81768 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81769 index 30e74ee..bfc6ee0 100644
81770 --- a/net/ieee802154/raw.c
81771 +++ b/net/ieee802154/raw.c
81772 @@ -206,7 +206,7 @@ out:
81773 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81774 {
81775 if (sock_queue_rcv_skb(sk, skb) < 0) {
81776 - atomic_inc(&sk->sk_drops);
81777 + atomic_inc_unchecked(&sk->sk_drops);
81778 kfree_skb(skb);
81779 return NET_RX_DROP;
81780 }
81781 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81782 index dba56d2..acee5d6 100644
81783 --- a/net/ipv4/inet_diag.c
81784 +++ b/net/ipv4/inet_diag.c
81785 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81786 r->idiag_retrans = 0;
81787
81788 r->id.idiag_if = sk->sk_bound_dev_if;
81789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81790 + r->id.idiag_cookie[0] = 0;
81791 + r->id.idiag_cookie[1] = 0;
81792 +#else
81793 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81794 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81795 +#endif
81796
81797 r->id.idiag_sport = inet->sport;
81798 r->id.idiag_dport = inet->dport;
81799 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81800 r->idiag_family = tw->tw_family;
81801 r->idiag_retrans = 0;
81802 r->id.idiag_if = tw->tw_bound_dev_if;
81803 +
81804 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81805 + r->id.idiag_cookie[0] = 0;
81806 + r->id.idiag_cookie[1] = 0;
81807 +#else
81808 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81809 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81810 +#endif
81811 +
81812 r->id.idiag_sport = tw->tw_sport;
81813 r->id.idiag_dport = tw->tw_dport;
81814 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81815 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81816 if (sk == NULL)
81817 goto unlock;
81818
81819 +#ifndef CONFIG_GRKERNSEC_HIDESYM
81820 err = -ESTALE;
81821 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81822 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81823 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81824 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81825 goto out;
81826 +#endif
81827
81828 err = -ENOMEM;
81829 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81830 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81831 r->idiag_retrans = req->retrans;
81832
81833 r->id.idiag_if = sk->sk_bound_dev_if;
81834 +
81835 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81836 + r->id.idiag_cookie[0] = 0;
81837 + r->id.idiag_cookie[1] = 0;
81838 +#else
81839 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81840 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81841 +#endif
81842
81843 tmo = req->expires - jiffies;
81844 if (tmo < 0)
81845 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81846 index d717267..56de7e7 100644
81847 --- a/net/ipv4/inet_hashtables.c
81848 +++ b/net/ipv4/inet_hashtables.c
81849 @@ -18,12 +18,15 @@
81850 #include <linux/sched.h>
81851 #include <linux/slab.h>
81852 #include <linux/wait.h>
81853 +#include <linux/security.h>
81854
81855 #include <net/inet_connection_sock.h>
81856 #include <net/inet_hashtables.h>
81857 #include <net/secure_seq.h>
81858 #include <net/ip.h>
81859
81860 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81861 +
81862 /*
81863 * Allocate and initialize a new local port bind bucket.
81864 * The bindhash mutex for snum's hash chain must be held here.
81865 @@ -491,6 +494,8 @@ ok:
81866 }
81867 spin_unlock(&head->lock);
81868
81869 + gr_update_task_in_ip_table(current, inet_sk(sk));
81870 +
81871 if (tw) {
81872 inet_twsk_deschedule(tw, death_row);
81873 inet_twsk_put(tw);
81874 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81875 index 13b229f..6956484 100644
81876 --- a/net/ipv4/inetpeer.c
81877 +++ b/net/ipv4/inetpeer.c
81878 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81879 struct inet_peer *p, *n;
81880 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81881
81882 + pax_track_stack();
81883 +
81884 /* Look up for the address quickly. */
81885 read_lock_bh(&peer_pool_lock);
81886 p = lookup(daddr, NULL);
81887 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81888 return NULL;
81889 n->v4daddr = daddr;
81890 atomic_set(&n->refcnt, 1);
81891 - atomic_set(&n->rid, 0);
81892 + atomic_set_unchecked(&n->rid, 0);
81893 n->ip_id_count = secure_ip_id(daddr);
81894 n->tcp_ts_stamp = 0;
81895
81896 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81897 index d3fe10b..feeafc9 100644
81898 --- a/net/ipv4/ip_fragment.c
81899 +++ b/net/ipv4/ip_fragment.c
81900 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81901 return 0;
81902
81903 start = qp->rid;
81904 - end = atomic_inc_return(&peer->rid);
81905 + end = atomic_inc_return_unchecked(&peer->rid);
81906 qp->rid = end;
81907
81908 rc = qp->q.fragments && (end - start) > max;
81909 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81910 index e982b5c..f079d75 100644
81911 --- a/net/ipv4/ip_sockglue.c
81912 +++ b/net/ipv4/ip_sockglue.c
81913 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81914 int val;
81915 int len;
81916
81917 + pax_track_stack();
81918 +
81919 if (level != SOL_IP)
81920 return -EOPNOTSUPP;
81921
81922 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81923 if (sk->sk_type != SOCK_STREAM)
81924 return -ENOPROTOOPT;
81925
81926 - msg.msg_control = optval;
81927 + msg.msg_control = (void __force_kernel *)optval;
81928 msg.msg_controllen = len;
81929 msg.msg_flags = 0;
81930
81931 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81932 index f8d04c2..c1188f2 100644
81933 --- a/net/ipv4/ipconfig.c
81934 +++ b/net/ipv4/ipconfig.c
81935 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81936
81937 mm_segment_t oldfs = get_fs();
81938 set_fs(get_ds());
81939 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81940 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81941 set_fs(oldfs);
81942 return res;
81943 }
81944 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81945
81946 mm_segment_t oldfs = get_fs();
81947 set_fs(get_ds());
81948 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81949 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81950 set_fs(oldfs);
81951 return res;
81952 }
81953 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81954
81955 mm_segment_t oldfs = get_fs();
81956 set_fs(get_ds());
81957 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81958 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81959 set_fs(oldfs);
81960 return res;
81961 }
81962 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81963 index c8b0cc3..4da5ae2 100644
81964 --- a/net/ipv4/netfilter/arp_tables.c
81965 +++ b/net/ipv4/netfilter/arp_tables.c
81966 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81967 private = &tmp;
81968 }
81969 #endif
81970 + memset(&info, 0, sizeof(info));
81971 info.valid_hooks = t->valid_hooks;
81972 memcpy(info.hook_entry, private->hook_entry,
81973 sizeof(info.hook_entry));
81974 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81975 index c156db2..e772975 100644
81976 --- a/net/ipv4/netfilter/ip_queue.c
81977 +++ b/net/ipv4/netfilter/ip_queue.c
81978 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81979
81980 if (v->data_len < sizeof(*user_iph))
81981 return 0;
81982 + if (v->data_len > 65535)
81983 + return -EMSGSIZE;
81984 +
81985 diff = v->data_len - e->skb->len;
81986 if (diff < 0) {
81987 if (pskb_trim(e->skb, v->data_len))
81988 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81989 static inline void
81990 __ipq_rcv_skb(struct sk_buff *skb)
81991 {
81992 - int status, type, pid, flags, nlmsglen, skblen;
81993 + int status, type, pid, flags;
81994 + unsigned int nlmsglen, skblen;
81995 struct nlmsghdr *nlh;
81996
81997 skblen = skb->len;
81998 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81999 index 0606db1..02e7e4c 100644
82000 --- a/net/ipv4/netfilter/ip_tables.c
82001 +++ b/net/ipv4/netfilter/ip_tables.c
82002 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82003 private = &tmp;
82004 }
82005 #endif
82006 + memset(&info, 0, sizeof(info));
82007 info.valid_hooks = t->valid_hooks;
82008 memcpy(info.hook_entry, private->hook_entry,
82009 sizeof(info.hook_entry));
82010 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82011 index d9521f6..3c3eb25 100644
82012 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
82013 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82014 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
82015
82016 *len = 0;
82017
82018 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
82019 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
82020 if (*octets == NULL) {
82021 if (net_ratelimit())
82022 printk("OOM in bsalg (%d)\n", __LINE__);
82023 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
82024 index ab996f9..3da5f96 100644
82025 --- a/net/ipv4/raw.c
82026 +++ b/net/ipv4/raw.c
82027 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82028 /* Charge it to the socket. */
82029
82030 if (sock_queue_rcv_skb(sk, skb) < 0) {
82031 - atomic_inc(&sk->sk_drops);
82032 + atomic_inc_unchecked(&sk->sk_drops);
82033 kfree_skb(skb);
82034 return NET_RX_DROP;
82035 }
82036 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82037 int raw_rcv(struct sock *sk, struct sk_buff *skb)
82038 {
82039 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
82040 - atomic_inc(&sk->sk_drops);
82041 + atomic_inc_unchecked(&sk->sk_drops);
82042 kfree_skb(skb);
82043 return NET_RX_DROP;
82044 }
82045 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
82046
82047 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
82048 {
82049 + struct icmp_filter filter;
82050 +
82051 + if (optlen < 0)
82052 + return -EINVAL;
82053 if (optlen > sizeof(struct icmp_filter))
82054 optlen = sizeof(struct icmp_filter);
82055 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
82056 + if (copy_from_user(&filter, optval, optlen))
82057 return -EFAULT;
82058 + raw_sk(sk)->filter = filter;
82059 +
82060 return 0;
82061 }
82062
82063 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
82064 {
82065 int len, ret = -EFAULT;
82066 + struct icmp_filter filter;
82067
82068 if (get_user(len, optlen))
82069 goto out;
82070 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
82071 if (len > sizeof(struct icmp_filter))
82072 len = sizeof(struct icmp_filter);
82073 ret = -EFAULT;
82074 - if (put_user(len, optlen) ||
82075 - copy_to_user(optval, &raw_sk(sk)->filter, len))
82076 + filter = raw_sk(sk)->filter;
82077 + if (put_user(len, optlen) || len > sizeof filter ||
82078 + copy_to_user(optval, &filter, len))
82079 goto out;
82080 ret = 0;
82081 out: return ret;
82082 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82083 sk_wmem_alloc_get(sp),
82084 sk_rmem_alloc_get(sp),
82085 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82086 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82087 + atomic_read(&sp->sk_refcnt),
82088 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82089 + NULL,
82090 +#else
82091 + sp,
82092 +#endif
82093 + atomic_read_unchecked(&sp->sk_drops));
82094 }
82095
82096 static int raw_seq_show(struct seq_file *seq, void *v)
82097 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
82098 index 58f141b..b759702 100644
82099 --- a/net/ipv4/route.c
82100 +++ b/net/ipv4/route.c
82101 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
82102
82103 static inline int rt_genid(struct net *net)
82104 {
82105 - return atomic_read(&net->ipv4.rt_genid);
82106 + return atomic_read_unchecked(&net->ipv4.rt_genid);
82107 }
82108
82109 #ifdef CONFIG_PROC_FS
82110 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
82111 unsigned char shuffle;
82112
82113 get_random_bytes(&shuffle, sizeof(shuffle));
82114 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
82115 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
82116 }
82117
82118 /*
82119 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
82120
82121 static __net_init int rt_secret_timer_init(struct net *net)
82122 {
82123 - atomic_set(&net->ipv4.rt_genid,
82124 + atomic_set_unchecked(&net->ipv4.rt_genid,
82125 (int) ((num_physpages ^ (num_physpages>>8)) ^
82126 (jiffies ^ (jiffies >> 7))));
82127
82128 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
82129 index f095659..adc892a 100644
82130 --- a/net/ipv4/tcp.c
82131 +++ b/net/ipv4/tcp.c
82132 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
82133 int val;
82134 int err = 0;
82135
82136 + pax_track_stack();
82137 +
82138 /* This is a string value all the others are int's */
82139 if (optname == TCP_CONGESTION) {
82140 char name[TCP_CA_NAME_MAX];
82141 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
82142 struct tcp_sock *tp = tcp_sk(sk);
82143 int val, len;
82144
82145 + pax_track_stack();
82146 +
82147 if (get_user(len, optlen))
82148 return -EFAULT;
82149
82150 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
82151 index 6fc7961..33bad4a 100644
82152 --- a/net/ipv4/tcp_ipv4.c
82153 +++ b/net/ipv4/tcp_ipv4.c
82154 @@ -85,6 +85,9 @@
82155 int sysctl_tcp_tw_reuse __read_mostly;
82156 int sysctl_tcp_low_latency __read_mostly;
82157
82158 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82159 +extern int grsec_enable_blackhole;
82160 +#endif
82161
82162 #ifdef CONFIG_TCP_MD5SIG
82163 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
82164 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
82165 return 0;
82166
82167 reset:
82168 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82169 + if (!grsec_enable_blackhole)
82170 +#endif
82171 tcp_v4_send_reset(rsk, skb);
82172 discard:
82173 kfree_skb(skb);
82174 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
82175 TCP_SKB_CB(skb)->sacked = 0;
82176
82177 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82178 - if (!sk)
82179 + if (!sk) {
82180 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82181 + ret = 1;
82182 +#endif
82183 goto no_tcp_socket;
82184 + }
82185
82186 process:
82187 - if (sk->sk_state == TCP_TIME_WAIT)
82188 + if (sk->sk_state == TCP_TIME_WAIT) {
82189 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82190 + ret = 2;
82191 +#endif
82192 goto do_time_wait;
82193 + }
82194
82195 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82196 goto discard_and_relse;
82197 @@ -1651,6 +1665,10 @@ no_tcp_socket:
82198 bad_packet:
82199 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82200 } else {
82201 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82202 + if (!grsec_enable_blackhole || (ret == 1 &&
82203 + (skb->dev->flags & IFF_LOOPBACK)))
82204 +#endif
82205 tcp_v4_send_reset(NULL, skb);
82206 }
82207
82208 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82209 0, /* non standard timer */
82210 0, /* open_requests have no inode */
82211 atomic_read(&sk->sk_refcnt),
82212 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82213 + NULL,
82214 +#else
82215 req,
82216 +#endif
82217 len);
82218 }
82219
82220 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82221 sock_i_uid(sk),
82222 icsk->icsk_probes_out,
82223 sock_i_ino(sk),
82224 - atomic_read(&sk->sk_refcnt), sk,
82225 + atomic_read(&sk->sk_refcnt),
82226 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82227 + NULL,
82228 +#else
82229 + sk,
82230 +#endif
82231 jiffies_to_clock_t(icsk->icsk_rto),
82232 jiffies_to_clock_t(icsk->icsk_ack.ato),
82233 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82234 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82235 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82236 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82237 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82238 - atomic_read(&tw->tw_refcnt), tw, len);
82239 + atomic_read(&tw->tw_refcnt),
82240 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82241 + NULL,
82242 +#else
82243 + tw,
82244 +#endif
82245 + len);
82246 }
82247
82248 #define TMPSZ 150
82249 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82250 index 4c03598..e09a8e8 100644
82251 --- a/net/ipv4/tcp_minisocks.c
82252 +++ b/net/ipv4/tcp_minisocks.c
82253 @@ -26,6 +26,10 @@
82254 #include <net/inet_common.h>
82255 #include <net/xfrm.h>
82256
82257 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82258 +extern int grsec_enable_blackhole;
82259 +#endif
82260 +
82261 #ifdef CONFIG_SYSCTL
82262 #define SYNC_INIT 0 /* let the user enable it */
82263 #else
82264 @@ -672,6 +676,10 @@ listen_overflow:
82265
82266 embryonic_reset:
82267 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82268 +
82269 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82270 + if (!grsec_enable_blackhole)
82271 +#endif
82272 if (!(flg & TCP_FLAG_RST))
82273 req->rsk_ops->send_reset(sk, skb);
82274
82275 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82276 index af83bdf..ec91cb2 100644
82277 --- a/net/ipv4/tcp_output.c
82278 +++ b/net/ipv4/tcp_output.c
82279 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82280 __u8 *md5_hash_location;
82281 int mss;
82282
82283 + pax_track_stack();
82284 +
82285 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82286 if (skb == NULL)
82287 return NULL;
82288 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82289 index 59f5b5e..193860f 100644
82290 --- a/net/ipv4/tcp_probe.c
82291 +++ b/net/ipv4/tcp_probe.c
82292 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82293 if (cnt + width >= len)
82294 break;
82295
82296 - if (copy_to_user(buf + cnt, tbuf, width))
82297 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82298 return -EFAULT;
82299 cnt += width;
82300 }
82301 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82302 index 57d5501..a9ed13a 100644
82303 --- a/net/ipv4/tcp_timer.c
82304 +++ b/net/ipv4/tcp_timer.c
82305 @@ -21,6 +21,10 @@
82306 #include <linux/module.h>
82307 #include <net/tcp.h>
82308
82309 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82310 +extern int grsec_lastack_retries;
82311 +#endif
82312 +
82313 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82314 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82315 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82316 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82317 }
82318 }
82319
82320 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82321 + if ((sk->sk_state == TCP_LAST_ACK) &&
82322 + (grsec_lastack_retries > 0) &&
82323 + (grsec_lastack_retries < retry_until))
82324 + retry_until = grsec_lastack_retries;
82325 +#endif
82326 +
82327 if (retransmits_timed_out(sk, retry_until)) {
82328 /* Has it gone just too far? */
82329 tcp_write_err(sk);
82330 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82331 index 8e28770..72105c8 100644
82332 --- a/net/ipv4/udp.c
82333 +++ b/net/ipv4/udp.c
82334 @@ -86,6 +86,7 @@
82335 #include <linux/types.h>
82336 #include <linux/fcntl.h>
82337 #include <linux/module.h>
82338 +#include <linux/security.h>
82339 #include <linux/socket.h>
82340 #include <linux/sockios.h>
82341 #include <linux/igmp.h>
82342 @@ -106,6 +107,10 @@
82343 #include <net/xfrm.h>
82344 #include "udp_impl.h"
82345
82346 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82347 +extern int grsec_enable_blackhole;
82348 +#endif
82349 +
82350 struct udp_table udp_table;
82351 EXPORT_SYMBOL(udp_table);
82352
82353 @@ -371,6 +376,9 @@ found:
82354 return s;
82355 }
82356
82357 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82358 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82359 +
82360 /*
82361 * This routine is called by the ICMP module when it gets some
82362 * sort of error condition. If err < 0 then the socket should
82363 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82364 dport = usin->sin_port;
82365 if (dport == 0)
82366 return -EINVAL;
82367 +
82368 + err = gr_search_udp_sendmsg(sk, usin);
82369 + if (err)
82370 + return err;
82371 } else {
82372 if (sk->sk_state != TCP_ESTABLISHED)
82373 return -EDESTADDRREQ;
82374 +
82375 + err = gr_search_udp_sendmsg(sk, NULL);
82376 + if (err)
82377 + return err;
82378 +
82379 daddr = inet->daddr;
82380 dport = inet->dport;
82381 /* Open fast path for connected socket.
82382 @@ -945,6 +962,10 @@ try_again:
82383 if (!skb)
82384 goto out;
82385
82386 + err = gr_search_udp_recvmsg(sk, skb);
82387 + if (err)
82388 + goto out_free;
82389 +
82390 ulen = skb->len - sizeof(struct udphdr);
82391 copied = len;
82392 if (copied > ulen)
82393 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82394 if (rc == -ENOMEM) {
82395 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82396 is_udplite);
82397 - atomic_inc(&sk->sk_drops);
82398 + atomic_inc_unchecked(&sk->sk_drops);
82399 }
82400 goto drop;
82401 }
82402 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82403 goto csum_error;
82404
82405 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82406 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82407 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82408 +#endif
82409 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82410
82411 /*
82412 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82413 sk_wmem_alloc_get(sp),
82414 sk_rmem_alloc_get(sp),
82415 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82416 - atomic_read(&sp->sk_refcnt), sp,
82417 - atomic_read(&sp->sk_drops), len);
82418 + atomic_read(&sp->sk_refcnt),
82419 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82420 + NULL,
82421 +#else
82422 + sp,
82423 +#endif
82424 + atomic_read_unchecked(&sp->sk_drops), len);
82425 }
82426
82427 int udp4_seq_show(struct seq_file *seq, void *v)
82428 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82429 index 8ac3d09..fc58c5f 100644
82430 --- a/net/ipv6/addrconf.c
82431 +++ b/net/ipv6/addrconf.c
82432 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82433 p.iph.ihl = 5;
82434 p.iph.protocol = IPPROTO_IPV6;
82435 p.iph.ttl = 64;
82436 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82437 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82438
82439 if (ops->ndo_do_ioctl) {
82440 mm_segment_t oldfs = get_fs();
82441 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82442 index cc4797d..7cfdfcc 100644
82443 --- a/net/ipv6/inet6_connection_sock.c
82444 +++ b/net/ipv6/inet6_connection_sock.c
82445 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82446 #ifdef CONFIG_XFRM
82447 {
82448 struct rt6_info *rt = (struct rt6_info *)dst;
82449 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82450 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82451 }
82452 #endif
82453 }
82454 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82455 #ifdef CONFIG_XFRM
82456 if (dst) {
82457 struct rt6_info *rt = (struct rt6_info *)dst;
82458 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82459 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82460 sk->sk_dst_cache = NULL;
82461 dst_release(dst);
82462 dst = NULL;
82463 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82464 index 093e9b2..f72cddb 100644
82465 --- a/net/ipv6/inet6_hashtables.c
82466 +++ b/net/ipv6/inet6_hashtables.c
82467 @@ -119,7 +119,7 @@ out:
82468 }
82469 EXPORT_SYMBOL(__inet6_lookup_established);
82470
82471 -static int inline compute_score(struct sock *sk, struct net *net,
82472 +static inline int compute_score(struct sock *sk, struct net *net,
82473 const unsigned short hnum,
82474 const struct in6_addr *daddr,
82475 const int dif)
82476 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82477 index 4f7aaf6..f7acf45 100644
82478 --- a/net/ipv6/ipv6_sockglue.c
82479 +++ b/net/ipv6/ipv6_sockglue.c
82480 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82481 int val, valbool;
82482 int retv = -ENOPROTOOPT;
82483
82484 + pax_track_stack();
82485 +
82486 if (optval == NULL)
82487 val=0;
82488 else {
82489 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82490 int len;
82491 int val;
82492
82493 + pax_track_stack();
82494 +
82495 if (ip6_mroute_opt(optname))
82496 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82497
82498 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82499 if (sk->sk_type != SOCK_STREAM)
82500 return -ENOPROTOOPT;
82501
82502 - msg.msg_control = optval;
82503 + msg.msg_control = (void __force_kernel *)optval;
82504 msg.msg_controllen = len;
82505 msg.msg_flags = 0;
82506
82507 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82508 index 1cf3f0c..1d4376f 100644
82509 --- a/net/ipv6/netfilter/ip6_queue.c
82510 +++ b/net/ipv6/netfilter/ip6_queue.c
82511 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82512
82513 if (v->data_len < sizeof(*user_iph))
82514 return 0;
82515 + if (v->data_len > 65535)
82516 + return -EMSGSIZE;
82517 +
82518 diff = v->data_len - e->skb->len;
82519 if (diff < 0) {
82520 if (pskb_trim(e->skb, v->data_len))
82521 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82522 static inline void
82523 __ipq_rcv_skb(struct sk_buff *skb)
82524 {
82525 - int status, type, pid, flags, nlmsglen, skblen;
82526 + int status, type, pid, flags;
82527 + unsigned int nlmsglen, skblen;
82528 struct nlmsghdr *nlh;
82529
82530 skblen = skb->len;
82531 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82532 index 78b5a36..7f37433 100644
82533 --- a/net/ipv6/netfilter/ip6_tables.c
82534 +++ b/net/ipv6/netfilter/ip6_tables.c
82535 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82536 private = &tmp;
82537 }
82538 #endif
82539 + memset(&info, 0, sizeof(info));
82540 info.valid_hooks = t->valid_hooks;
82541 memcpy(info.hook_entry, private->hook_entry,
82542 sizeof(info.hook_entry));
82543 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82544 index 4f24570..b813b34 100644
82545 --- a/net/ipv6/raw.c
82546 +++ b/net/ipv6/raw.c
82547 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82548 {
82549 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82550 skb_checksum_complete(skb)) {
82551 - atomic_inc(&sk->sk_drops);
82552 + atomic_inc_unchecked(&sk->sk_drops);
82553 kfree_skb(skb);
82554 return NET_RX_DROP;
82555 }
82556
82557 /* Charge it to the socket. */
82558 if (sock_queue_rcv_skb(sk,skb)<0) {
82559 - atomic_inc(&sk->sk_drops);
82560 + atomic_inc_unchecked(&sk->sk_drops);
82561 kfree_skb(skb);
82562 return NET_RX_DROP;
82563 }
82564 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82565 struct raw6_sock *rp = raw6_sk(sk);
82566
82567 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82568 - atomic_inc(&sk->sk_drops);
82569 + atomic_inc_unchecked(&sk->sk_drops);
82570 kfree_skb(skb);
82571 return NET_RX_DROP;
82572 }
82573 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82574
82575 if (inet->hdrincl) {
82576 if (skb_checksum_complete(skb)) {
82577 - atomic_inc(&sk->sk_drops);
82578 + atomic_inc_unchecked(&sk->sk_drops);
82579 kfree_skb(skb);
82580 return NET_RX_DROP;
82581 }
82582 @@ -518,7 +518,7 @@ csum_copy_err:
82583 as some normal condition.
82584 */
82585 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82586 - atomic_inc(&sk->sk_drops);
82587 + atomic_inc_unchecked(&sk->sk_drops);
82588 goto out;
82589 }
82590
82591 @@ -600,7 +600,7 @@ out:
82592 return err;
82593 }
82594
82595 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82596 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82597 struct flowi *fl, struct rt6_info *rt,
82598 unsigned int flags)
82599 {
82600 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82601 u16 proto;
82602 int err;
82603
82604 + pax_track_stack();
82605 +
82606 /* Rough check on arithmetic overflow,
82607 better check is made in ip6_append_data().
82608 */
82609 @@ -916,12 +918,17 @@ do_confirm:
82610 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82611 char __user *optval, int optlen)
82612 {
82613 + struct icmp6_filter filter;
82614 +
82615 switch (optname) {
82616 case ICMPV6_FILTER:
82617 + if (optlen < 0)
82618 + return -EINVAL;
82619 if (optlen > sizeof(struct icmp6_filter))
82620 optlen = sizeof(struct icmp6_filter);
82621 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82622 + if (copy_from_user(&filter, optval, optlen))
82623 return -EFAULT;
82624 + raw6_sk(sk)->filter = filter;
82625 return 0;
82626 default:
82627 return -ENOPROTOOPT;
82628 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82629 char __user *optval, int __user *optlen)
82630 {
82631 int len;
82632 + struct icmp6_filter filter;
82633
82634 switch (optname) {
82635 case ICMPV6_FILTER:
82636 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82637 len = sizeof(struct icmp6_filter);
82638 if (put_user(len, optlen))
82639 return -EFAULT;
82640 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82641 + filter = raw6_sk(sk)->filter;
82642 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
82643 return -EFAULT;
82644 return 0;
82645 default:
82646 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82647 0, 0L, 0,
82648 sock_i_uid(sp), 0,
82649 sock_i_ino(sp),
82650 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82651 + atomic_read(&sp->sk_refcnt),
82652 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82653 + NULL,
82654 +#else
82655 + sp,
82656 +#endif
82657 + atomic_read_unchecked(&sp->sk_drops));
82658 }
82659
82660 static int raw6_seq_show(struct seq_file *seq, void *v)
82661 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82662 index faae6df..d4430c1 100644
82663 --- a/net/ipv6/tcp_ipv6.c
82664 +++ b/net/ipv6/tcp_ipv6.c
82665 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82666 }
82667 #endif
82668
82669 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82670 +extern int grsec_enable_blackhole;
82671 +#endif
82672 +
82673 static void tcp_v6_hash(struct sock *sk)
82674 {
82675 if (sk->sk_state != TCP_CLOSE) {
82676 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82677 return 0;
82678
82679 reset:
82680 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82681 + if (!grsec_enable_blackhole)
82682 +#endif
82683 tcp_v6_send_reset(sk, skb);
82684 discard:
82685 if (opt_skb)
82686 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82687 TCP_SKB_CB(skb)->sacked = 0;
82688
82689 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82690 - if (!sk)
82691 + if (!sk) {
82692 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82693 + ret = 1;
82694 +#endif
82695 goto no_tcp_socket;
82696 + }
82697
82698 process:
82699 - if (sk->sk_state == TCP_TIME_WAIT)
82700 + if (sk->sk_state == TCP_TIME_WAIT) {
82701 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82702 + ret = 2;
82703 +#endif
82704 goto do_time_wait;
82705 + }
82706
82707 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82708 goto discard_and_relse;
82709 @@ -1701,6 +1716,10 @@ no_tcp_socket:
82710 bad_packet:
82711 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82712 } else {
82713 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82714 + if (!grsec_enable_blackhole || (ret == 1 &&
82715 + (skb->dev->flags & IFF_LOOPBACK)))
82716 +#endif
82717 tcp_v6_send_reset(NULL, skb);
82718 }
82719
82720 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82721 uid,
82722 0, /* non standard timer */
82723 0, /* open_requests have no inode */
82724 - 0, req);
82725 + 0,
82726 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82727 + NULL
82728 +#else
82729 + req
82730 +#endif
82731 + );
82732 }
82733
82734 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82735 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82736 sock_i_uid(sp),
82737 icsk->icsk_probes_out,
82738 sock_i_ino(sp),
82739 - atomic_read(&sp->sk_refcnt), sp,
82740 + atomic_read(&sp->sk_refcnt),
82741 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82742 + NULL,
82743 +#else
82744 + sp,
82745 +#endif
82746 jiffies_to_clock_t(icsk->icsk_rto),
82747 jiffies_to_clock_t(icsk->icsk_ack.ato),
82748 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82749 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82750 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82751 tw->tw_substate, 0, 0,
82752 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82753 - atomic_read(&tw->tw_refcnt), tw);
82754 + atomic_read(&tw->tw_refcnt),
82755 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82756 + NULL
82757 +#else
82758 + tw
82759 +#endif
82760 + );
82761 }
82762
82763 static int tcp6_seq_show(struct seq_file *seq, void *v)
82764 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82765 index 9cc6289..052c521 100644
82766 --- a/net/ipv6/udp.c
82767 +++ b/net/ipv6/udp.c
82768 @@ -49,6 +49,10 @@
82769 #include <linux/seq_file.h>
82770 #include "udp_impl.h"
82771
82772 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82773 +extern int grsec_enable_blackhole;
82774 +#endif
82775 +
82776 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82777 {
82778 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82779 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82780 if (rc == -ENOMEM) {
82781 UDP6_INC_STATS_BH(sock_net(sk),
82782 UDP_MIB_RCVBUFERRORS, is_udplite);
82783 - atomic_inc(&sk->sk_drops);
82784 + atomic_inc_unchecked(&sk->sk_drops);
82785 }
82786 goto drop;
82787 }
82788 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82789 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82790 proto == IPPROTO_UDPLITE);
82791
82792 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82793 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82794 +#endif
82795 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82796
82797 kfree_skb(skb);
82798 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82799 0, 0L, 0,
82800 sock_i_uid(sp), 0,
82801 sock_i_ino(sp),
82802 - atomic_read(&sp->sk_refcnt), sp,
82803 - atomic_read(&sp->sk_drops));
82804 + atomic_read(&sp->sk_refcnt),
82805 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82806 + NULL,
82807 +#else
82808 + sp,
82809 +#endif
82810 + atomic_read_unchecked(&sp->sk_drops));
82811 }
82812
82813 int udp6_seq_show(struct seq_file *seq, void *v)
82814 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82815 index 811984d..11f59b7 100644
82816 --- a/net/irda/ircomm/ircomm_tty.c
82817 +++ b/net/irda/ircomm/ircomm_tty.c
82818 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82819 add_wait_queue(&self->open_wait, &wait);
82820
82821 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82822 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82823 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82824
82825 /* As far as I can see, we protect open_count - Jean II */
82826 spin_lock_irqsave(&self->spinlock, flags);
82827 if (!tty_hung_up_p(filp)) {
82828 extra_count = 1;
82829 - self->open_count--;
82830 + local_dec(&self->open_count);
82831 }
82832 spin_unlock_irqrestore(&self->spinlock, flags);
82833 - self->blocked_open++;
82834 + local_inc(&self->blocked_open);
82835
82836 while (1) {
82837 if (tty->termios->c_cflag & CBAUD) {
82838 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82839 }
82840
82841 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82842 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82843 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82844
82845 schedule();
82846 }
82847 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82848 if (extra_count) {
82849 /* ++ is not atomic, so this should be protected - Jean II */
82850 spin_lock_irqsave(&self->spinlock, flags);
82851 - self->open_count++;
82852 + local_inc(&self->open_count);
82853 spin_unlock_irqrestore(&self->spinlock, flags);
82854 }
82855 - self->blocked_open--;
82856 + local_dec(&self->blocked_open);
82857
82858 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82859 - __FILE__,__LINE__, tty->driver->name, self->open_count);
82860 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82861
82862 if (!retval)
82863 self->flags |= ASYNC_NORMAL_ACTIVE;
82864 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82865 }
82866 /* ++ is not atomic, so this should be protected - Jean II */
82867 spin_lock_irqsave(&self->spinlock, flags);
82868 - self->open_count++;
82869 + local_inc(&self->open_count);
82870
82871 tty->driver_data = self;
82872 self->tty = tty;
82873 spin_unlock_irqrestore(&self->spinlock, flags);
82874
82875 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82876 - self->line, self->open_count);
82877 + self->line, local_read(&self->open_count));
82878
82879 /* Not really used by us, but lets do it anyway */
82880 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82881 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82882 return;
82883 }
82884
82885 - if ((tty->count == 1) && (self->open_count != 1)) {
82886 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82887 /*
82888 * Uh, oh. tty->count is 1, which means that the tty
82889 * structure will be freed. state->count should always
82890 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82891 */
82892 IRDA_DEBUG(0, "%s(), bad serial port count; "
82893 "tty->count is 1, state->count is %d\n", __func__ ,
82894 - self->open_count);
82895 - self->open_count = 1;
82896 + local_read(&self->open_count));
82897 + local_set(&self->open_count, 1);
82898 }
82899
82900 - if (--self->open_count < 0) {
82901 + if (local_dec_return(&self->open_count) < 0) {
82902 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82903 - __func__, self->line, self->open_count);
82904 - self->open_count = 0;
82905 + __func__, self->line, local_read(&self->open_count));
82906 + local_set(&self->open_count, 0);
82907 }
82908 - if (self->open_count) {
82909 + if (local_read(&self->open_count)) {
82910 spin_unlock_irqrestore(&self->spinlock, flags);
82911
82912 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82913 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82914 tty->closing = 0;
82915 self->tty = NULL;
82916
82917 - if (self->blocked_open) {
82918 + if (local_read(&self->blocked_open)) {
82919 if (self->close_delay)
82920 schedule_timeout_interruptible(self->close_delay);
82921 wake_up_interruptible(&self->open_wait);
82922 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82923 spin_lock_irqsave(&self->spinlock, flags);
82924 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82925 self->tty = NULL;
82926 - self->open_count = 0;
82927 + local_set(&self->open_count, 0);
82928 spin_unlock_irqrestore(&self->spinlock, flags);
82929
82930 wake_up_interruptible(&self->open_wait);
82931 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82932 seq_putc(m, '\n');
82933
82934 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82935 - seq_printf(m, "Open count: %d\n", self->open_count);
82936 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82937 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82938 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82939
82940 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82941 index bada1b9..f325943 100644
82942 --- a/net/iucv/af_iucv.c
82943 +++ b/net/iucv/af_iucv.c
82944 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82945
82946 write_lock_bh(&iucv_sk_list.lock);
82947
82948 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82949 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82950 while (__iucv_get_sock_by_name(name)) {
82951 sprintf(name, "%08x",
82952 - atomic_inc_return(&iucv_sk_list.autobind_name));
82953 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82954 }
82955
82956 write_unlock_bh(&iucv_sk_list.lock);
82957 diff --git a/net/key/af_key.c b/net/key/af_key.c
82958 index 4e98193..439b449 100644
82959 --- a/net/key/af_key.c
82960 +++ b/net/key/af_key.c
82961 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82962 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82963 struct xfrm_kmaddress k;
82964
82965 + pax_track_stack();
82966 +
82967 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82968 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82969 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82970 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82971 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82972 else
82973 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82975 + NULL,
82976 +#else
82977 s,
82978 +#endif
82979 atomic_read(&s->sk_refcnt),
82980 sk_rmem_alloc_get(s),
82981 sk_wmem_alloc_get(s),
82982 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82983 index bda96d1..c038b72 100644
82984 --- a/net/lapb/lapb_iface.c
82985 +++ b/net/lapb/lapb_iface.c
82986 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82987 goto out;
82988
82989 lapb->dev = dev;
82990 - lapb->callbacks = *callbacks;
82991 + lapb->callbacks = callbacks;
82992
82993 __lapb_insert_cb(lapb);
82994
82995 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82996
82997 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82998 {
82999 - if (lapb->callbacks.connect_confirmation)
83000 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
83001 + if (lapb->callbacks->connect_confirmation)
83002 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
83003 }
83004
83005 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
83006 {
83007 - if (lapb->callbacks.connect_indication)
83008 - lapb->callbacks.connect_indication(lapb->dev, reason);
83009 + if (lapb->callbacks->connect_indication)
83010 + lapb->callbacks->connect_indication(lapb->dev, reason);
83011 }
83012
83013 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
83014 {
83015 - if (lapb->callbacks.disconnect_confirmation)
83016 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
83017 + if (lapb->callbacks->disconnect_confirmation)
83018 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
83019 }
83020
83021 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
83022 {
83023 - if (lapb->callbacks.disconnect_indication)
83024 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
83025 + if (lapb->callbacks->disconnect_indication)
83026 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
83027 }
83028
83029 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
83030 {
83031 - if (lapb->callbacks.data_indication)
83032 - return lapb->callbacks.data_indication(lapb->dev, skb);
83033 + if (lapb->callbacks->data_indication)
83034 + return lapb->callbacks->data_indication(lapb->dev, skb);
83035
83036 kfree_skb(skb);
83037 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
83038 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
83039 {
83040 int used = 0;
83041
83042 - if (lapb->callbacks.data_transmit) {
83043 - lapb->callbacks.data_transmit(lapb->dev, skb);
83044 + if (lapb->callbacks->data_transmit) {
83045 + lapb->callbacks->data_transmit(lapb->dev, skb);
83046 used = 1;
83047 }
83048
83049 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
83050 index fe2d3f8..e57f683 100644
83051 --- a/net/mac80211/cfg.c
83052 +++ b/net/mac80211/cfg.c
83053 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
83054 return err;
83055 }
83056
83057 -struct cfg80211_ops mac80211_config_ops = {
83058 +const struct cfg80211_ops mac80211_config_ops = {
83059 .add_virtual_intf = ieee80211_add_iface,
83060 .del_virtual_intf = ieee80211_del_iface,
83061 .change_virtual_intf = ieee80211_change_iface,
83062 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
83063 index 7d7879f..2d51f62 100644
83064 --- a/net/mac80211/cfg.h
83065 +++ b/net/mac80211/cfg.h
83066 @@ -4,6 +4,6 @@
83067 #ifndef __CFG_H
83068 #define __CFG_H
83069
83070 -extern struct cfg80211_ops mac80211_config_ops;
83071 +extern const struct cfg80211_ops mac80211_config_ops;
83072
83073 #endif /* __CFG_H */
83074 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
83075 index 99c7525..9cb4937 100644
83076 --- a/net/mac80211/debugfs_key.c
83077 +++ b/net/mac80211/debugfs_key.c
83078 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
83079 size_t count, loff_t *ppos)
83080 {
83081 struct ieee80211_key *key = file->private_data;
83082 - int i, res, bufsize = 2 * key->conf.keylen + 2;
83083 + int i, bufsize = 2 * key->conf.keylen + 2;
83084 char *buf = kmalloc(bufsize, GFP_KERNEL);
83085 char *p = buf;
83086 + ssize_t res;
83087 +
83088 + if (buf == NULL)
83089 + return -ENOMEM;
83090
83091 for (i = 0; i < key->conf.keylen; i++)
83092 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
83093 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
83094 index 33a2e89..08650c8 100644
83095 --- a/net/mac80211/debugfs_sta.c
83096 +++ b/net/mac80211/debugfs_sta.c
83097 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
83098 int i;
83099 struct sta_info *sta = file->private_data;
83100
83101 + pax_track_stack();
83102 +
83103 spin_lock_bh(&sta->lock);
83104 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
83105 sta->ampdu_mlme.dialog_token_allocator + 1);
83106 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
83107 index ca62bfe..6657a03 100644
83108 --- a/net/mac80211/ieee80211_i.h
83109 +++ b/net/mac80211/ieee80211_i.h
83110 @@ -25,6 +25,7 @@
83111 #include <linux/etherdevice.h>
83112 #include <net/cfg80211.h>
83113 #include <net/mac80211.h>
83114 +#include <asm/local.h>
83115 #include "key.h"
83116 #include "sta_info.h"
83117
83118 @@ -635,7 +636,7 @@ struct ieee80211_local {
83119 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
83120 spinlock_t queue_stop_reason_lock;
83121
83122 - int open_count;
83123 + local_t open_count;
83124 int monitors, cooked_mntrs;
83125 /* number of interfaces with corresponding FIF_ flags */
83126 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
83127 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
83128 index 079c500..eb3c6d4 100644
83129 --- a/net/mac80211/iface.c
83130 +++ b/net/mac80211/iface.c
83131 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
83132 break;
83133 }
83134
83135 - if (local->open_count == 0) {
83136 + if (local_read(&local->open_count) == 0) {
83137 res = drv_start(local);
83138 if (res)
83139 goto err_del_bss;
83140 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
83141 * Validate the MAC address for this device.
83142 */
83143 if (!is_valid_ether_addr(dev->dev_addr)) {
83144 - if (!local->open_count)
83145 + if (!local_read(&local->open_count))
83146 drv_stop(local);
83147 return -EADDRNOTAVAIL;
83148 }
83149 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
83150
83151 hw_reconf_flags |= __ieee80211_recalc_idle(local);
83152
83153 - local->open_count++;
83154 + local_inc(&local->open_count);
83155 if (hw_reconf_flags) {
83156 ieee80211_hw_config(local, hw_reconf_flags);
83157 /*
83158 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
83159 err_del_interface:
83160 drv_remove_interface(local, &conf);
83161 err_stop:
83162 - if (!local->open_count)
83163 + if (!local_read(&local->open_count))
83164 drv_stop(local);
83165 err_del_bss:
83166 sdata->bss = NULL;
83167 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
83168 WARN_ON(!list_empty(&sdata->u.ap.vlans));
83169 }
83170
83171 - local->open_count--;
83172 + local_dec(&local->open_count);
83173
83174 switch (sdata->vif.type) {
83175 case NL80211_IFTYPE_AP_VLAN:
83176 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
83177
83178 ieee80211_recalc_ps(local, -1);
83179
83180 - if (local->open_count == 0) {
83181 + if (local_read(&local->open_count) == 0) {
83182 ieee80211_clear_tx_pending(local);
83183 ieee80211_stop_device(local);
83184
83185 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
83186 index 2dfe176..74e4388 100644
83187 --- a/net/mac80211/main.c
83188 +++ b/net/mac80211/main.c
83189 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
83190 local->hw.conf.power_level = power;
83191 }
83192
83193 - if (changed && local->open_count) {
83194 + if (changed && local_read(&local->open_count)) {
83195 ret = drv_config(local, changed);
83196 /*
83197 * Goal:
83198 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83199 index e67eea7..fcc227e 100644
83200 --- a/net/mac80211/mlme.c
83201 +++ b/net/mac80211/mlme.c
83202 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83203 bool have_higher_than_11mbit = false, newsta = false;
83204 u16 ap_ht_cap_flags;
83205
83206 + pax_track_stack();
83207 +
83208 /*
83209 * AssocResp and ReassocResp have identical structure, so process both
83210 * of them in this function.
83211 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83212 index e535f1c..4d733d1 100644
83213 --- a/net/mac80211/pm.c
83214 +++ b/net/mac80211/pm.c
83215 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83216 }
83217
83218 /* stop hardware - this must stop RX */
83219 - if (local->open_count)
83220 + if (local_read(&local->open_count))
83221 ieee80211_stop_device(local);
83222
83223 local->suspended = true;
83224 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83225 index b33efc4..0a2efb6 100644
83226 --- a/net/mac80211/rate.c
83227 +++ b/net/mac80211/rate.c
83228 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83229 struct rate_control_ref *ref, *old;
83230
83231 ASSERT_RTNL();
83232 - if (local->open_count)
83233 + if (local_read(&local->open_count))
83234 return -EBUSY;
83235
83236 ref = rate_control_alloc(name, local);
83237 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83238 index b1d7904..57e4da7 100644
83239 --- a/net/mac80211/tx.c
83240 +++ b/net/mac80211/tx.c
83241 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83242 return cpu_to_le16(dur);
83243 }
83244
83245 -static int inline is_ieee80211_device(struct ieee80211_local *local,
83246 +static inline int is_ieee80211_device(struct ieee80211_local *local,
83247 struct net_device *dev)
83248 {
83249 return local == wdev_priv(dev->ieee80211_ptr);
83250 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83251 index 31b1085..48fb26d 100644
83252 --- a/net/mac80211/util.c
83253 +++ b/net/mac80211/util.c
83254 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83255 local->resuming = true;
83256
83257 /* restart hardware */
83258 - if (local->open_count) {
83259 + if (local_read(&local->open_count)) {
83260 /*
83261 * Upon resume hardware can sometimes be goofy due to
83262 * various platform / driver / bus issues, so restarting
83263 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83264 index 634d14a..b35a608 100644
83265 --- a/net/netfilter/Kconfig
83266 +++ b/net/netfilter/Kconfig
83267 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83268
83269 To compile it as a module, choose M here. If unsure, say N.
83270
83271 +config NETFILTER_XT_MATCH_GRADM
83272 + tristate '"gradm" match support'
83273 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83274 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83275 + ---help---
83276 + The gradm match allows to match on grsecurity RBAC being enabled.
83277 + It is useful when iptables rules are applied early on bootup to
83278 + prevent connections to the machine (except from a trusted host)
83279 + while the RBAC system is disabled.
83280 +
83281 config NETFILTER_XT_MATCH_HASHLIMIT
83282 tristate '"hashlimit" match support'
83283 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83284 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83285 index 49f62ee..a17b2c6 100644
83286 --- a/net/netfilter/Makefile
83287 +++ b/net/netfilter/Makefile
83288 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83289 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83290 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83291 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83292 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83293 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83294 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83295 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83296 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83297 index 3c7e427..724043c 100644
83298 --- a/net/netfilter/ipvs/ip_vs_app.c
83299 +++ b/net/netfilter/ipvs/ip_vs_app.c
83300 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83301 .open = ip_vs_app_open,
83302 .read = seq_read,
83303 .llseek = seq_lseek,
83304 - .release = seq_release,
83305 + .release = seq_release_net,
83306 };
83307 #endif
83308
83309 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83310 index 95682e5..457dbac 100644
83311 --- a/net/netfilter/ipvs/ip_vs_conn.c
83312 +++ b/net/netfilter/ipvs/ip_vs_conn.c
83313 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83314 /* if the connection is not template and is created
83315 * by sync, preserve the activity flag.
83316 */
83317 - cp->flags |= atomic_read(&dest->conn_flags) &
83318 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83319 (~IP_VS_CONN_F_INACTIVE);
83320 else
83321 - cp->flags |= atomic_read(&dest->conn_flags);
83322 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83323 cp->dest = dest;
83324
83325 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83326 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83327 atomic_set(&cp->refcnt, 1);
83328
83329 atomic_set(&cp->n_control, 0);
83330 - atomic_set(&cp->in_pkts, 0);
83331 + atomic_set_unchecked(&cp->in_pkts, 0);
83332
83333 atomic_inc(&ip_vs_conn_count);
83334 if (flags & IP_VS_CONN_F_NO_CPORT)
83335 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83336 .open = ip_vs_conn_open,
83337 .read = seq_read,
83338 .llseek = seq_lseek,
83339 - .release = seq_release,
83340 + .release = seq_release_net,
83341 };
83342
83343 static const char *ip_vs_origin_name(unsigned flags)
83344 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83345 .open = ip_vs_conn_sync_open,
83346 .read = seq_read,
83347 .llseek = seq_lseek,
83348 - .release = seq_release,
83349 + .release = seq_release_net,
83350 };
83351
83352 #endif
83353 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83354
83355 /* Don't drop the entry if its number of incoming packets is not
83356 located in [0, 8] */
83357 - i = atomic_read(&cp->in_pkts);
83358 + i = atomic_read_unchecked(&cp->in_pkts);
83359 if (i > 8 || i < 0) return 0;
83360
83361 if (!todrop_rate[i]) return 0;
83362 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83363 index b95699f..5fee919 100644
83364 --- a/net/netfilter/ipvs/ip_vs_core.c
83365 +++ b/net/netfilter/ipvs/ip_vs_core.c
83366 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83367 ret = cp->packet_xmit(skb, cp, pp);
83368 /* do not touch skb anymore */
83369
83370 - atomic_inc(&cp->in_pkts);
83371 + atomic_inc_unchecked(&cp->in_pkts);
83372 ip_vs_conn_put(cp);
83373 return ret;
83374 }
83375 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83376 * Sync connection if it is about to close to
83377 * encorage the standby servers to update the connections timeout
83378 */
83379 - pkts = atomic_add_return(1, &cp->in_pkts);
83380 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83381 if (af == AF_INET &&
83382 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83383 (((cp->protocol != IPPROTO_TCP ||
83384 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83385 index 02b2610..2d89424 100644
83386 --- a/net/netfilter/ipvs/ip_vs_ctl.c
83387 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
83388 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83389 ip_vs_rs_hash(dest);
83390 write_unlock_bh(&__ip_vs_rs_lock);
83391 }
83392 - atomic_set(&dest->conn_flags, conn_flags);
83393 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
83394
83395 /* bind the service */
83396 if (!dest->svc) {
83397 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83398 " %-7s %-6d %-10d %-10d\n",
83399 &dest->addr.in6,
83400 ntohs(dest->port),
83401 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83402 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83403 atomic_read(&dest->weight),
83404 atomic_read(&dest->activeconns),
83405 atomic_read(&dest->inactconns));
83406 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83407 "%-7s %-6d %-10d %-10d\n",
83408 ntohl(dest->addr.ip),
83409 ntohs(dest->port),
83410 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83411 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83412 atomic_read(&dest->weight),
83413 atomic_read(&dest->activeconns),
83414 atomic_read(&dest->inactconns));
83415 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83416 .open = ip_vs_info_open,
83417 .read = seq_read,
83418 .llseek = seq_lseek,
83419 - .release = seq_release_private,
83420 + .release = seq_release_net,
83421 };
83422
83423 #endif
83424 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83425 .open = ip_vs_stats_seq_open,
83426 .read = seq_read,
83427 .llseek = seq_lseek,
83428 - .release = single_release,
83429 + .release = single_release_net,
83430 };
83431
83432 #endif
83433 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83434
83435 entry.addr = dest->addr.ip;
83436 entry.port = dest->port;
83437 - entry.conn_flags = atomic_read(&dest->conn_flags);
83438 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83439 entry.weight = atomic_read(&dest->weight);
83440 entry.u_threshold = dest->u_threshold;
83441 entry.l_threshold = dest->l_threshold;
83442 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83443 unsigned char arg[128];
83444 int ret = 0;
83445
83446 + pax_track_stack();
83447 +
83448 if (!capable(CAP_NET_ADMIN))
83449 return -EPERM;
83450
83451 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83452 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83453
83454 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83455 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83456 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83457 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83458 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83459 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83460 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83461 index e177f0d..55e8581 100644
83462 --- a/net/netfilter/ipvs/ip_vs_sync.c
83463 +++ b/net/netfilter/ipvs/ip_vs_sync.c
83464 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83465
83466 if (opt)
83467 memcpy(&cp->in_seq, opt, sizeof(*opt));
83468 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83469 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83470 cp->state = state;
83471 cp->old_state = cp->state;
83472 /*
83473 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83474 index 30b3189..e2e4b55 100644
83475 --- a/net/netfilter/ipvs/ip_vs_xmit.c
83476 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
83477 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83478 else
83479 rc = NF_ACCEPT;
83480 /* do not touch skb anymore */
83481 - atomic_inc(&cp->in_pkts);
83482 + atomic_inc_unchecked(&cp->in_pkts);
83483 goto out;
83484 }
83485
83486 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83487 else
83488 rc = NF_ACCEPT;
83489 /* do not touch skb anymore */
83490 - atomic_inc(&cp->in_pkts);
83491 + atomic_inc_unchecked(&cp->in_pkts);
83492 goto out;
83493 }
83494
83495 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83496 index d521718..d0fd7a1 100644
83497 --- a/net/netfilter/nf_conntrack_netlink.c
83498 +++ b/net/netfilter/nf_conntrack_netlink.c
83499 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83500 static int
83501 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83502 struct nf_conntrack_tuple *tuple,
83503 - enum ctattr_tuple type, u_int8_t l3num)
83504 + enum ctattr_type type, u_int8_t l3num)
83505 {
83506 struct nlattr *tb[CTA_TUPLE_MAX+1];
83507 int err;
83508 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83509 index f900dc3..5e45346 100644
83510 --- a/net/netfilter/nfnetlink_log.c
83511 +++ b/net/netfilter/nfnetlink_log.c
83512 @@ -68,7 +68,7 @@ struct nfulnl_instance {
83513 };
83514
83515 static DEFINE_RWLOCK(instances_lock);
83516 -static atomic_t global_seq;
83517 +static atomic_unchecked_t global_seq;
83518
83519 #define INSTANCE_BUCKETS 16
83520 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83521 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83522 /* global sequence number */
83523 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83524 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83525 - htonl(atomic_inc_return(&global_seq)));
83526 + htonl(atomic_inc_return_unchecked(&global_seq)));
83527
83528 if (data_len) {
83529 struct nlattr *nla;
83530 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83531 new file mode 100644
83532 index 0000000..b1bac76
83533 --- /dev/null
83534 +++ b/net/netfilter/xt_gradm.c
83535 @@ -0,0 +1,51 @@
83536 +/*
83537 + * gradm match for netfilter
83538 + * Copyright © Zbigniew Krzystolik, 2010
83539 + *
83540 + * This program is free software; you can redistribute it and/or modify
83541 + * it under the terms of the GNU General Public License; either version
83542 + * 2 or 3 as published by the Free Software Foundation.
83543 + */
83544 +#include <linux/module.h>
83545 +#include <linux/moduleparam.h>
83546 +#include <linux/skbuff.h>
83547 +#include <linux/netfilter/x_tables.h>
83548 +#include <linux/grsecurity.h>
83549 +#include <linux/netfilter/xt_gradm.h>
83550 +
83551 +static bool
83552 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
83553 +{
83554 + const struct xt_gradm_mtinfo *info = par->matchinfo;
83555 + bool retval = false;
83556 + if (gr_acl_is_enabled())
83557 + retval = true;
83558 + return retval ^ info->invflags;
83559 +}
83560 +
83561 +static struct xt_match gradm_mt_reg __read_mostly = {
83562 + .name = "gradm",
83563 + .revision = 0,
83564 + .family = NFPROTO_UNSPEC,
83565 + .match = gradm_mt,
83566 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
83567 + .me = THIS_MODULE,
83568 +};
83569 +
83570 +static int __init gradm_mt_init(void)
83571 +{
83572 + return xt_register_match(&gradm_mt_reg);
83573 +}
83574 +
83575 +static void __exit gradm_mt_exit(void)
83576 +{
83577 + xt_unregister_match(&gradm_mt_reg);
83578 +}
83579 +
83580 +module_init(gradm_mt_init);
83581 +module_exit(gradm_mt_exit);
83582 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
83583 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
83584 +MODULE_LICENSE("GPL");
83585 +MODULE_ALIAS("ipt_gradm");
83586 +MODULE_ALIAS("ip6t_gradm");
83587 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
83588 index 5a7dcdf..24a3578 100644
83589 --- a/net/netlink/af_netlink.c
83590 +++ b/net/netlink/af_netlink.c
83591 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
83592 sk->sk_error_report(sk);
83593 }
83594 }
83595 - atomic_inc(&sk->sk_drops);
83596 + atomic_inc_unchecked(&sk->sk_drops);
83597 }
83598
83599 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
83600 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
83601 struct netlink_sock *nlk = nlk_sk(s);
83602
83603 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
83604 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83605 + NULL,
83606 +#else
83607 s,
83608 +#endif
83609 s->sk_protocol,
83610 nlk->pid,
83611 nlk->groups ? (u32)nlk->groups[0] : 0,
83612 sk_rmem_alloc_get(s),
83613 sk_wmem_alloc_get(s),
83614 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83615 + NULL,
83616 +#else
83617 nlk->cb,
83618 +#endif
83619 atomic_read(&s->sk_refcnt),
83620 - atomic_read(&s->sk_drops)
83621 + atomic_read_unchecked(&s->sk_drops)
83622 );
83623
83624 }
83625 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
83626 index 7a83495..ab0062f 100644
83627 --- a/net/netrom/af_netrom.c
83628 +++ b/net/netrom/af_netrom.c
83629 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83630 struct sock *sk = sock->sk;
83631 struct nr_sock *nr = nr_sk(sk);
83632
83633 + memset(sax, 0, sizeof(*sax));
83634 lock_sock(sk);
83635 if (peer != 0) {
83636 if (sk->sk_state != TCP_ESTABLISHED) {
83637 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83638 *uaddr_len = sizeof(struct full_sockaddr_ax25);
83639 } else {
83640 sax->fsa_ax25.sax25_family = AF_NETROM;
83641 - sax->fsa_ax25.sax25_ndigis = 0;
83642 sax->fsa_ax25.sax25_call = nr->source_addr;
83643 *uaddr_len = sizeof(struct sockaddr_ax25);
83644 }
83645 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
83646 index 35cfa79..4e78ff7 100644
83647 --- a/net/packet/af_packet.c
83648 +++ b/net/packet/af_packet.c
83649 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
83650
83651 seq_printf(seq,
83652 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
83653 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83654 + NULL,
83655 +#else
83656 s,
83657 +#endif
83658 atomic_read(&s->sk_refcnt),
83659 s->sk_type,
83660 ntohs(po->num),
83661 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
83662 index 519ff9d..a422a90 100644
83663 --- a/net/phonet/af_phonet.c
83664 +++ b/net/phonet/af_phonet.c
83665 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
83666 {
83667 struct phonet_protocol *pp;
83668
83669 - if (protocol >= PHONET_NPROTO)
83670 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83671 return NULL;
83672
83673 spin_lock(&proto_tab_lock);
83674 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
83675 {
83676 int err = 0;
83677
83678 - if (protocol >= PHONET_NPROTO)
83679 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83680 return -EINVAL;
83681
83682 err = proto_register(pp->prot, 1);
83683 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
83684 index ef5c75c..2b6c2fa 100644
83685 --- a/net/phonet/datagram.c
83686 +++ b/net/phonet/datagram.c
83687 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
83688 if (err < 0) {
83689 kfree_skb(skb);
83690 if (err == -ENOMEM)
83691 - atomic_inc(&sk->sk_drops);
83692 + atomic_inc_unchecked(&sk->sk_drops);
83693 }
83694 return err ? NET_RX_DROP : NET_RX_SUCCESS;
83695 }
83696 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
83697 index 9cdd35e..16cd850 100644
83698 --- a/net/phonet/pep.c
83699 +++ b/net/phonet/pep.c
83700 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83701
83702 case PNS_PEP_CTRL_REQ:
83703 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
83704 - atomic_inc(&sk->sk_drops);
83705 + atomic_inc_unchecked(&sk->sk_drops);
83706 break;
83707 }
83708 __skb_pull(skb, 4);
83709 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83710 if (!err)
83711 return 0;
83712 if (err == -ENOMEM)
83713 - atomic_inc(&sk->sk_drops);
83714 + atomic_inc_unchecked(&sk->sk_drops);
83715 break;
83716 }
83717
83718 if (pn->rx_credits == 0) {
83719 - atomic_inc(&sk->sk_drops);
83720 + atomic_inc_unchecked(&sk->sk_drops);
83721 err = -ENOBUFS;
83722 break;
83723 }
83724 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
83725 index aa5b5a9..c09b4f8 100644
83726 --- a/net/phonet/socket.c
83727 +++ b/net/phonet/socket.c
83728 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
83729 sk->sk_state,
83730 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
83731 sock_i_uid(sk), sock_i_ino(sk),
83732 - atomic_read(&sk->sk_refcnt), sk,
83733 - atomic_read(&sk->sk_drops), &len);
83734 + atomic_read(&sk->sk_refcnt),
83735 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83736 + NULL,
83737 +#else
83738 + sk,
83739 +#endif
83740 + atomic_read_unchecked(&sk->sk_drops), &len);
83741 }
83742 seq_printf(seq, "%*s\n", 127 - len, "");
83743 return 0;
83744 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
83745 index ec753b3..821187c 100644
83746 --- a/net/rds/Kconfig
83747 +++ b/net/rds/Kconfig
83748 @@ -1,7 +1,7 @@
83749
83750 config RDS
83751 tristate "The RDS Protocol (EXPERIMENTAL)"
83752 - depends on INET && EXPERIMENTAL
83753 + depends on INET && EXPERIMENTAL && BROKEN
83754 ---help---
83755 The RDS (Reliable Datagram Sockets) protocol provides reliable,
83756 sequenced delivery of datagrams over Infiniband, iWARP,
83757 diff --git a/net/rds/cong.c b/net/rds/cong.c
83758 index dd2711d..1c7ed12 100644
83759 --- a/net/rds/cong.c
83760 +++ b/net/rds/cong.c
83761 @@ -77,7 +77,7 @@
83762 * finds that the saved generation number is smaller than the global generation
83763 * number, it wakes up the process.
83764 */
83765 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
83766 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
83767
83768 /*
83769 * Congestion monitoring
83770 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
83771 rdsdebug("waking map %p for %pI4\n",
83772 map, &map->m_addr);
83773 rds_stats_inc(s_cong_update_received);
83774 - atomic_inc(&rds_cong_generation);
83775 + atomic_inc_unchecked(&rds_cong_generation);
83776 if (waitqueue_active(&map->m_waitq))
83777 wake_up(&map->m_waitq);
83778 if (waitqueue_active(&rds_poll_waitq))
83779 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
83780
83781 int rds_cong_updated_since(unsigned long *recent)
83782 {
83783 - unsigned long gen = atomic_read(&rds_cong_generation);
83784 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
83785
83786 if (likely(*recent == gen))
83787 return 0;
83788 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
83789 index de4a1b1..94ec861 100644
83790 --- a/net/rds/iw_rdma.c
83791 +++ b/net/rds/iw_rdma.c
83792 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
83793 struct rdma_cm_id *pcm_id;
83794 int rc;
83795
83796 + pax_track_stack();
83797 +
83798 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
83799 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
83800
83801 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
83802 index b5198ae..8b9fb90 100644
83803 --- a/net/rds/tcp.c
83804 +++ b/net/rds/tcp.c
83805 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
83806 int val = 1;
83807
83808 set_fs(KERNEL_DS);
83809 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
83810 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
83811 sizeof(val));
83812 set_fs(oldfs);
83813 }
83814 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
83815 index ab545e0..4079b3b 100644
83816 --- a/net/rds/tcp_send.c
83817 +++ b/net/rds/tcp_send.c
83818 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
83819
83820 oldfs = get_fs();
83821 set_fs(KERNEL_DS);
83822 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
83823 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
83824 sizeof(val));
83825 set_fs(oldfs);
83826 }
83827 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
83828 index a86afce..8657bce 100644
83829 --- a/net/rxrpc/af_rxrpc.c
83830 +++ b/net/rxrpc/af_rxrpc.c
83831 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
83832 __be32 rxrpc_epoch;
83833
83834 /* current debugging ID */
83835 -atomic_t rxrpc_debug_id;
83836 +atomic_unchecked_t rxrpc_debug_id;
83837
83838 /* count of skbs currently in use */
83839 atomic_t rxrpc_n_skbs;
83840 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
83841 index b4a2209..539106c 100644
83842 --- a/net/rxrpc/ar-ack.c
83843 +++ b/net/rxrpc/ar-ack.c
83844 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83845
83846 _enter("{%d,%d,%d,%d},",
83847 call->acks_hard, call->acks_unacked,
83848 - atomic_read(&call->sequence),
83849 + atomic_read_unchecked(&call->sequence),
83850 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
83851
83852 stop = 0;
83853 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83854
83855 /* each Tx packet has a new serial number */
83856 sp->hdr.serial =
83857 - htonl(atomic_inc_return(&call->conn->serial));
83858 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
83859
83860 hdr = (struct rxrpc_header *) txb->head;
83861 hdr->serial = sp->hdr.serial;
83862 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
83863 */
83864 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
83865 {
83866 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
83867 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
83868 }
83869
83870 /*
83871 @@ -627,7 +627,7 @@ process_further:
83872
83873 latest = ntohl(sp->hdr.serial);
83874 hard = ntohl(ack.firstPacket);
83875 - tx = atomic_read(&call->sequence);
83876 + tx = atomic_read_unchecked(&call->sequence);
83877
83878 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83879 latest,
83880 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
83881 u32 abort_code = RX_PROTOCOL_ERROR;
83882 u8 *acks = NULL;
83883
83884 + pax_track_stack();
83885 +
83886 //printk("\n--------------------\n");
83887 _enter("{%d,%s,%lx} [%lu]",
83888 call->debug_id, rxrpc_call_states[call->state], call->events,
83889 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
83890 goto maybe_reschedule;
83891
83892 send_ACK_with_skew:
83893 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
83894 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
83895 ntohl(ack.serial));
83896 send_ACK:
83897 mtu = call->conn->trans->peer->if_mtu;
83898 @@ -1171,7 +1173,7 @@ send_ACK:
83899 ackinfo.rxMTU = htonl(5692);
83900 ackinfo.jumbo_max = htonl(4);
83901
83902 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83903 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83904 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83905 ntohl(hdr.serial),
83906 ntohs(ack.maxSkew),
83907 @@ -1189,7 +1191,7 @@ send_ACK:
83908 send_message:
83909 _debug("send message");
83910
83911 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83912 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83913 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
83914 send_message_2:
83915
83916 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
83917 index bc0019f..e1b4b24 100644
83918 --- a/net/rxrpc/ar-call.c
83919 +++ b/net/rxrpc/ar-call.c
83920 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
83921 spin_lock_init(&call->lock);
83922 rwlock_init(&call->state_lock);
83923 atomic_set(&call->usage, 1);
83924 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
83925 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83926 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
83927
83928 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
83929 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
83930 index 9f1ce84..ff8d061 100644
83931 --- a/net/rxrpc/ar-connection.c
83932 +++ b/net/rxrpc/ar-connection.c
83933 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
83934 rwlock_init(&conn->lock);
83935 spin_lock_init(&conn->state_lock);
83936 atomic_set(&conn->usage, 1);
83937 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
83938 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83939 conn->avail_calls = RXRPC_MAXCALLS;
83940 conn->size_align = 4;
83941 conn->header_size = sizeof(struct rxrpc_header);
83942 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
83943 index 0505cdc..f0748ce 100644
83944 --- a/net/rxrpc/ar-connevent.c
83945 +++ b/net/rxrpc/ar-connevent.c
83946 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
83947
83948 len = iov[0].iov_len + iov[1].iov_len;
83949
83950 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83951 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83952 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
83953
83954 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83955 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
83956 index f98c802..9e8488e 100644
83957 --- a/net/rxrpc/ar-input.c
83958 +++ b/net/rxrpc/ar-input.c
83959 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
83960 /* track the latest serial number on this connection for ACK packet
83961 * information */
83962 serial = ntohl(sp->hdr.serial);
83963 - hi_serial = atomic_read(&call->conn->hi_serial);
83964 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
83965 while (serial > hi_serial)
83966 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
83967 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
83968 serial);
83969
83970 /* request ACK generation for any ACK or DATA packet that requests
83971 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
83972 index 7043b29..06edcdf 100644
83973 --- a/net/rxrpc/ar-internal.h
83974 +++ b/net/rxrpc/ar-internal.h
83975 @@ -272,8 +272,8 @@ struct rxrpc_connection {
83976 int error; /* error code for local abort */
83977 int debug_id; /* debug ID for printks */
83978 unsigned call_counter; /* call ID counter */
83979 - atomic_t serial; /* packet serial number counter */
83980 - atomic_t hi_serial; /* highest serial number received */
83981 + atomic_unchecked_t serial; /* packet serial number counter */
83982 + atomic_unchecked_t hi_serial; /* highest serial number received */
83983 u8 avail_calls; /* number of calls available */
83984 u8 size_align; /* data size alignment (for security) */
83985 u8 header_size; /* rxrpc + security header size */
83986 @@ -346,7 +346,7 @@ struct rxrpc_call {
83987 spinlock_t lock;
83988 rwlock_t state_lock; /* lock for state transition */
83989 atomic_t usage;
83990 - atomic_t sequence; /* Tx data packet sequence counter */
83991 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
83992 u32 abort_code; /* local/remote abort code */
83993 enum { /* current state of call */
83994 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
83995 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
83996 */
83997 extern atomic_t rxrpc_n_skbs;
83998 extern __be32 rxrpc_epoch;
83999 -extern atomic_t rxrpc_debug_id;
84000 +extern atomic_unchecked_t rxrpc_debug_id;
84001 extern struct workqueue_struct *rxrpc_workqueue;
84002
84003 /*
84004 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
84005 index 74697b2..10f9b77 100644
84006 --- a/net/rxrpc/ar-key.c
84007 +++ b/net/rxrpc/ar-key.c
84008 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
84009 return ret;
84010
84011 plen -= sizeof(*token);
84012 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84013 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84014 if (!token)
84015 return -ENOMEM;
84016
84017 - token->kad = kmalloc(plen, GFP_KERNEL);
84018 + token->kad = kzalloc(plen, GFP_KERNEL);
84019 if (!token->kad) {
84020 kfree(token);
84021 return -ENOMEM;
84022 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
84023 goto error;
84024
84025 ret = -ENOMEM;
84026 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84027 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84028 if (!token)
84029 goto error;
84030 - token->kad = kmalloc(plen, GFP_KERNEL);
84031 + token->kad = kzalloc(plen, GFP_KERNEL);
84032 if (!token->kad)
84033 goto error_free;
84034
84035 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
84036 index 807535f..5b7f19e 100644
84037 --- a/net/rxrpc/ar-local.c
84038 +++ b/net/rxrpc/ar-local.c
84039 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
84040 spin_lock_init(&local->lock);
84041 rwlock_init(&local->services_lock);
84042 atomic_set(&local->usage, 1);
84043 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
84044 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84045 memcpy(&local->srx, srx, sizeof(*srx));
84046 }
84047
84048 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
84049 index cc9102c..7d3888e 100644
84050 --- a/net/rxrpc/ar-output.c
84051 +++ b/net/rxrpc/ar-output.c
84052 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
84053 sp->hdr.cid = call->cid;
84054 sp->hdr.callNumber = call->call_id;
84055 sp->hdr.seq =
84056 - htonl(atomic_inc_return(&call->sequence));
84057 + htonl(atomic_inc_return_unchecked(&call->sequence));
84058 sp->hdr.serial =
84059 - htonl(atomic_inc_return(&conn->serial));
84060 + htonl(atomic_inc_return_unchecked(&conn->serial));
84061 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
84062 sp->hdr.userStatus = 0;
84063 sp->hdr.securityIndex = conn->security_ix;
84064 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
84065 index edc026c..4bd4e2d 100644
84066 --- a/net/rxrpc/ar-peer.c
84067 +++ b/net/rxrpc/ar-peer.c
84068 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
84069 INIT_LIST_HEAD(&peer->error_targets);
84070 spin_lock_init(&peer->lock);
84071 atomic_set(&peer->usage, 1);
84072 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
84073 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84074 memcpy(&peer->srx, srx, sizeof(*srx));
84075
84076 rxrpc_assess_MTU_size(peer);
84077 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
84078 index 38047f7..9f48511 100644
84079 --- a/net/rxrpc/ar-proc.c
84080 +++ b/net/rxrpc/ar-proc.c
84081 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
84082 atomic_read(&conn->usage),
84083 rxrpc_conn_states[conn->state],
84084 key_serial(conn->key),
84085 - atomic_read(&conn->serial),
84086 - atomic_read(&conn->hi_serial));
84087 + atomic_read_unchecked(&conn->serial),
84088 + atomic_read_unchecked(&conn->hi_serial));
84089
84090 return 0;
84091 }
84092 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
84093 index 0936e1a..437c640 100644
84094 --- a/net/rxrpc/ar-transport.c
84095 +++ b/net/rxrpc/ar-transport.c
84096 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
84097 spin_lock_init(&trans->client_lock);
84098 rwlock_init(&trans->conn_lock);
84099 atomic_set(&trans->usage, 1);
84100 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
84101 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84102
84103 if (peer->srx.transport.family == AF_INET) {
84104 switch (peer->srx.transport_type) {
84105 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
84106 index 713ac59..306f6ae 100644
84107 --- a/net/rxrpc/rxkad.c
84108 +++ b/net/rxrpc/rxkad.c
84109 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
84110 u16 check;
84111 int nsg;
84112
84113 + pax_track_stack();
84114 +
84115 sp = rxrpc_skb(skb);
84116
84117 _enter("");
84118 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
84119 u16 check;
84120 int nsg;
84121
84122 + pax_track_stack();
84123 +
84124 _enter("");
84125
84126 sp = rxrpc_skb(skb);
84127 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
84128
84129 len = iov[0].iov_len + iov[1].iov_len;
84130
84131 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
84132 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84133 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
84134
84135 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
84136 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
84137
84138 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
84139
84140 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
84141 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84142 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
84143
84144 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
84145 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
84146 index 914c419..7a16d2c 100644
84147 --- a/net/sctp/auth.c
84148 +++ b/net/sctp/auth.c
84149 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
84150 struct sctp_auth_bytes *key;
84151
84152 /* Verify that we are not going to overflow INT_MAX */
84153 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
84154 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
84155 return NULL;
84156
84157 /* Allocate the shared key */
84158 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
84159 index d093cbf..9fc36fc 100644
84160 --- a/net/sctp/proc.c
84161 +++ b/net/sctp/proc.c
84162 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
84163 sctp_for_each_hentry(epb, node, &head->chain) {
84164 ep = sctp_ep(epb);
84165 sk = epb->sk;
84166 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
84167 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
84168 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84169 + NULL, NULL,
84170 +#else
84171 + ep, sk,
84172 +#endif
84173 sctp_sk(sk)->type, sk->sk_state, hash,
84174 epb->bind_addr.port,
84175 sock_i_uid(sk), sock_i_ino(sk));
84176 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
84177 seq_printf(seq,
84178 "%8p %8p %-3d %-3d %-2d %-4d "
84179 "%4d %8d %8d %7d %5lu %-5d %5d ",
84180 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
84181 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84182 + NULL, NULL,
84183 +#else
84184 + assoc, sk,
84185 +#endif
84186 + sctp_sk(sk)->type, sk->sk_state,
84187 assoc->state, hash,
84188 assoc->assoc_id,
84189 assoc->sndbuf_used,
84190 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
84191 index 3a95fcb..c40fc1d 100644
84192 --- a/net/sctp/socket.c
84193 +++ b/net/sctp/socket.c
84194 @@ -5802,7 +5802,6 @@ pp_found:
84195 */
84196 int reuse = sk->sk_reuse;
84197 struct sock *sk2;
84198 - struct hlist_node *node;
84199
84200 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
84201 if (pp->fastreuse && sk->sk_reuse &&
84202 diff --git a/net/socket.c b/net/socket.c
84203 index d449812..4ac08d3c 100644
84204 --- a/net/socket.c
84205 +++ b/net/socket.c
84206 @@ -87,6 +87,7 @@
84207 #include <linux/wireless.h>
84208 #include <linux/nsproxy.h>
84209 #include <linux/magic.h>
84210 +#include <linux/in.h>
84211
84212 #include <asm/uaccess.h>
84213 #include <asm/unistd.h>
84214 @@ -97,6 +98,21 @@
84215 #include <net/sock.h>
84216 #include <linux/netfilter.h>
84217
84218 +extern void gr_attach_curr_ip(const struct sock *sk);
84219 +extern int gr_handle_sock_all(const int family, const int type,
84220 + const int protocol);
84221 +extern int gr_handle_sock_server(const struct sockaddr *sck);
84222 +extern int gr_handle_sock_server_other(const struct sock *sck);
84223 +extern int gr_handle_sock_client(const struct sockaddr *sck);
84224 +extern int gr_search_connect(struct socket * sock,
84225 + struct sockaddr_in * addr);
84226 +extern int gr_search_bind(struct socket * sock,
84227 + struct sockaddr_in * addr);
84228 +extern int gr_search_listen(struct socket * sock);
84229 +extern int gr_search_accept(struct socket * sock);
84230 +extern int gr_search_socket(const int domain, const int type,
84231 + const int protocol);
84232 +
84233 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
84234 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
84235 unsigned long nr_segs, loff_t pos);
84236 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
84237 mnt);
84238 }
84239
84240 -static struct vfsmount *sock_mnt __read_mostly;
84241 +struct vfsmount *sock_mnt __read_mostly;
84242
84243 static struct file_system_type sock_fs_type = {
84244 .name = "sockfs",
84245 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
84246 return -EAFNOSUPPORT;
84247 if (type < 0 || type >= SOCK_MAX)
84248 return -EINVAL;
84249 + if (protocol < 0)
84250 + return -EINVAL;
84251
84252 /* Compatibility.
84253
84254 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
84255 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
84256 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
84257
84258 + if(!gr_search_socket(family, type, protocol)) {
84259 + retval = -EACCES;
84260 + goto out;
84261 + }
84262 +
84263 + if (gr_handle_sock_all(family, type, protocol)) {
84264 + retval = -EACCES;
84265 + goto out;
84266 + }
84267 +
84268 retval = sock_create(family, type, protocol, &sock);
84269 if (retval < 0)
84270 goto out;
84271 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84272 if (sock) {
84273 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
84274 if (err >= 0) {
84275 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
84276 + err = -EACCES;
84277 + goto error;
84278 + }
84279 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
84280 + if (err)
84281 + goto error;
84282 +
84283 err = security_socket_bind(sock,
84284 (struct sockaddr *)&address,
84285 addrlen);
84286 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84287 (struct sockaddr *)
84288 &address, addrlen);
84289 }
84290 +error:
84291 fput_light(sock->file, fput_needed);
84292 }
84293 return err;
84294 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
84295 if ((unsigned)backlog > somaxconn)
84296 backlog = somaxconn;
84297
84298 + if (gr_handle_sock_server_other(sock->sk)) {
84299 + err = -EPERM;
84300 + goto error;
84301 + }
84302 +
84303 + err = gr_search_listen(sock);
84304 + if (err)
84305 + goto error;
84306 +
84307 err = security_socket_listen(sock, backlog);
84308 if (!err)
84309 err = sock->ops->listen(sock, backlog);
84310
84311 +error:
84312 fput_light(sock->file, fput_needed);
84313 }
84314 return err;
84315 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84316 newsock->type = sock->type;
84317 newsock->ops = sock->ops;
84318
84319 + if (gr_handle_sock_server_other(sock->sk)) {
84320 + err = -EPERM;
84321 + sock_release(newsock);
84322 + goto out_put;
84323 + }
84324 +
84325 + err = gr_search_accept(sock);
84326 + if (err) {
84327 + sock_release(newsock);
84328 + goto out_put;
84329 + }
84330 +
84331 /*
84332 * We don't need try_module_get here, as the listening socket (sock)
84333 * has the protocol module (sock->ops->owner) held.
84334 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84335 fd_install(newfd, newfile);
84336 err = newfd;
84337
84338 + gr_attach_curr_ip(newsock->sk);
84339 +
84340 out_put:
84341 fput_light(sock->file, fput_needed);
84342 out:
84343 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84344 int, addrlen)
84345 {
84346 struct socket *sock;
84347 + struct sockaddr *sck;
84348 struct sockaddr_storage address;
84349 int err, fput_needed;
84350
84351 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84352 if (err < 0)
84353 goto out_put;
84354
84355 + sck = (struct sockaddr *)&address;
84356 +
84357 + if (gr_handle_sock_client(sck)) {
84358 + err = -EACCES;
84359 + goto out_put;
84360 + }
84361 +
84362 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
84363 + if (err)
84364 + goto out_put;
84365 +
84366 err =
84367 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
84368 if (err)
84369 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
84370 int err, ctl_len, iov_size, total_len;
84371 int fput_needed;
84372
84373 + pax_track_stack();
84374 +
84375 err = -EFAULT;
84376 if (MSG_CMSG_COMPAT & flags) {
84377 if (get_compat_msghdr(&msg_sys, msg_compat))
84378 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
84379 * kernel msghdr to use the kernel address space)
84380 */
84381
84382 - uaddr = (__force void __user *)msg_sys.msg_name;
84383 + uaddr = (void __force_user *)msg_sys.msg_name;
84384 uaddr_len = COMPAT_NAMELEN(msg);
84385 if (MSG_CMSG_COMPAT & flags) {
84386 err = verify_compat_iovec(&msg_sys, iov,
84387 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
84388 index ac94477..8afe5c3 100644
84389 --- a/net/sunrpc/sched.c
84390 +++ b/net/sunrpc/sched.c
84391 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
84392 #ifdef RPC_DEBUG
84393 static void rpc_task_set_debuginfo(struct rpc_task *task)
84394 {
84395 - static atomic_t rpc_pid;
84396 + static atomic_unchecked_t rpc_pid;
84397
84398 task->tk_magic = RPC_TASK_MAGIC_ID;
84399 - task->tk_pid = atomic_inc_return(&rpc_pid);
84400 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
84401 }
84402 #else
84403 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
84404 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
84405 index 35fb68b..236a8bf 100644
84406 --- a/net/sunrpc/xprtrdma/svc_rdma.c
84407 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
84408 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
84409 static unsigned int min_max_inline = 4096;
84410 static unsigned int max_max_inline = 65536;
84411
84412 -atomic_t rdma_stat_recv;
84413 -atomic_t rdma_stat_read;
84414 -atomic_t rdma_stat_write;
84415 -atomic_t rdma_stat_sq_starve;
84416 -atomic_t rdma_stat_rq_starve;
84417 -atomic_t rdma_stat_rq_poll;
84418 -atomic_t rdma_stat_rq_prod;
84419 -atomic_t rdma_stat_sq_poll;
84420 -atomic_t rdma_stat_sq_prod;
84421 +atomic_unchecked_t rdma_stat_recv;
84422 +atomic_unchecked_t rdma_stat_read;
84423 +atomic_unchecked_t rdma_stat_write;
84424 +atomic_unchecked_t rdma_stat_sq_starve;
84425 +atomic_unchecked_t rdma_stat_rq_starve;
84426 +atomic_unchecked_t rdma_stat_rq_poll;
84427 +atomic_unchecked_t rdma_stat_rq_prod;
84428 +atomic_unchecked_t rdma_stat_sq_poll;
84429 +atomic_unchecked_t rdma_stat_sq_prod;
84430
84431 /* Temporary NFS request map and context caches */
84432 struct kmem_cache *svc_rdma_map_cachep;
84433 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
84434 len -= *ppos;
84435 if (len > *lenp)
84436 len = *lenp;
84437 - if (len && copy_to_user(buffer, str_buf, len))
84438 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
84439 return -EFAULT;
84440 *lenp = len;
84441 *ppos += len;
84442 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
84443 {
84444 .procname = "rdma_stat_read",
84445 .data = &rdma_stat_read,
84446 - .maxlen = sizeof(atomic_t),
84447 + .maxlen = sizeof(atomic_unchecked_t),
84448 .mode = 0644,
84449 .proc_handler = &read_reset_stat,
84450 },
84451 {
84452 .procname = "rdma_stat_recv",
84453 .data = &rdma_stat_recv,
84454 - .maxlen = sizeof(atomic_t),
84455 + .maxlen = sizeof(atomic_unchecked_t),
84456 .mode = 0644,
84457 .proc_handler = &read_reset_stat,
84458 },
84459 {
84460 .procname = "rdma_stat_write",
84461 .data = &rdma_stat_write,
84462 - .maxlen = sizeof(atomic_t),
84463 + .maxlen = sizeof(atomic_unchecked_t),
84464 .mode = 0644,
84465 .proc_handler = &read_reset_stat,
84466 },
84467 {
84468 .procname = "rdma_stat_sq_starve",
84469 .data = &rdma_stat_sq_starve,
84470 - .maxlen = sizeof(atomic_t),
84471 + .maxlen = sizeof(atomic_unchecked_t),
84472 .mode = 0644,
84473 .proc_handler = &read_reset_stat,
84474 },
84475 {
84476 .procname = "rdma_stat_rq_starve",
84477 .data = &rdma_stat_rq_starve,
84478 - .maxlen = sizeof(atomic_t),
84479 + .maxlen = sizeof(atomic_unchecked_t),
84480 .mode = 0644,
84481 .proc_handler = &read_reset_stat,
84482 },
84483 {
84484 .procname = "rdma_stat_rq_poll",
84485 .data = &rdma_stat_rq_poll,
84486 - .maxlen = sizeof(atomic_t),
84487 + .maxlen = sizeof(atomic_unchecked_t),
84488 .mode = 0644,
84489 .proc_handler = &read_reset_stat,
84490 },
84491 {
84492 .procname = "rdma_stat_rq_prod",
84493 .data = &rdma_stat_rq_prod,
84494 - .maxlen = sizeof(atomic_t),
84495 + .maxlen = sizeof(atomic_unchecked_t),
84496 .mode = 0644,
84497 .proc_handler = &read_reset_stat,
84498 },
84499 {
84500 .procname = "rdma_stat_sq_poll",
84501 .data = &rdma_stat_sq_poll,
84502 - .maxlen = sizeof(atomic_t),
84503 + .maxlen = sizeof(atomic_unchecked_t),
84504 .mode = 0644,
84505 .proc_handler = &read_reset_stat,
84506 },
84507 {
84508 .procname = "rdma_stat_sq_prod",
84509 .data = &rdma_stat_sq_prod,
84510 - .maxlen = sizeof(atomic_t),
84511 + .maxlen = sizeof(atomic_unchecked_t),
84512 .mode = 0644,
84513 .proc_handler = &read_reset_stat,
84514 },
84515 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84516 index 9e88438..8ed5cf0 100644
84517 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84518 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84519 @@ -495,7 +495,7 @@ next_sge:
84520 svc_rdma_put_context(ctxt, 0);
84521 goto out;
84522 }
84523 - atomic_inc(&rdma_stat_read);
84524 + atomic_inc_unchecked(&rdma_stat_read);
84525
84526 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
84527 chl_map->ch[ch_no].count -= read_wr.num_sge;
84528 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84529 dto_q);
84530 list_del_init(&ctxt->dto_q);
84531 } else {
84532 - atomic_inc(&rdma_stat_rq_starve);
84533 + atomic_inc_unchecked(&rdma_stat_rq_starve);
84534 clear_bit(XPT_DATA, &xprt->xpt_flags);
84535 ctxt = NULL;
84536 }
84537 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84538 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
84539 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
84540 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
84541 - atomic_inc(&rdma_stat_recv);
84542 + atomic_inc_unchecked(&rdma_stat_recv);
84543
84544 /* Build up the XDR from the receive buffers. */
84545 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
84546 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84547 index f11be72..7aad4e8 100644
84548 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84549 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84550 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
84551 write_wr.wr.rdma.remote_addr = to;
84552
84553 /* Post It */
84554 - atomic_inc(&rdma_stat_write);
84555 + atomic_inc_unchecked(&rdma_stat_write);
84556 if (svc_rdma_send(xprt, &write_wr))
84557 goto err;
84558 return 0;
84559 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84560 index 3fa5751..030ba89 100644
84561 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
84562 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84563 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84564 return;
84565
84566 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
84567 - atomic_inc(&rdma_stat_rq_poll);
84568 + atomic_inc_unchecked(&rdma_stat_rq_poll);
84569
84570 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
84571 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
84572 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84573 }
84574
84575 if (ctxt)
84576 - atomic_inc(&rdma_stat_rq_prod);
84577 + atomic_inc_unchecked(&rdma_stat_rq_prod);
84578
84579 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
84580 /*
84581 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84582 return;
84583
84584 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
84585 - atomic_inc(&rdma_stat_sq_poll);
84586 + atomic_inc_unchecked(&rdma_stat_sq_poll);
84587 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
84588 if (wc.status != IB_WC_SUCCESS)
84589 /* Close the transport */
84590 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84591 }
84592
84593 if (ctxt)
84594 - atomic_inc(&rdma_stat_sq_prod);
84595 + atomic_inc_unchecked(&rdma_stat_sq_prod);
84596 }
84597
84598 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
84599 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
84600 spin_lock_bh(&xprt->sc_lock);
84601 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
84602 spin_unlock_bh(&xprt->sc_lock);
84603 - atomic_inc(&rdma_stat_sq_starve);
84604 + atomic_inc_unchecked(&rdma_stat_sq_starve);
84605
84606 /* See if we can opportunistically reap SQ WR to make room */
84607 sq_cq_reap(xprt);
84608 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
84609 index 0b15d72..7934fbb 100644
84610 --- a/net/sysctl_net.c
84611 +++ b/net/sysctl_net.c
84612 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
84613 struct ctl_table *table)
84614 {
84615 /* Allow network administrator to have same access as root. */
84616 - if (capable(CAP_NET_ADMIN)) {
84617 + if (capable_nolog(CAP_NET_ADMIN)) {
84618 int mode = (table->mode >> 6) & 7;
84619 return (mode << 6) | (mode << 3) | mode;
84620 }
84621 diff --git a/net/tipc/link.c b/net/tipc/link.c
84622 index dd4c18b..f40d38d 100644
84623 --- a/net/tipc/link.c
84624 +++ b/net/tipc/link.c
84625 @@ -1418,7 +1418,7 @@ again:
84626
84627 if (!sect_rest) {
84628 sect_rest = msg_sect[++curr_sect].iov_len;
84629 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
84630 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
84631 }
84632
84633 if (sect_rest < fragm_rest)
84634 @@ -1437,7 +1437,7 @@ error:
84635 }
84636 } else
84637 skb_copy_to_linear_data_offset(buf, fragm_crs,
84638 - sect_crs, sz);
84639 + (const void __force_kernel *)sect_crs, sz);
84640 sect_crs += sz;
84641 sect_rest -= sz;
84642 fragm_crs += sz;
84643 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
84644 index 0747d8a..e8bf3f3 100644
84645 --- a/net/tipc/subscr.c
84646 +++ b/net/tipc/subscr.c
84647 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
84648 {
84649 struct iovec msg_sect;
84650
84651 - msg_sect.iov_base = (void *)&sub->evt;
84652 + msg_sect.iov_base = (void __force_user *)&sub->evt;
84653 msg_sect.iov_len = sizeof(struct tipc_event);
84654
84655 sub->evt.event = htohl(event, sub->swap);
84656 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
84657 index db8d51a..608692d 100644
84658 --- a/net/unix/af_unix.c
84659 +++ b/net/unix/af_unix.c
84660 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
84661 err = -ECONNREFUSED;
84662 if (!S_ISSOCK(inode->i_mode))
84663 goto put_fail;
84664 +
84665 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
84666 + err = -EACCES;
84667 + goto put_fail;
84668 + }
84669 +
84670 u = unix_find_socket_byinode(net, inode);
84671 if (!u)
84672 goto put_fail;
84673 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
84674 if (u) {
84675 struct dentry *dentry;
84676 dentry = unix_sk(u)->dentry;
84677 +
84678 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
84679 + err = -EPERM;
84680 + sock_put(u);
84681 + goto fail;
84682 + }
84683 +
84684 if (dentry)
84685 touch_atime(unix_sk(u)->mnt, dentry);
84686 } else
84687 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
84688 err = security_path_mknod(&nd.path, dentry, mode, 0);
84689 if (err)
84690 goto out_mknod_drop_write;
84691 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
84692 + err = -EACCES;
84693 + goto out_mknod_drop_write;
84694 + }
84695 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
84696 out_mknod_drop_write:
84697 mnt_drop_write(nd.path.mnt);
84698 if (err)
84699 goto out_mknod_dput;
84700 +
84701 + gr_handle_create(dentry, nd.path.mnt);
84702 +
84703 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
84704 dput(nd.path.dentry);
84705 nd.path.dentry = dentry;
84706 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
84707 unix_state_lock(s);
84708
84709 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
84710 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84711 + NULL,
84712 +#else
84713 s,
84714 +#endif
84715 atomic_read(&s->sk_refcnt),
84716 0,
84717 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
84718 diff --git a/net/wireless/core.h b/net/wireless/core.h
84719 index 376798f..109a61f 100644
84720 --- a/net/wireless/core.h
84721 +++ b/net/wireless/core.h
84722 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
84723 struct mutex mtx;
84724
84725 /* rfkill support */
84726 - struct rfkill_ops rfkill_ops;
84727 + rfkill_ops_no_const rfkill_ops;
84728 struct rfkill *rfkill;
84729 struct work_struct rfkill_sync;
84730
84731 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
84732 index a2e4c60..0979cbe 100644
84733 --- a/net/wireless/wext.c
84734 +++ b/net/wireless/wext.c
84735 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84736 */
84737
84738 /* Support for very large requests */
84739 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
84740 - (user_length > descr->max_tokens)) {
84741 + if (user_length > descr->max_tokens) {
84742 /* Allow userspace to GET more than max so
84743 * we can support any size GET requests.
84744 * There is still a limit : -ENOMEM.
84745 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84746 }
84747 }
84748
84749 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
84750 - /*
84751 - * If this is a GET, but not NOMAX, it means that the extra
84752 - * data is not bounded by userspace, but by max_tokens. Thus
84753 - * set the length to max_tokens. This matches the extra data
84754 - * allocation.
84755 - * The driver should fill it with the number of tokens it
84756 - * provided, and it may check iwp->length rather than having
84757 - * knowledge of max_tokens. If the driver doesn't change the
84758 - * iwp->length, this ioctl just copies back max_token tokens
84759 - * filled with zeroes. Hopefully the driver isn't claiming
84760 - * them to be valid data.
84761 - */
84762 - iwp->length = descr->max_tokens;
84763 - }
84764 -
84765 err = handler(dev, info, (union iwreq_data *) iwp, extra);
84766
84767 iwp->length += essid_compat;
84768 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
84769 index cb81ca3..e15d49a 100644
84770 --- a/net/xfrm/xfrm_policy.c
84771 +++ b/net/xfrm/xfrm_policy.c
84772 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
84773 hlist_add_head(&policy->bydst, chain);
84774 xfrm_pol_hold(policy);
84775 net->xfrm.policy_count[dir]++;
84776 - atomic_inc(&flow_cache_genid);
84777 + atomic_inc_unchecked(&flow_cache_genid);
84778 if (delpol)
84779 __xfrm_policy_unlink(delpol, dir);
84780 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
84781 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
84782 write_unlock_bh(&xfrm_policy_lock);
84783
84784 if (ret && delete) {
84785 - atomic_inc(&flow_cache_genid);
84786 + atomic_inc_unchecked(&flow_cache_genid);
84787 xfrm_policy_kill(ret);
84788 }
84789 return ret;
84790 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
84791 write_unlock_bh(&xfrm_policy_lock);
84792
84793 if (ret && delete) {
84794 - atomic_inc(&flow_cache_genid);
84795 + atomic_inc_unchecked(&flow_cache_genid);
84796 xfrm_policy_kill(ret);
84797 }
84798 return ret;
84799 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
84800 }
84801
84802 }
84803 - atomic_inc(&flow_cache_genid);
84804 + atomic_inc_unchecked(&flow_cache_genid);
84805 out:
84806 write_unlock_bh(&xfrm_policy_lock);
84807 return err;
84808 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
84809 write_unlock_bh(&xfrm_policy_lock);
84810 if (pol) {
84811 if (dir < XFRM_POLICY_MAX)
84812 - atomic_inc(&flow_cache_genid);
84813 + atomic_inc_unchecked(&flow_cache_genid);
84814 xfrm_policy_kill(pol);
84815 return 0;
84816 }
84817 @@ -1477,7 +1477,7 @@ free_dst:
84818 goto out;
84819 }
84820
84821 -static int inline
84822 +static inline int
84823 xfrm_dst_alloc_copy(void **target, void *src, int size)
84824 {
84825 if (!*target) {
84826 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
84827 return 0;
84828 }
84829
84830 -static int inline
84831 +static inline int
84832 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84833 {
84834 #ifdef CONFIG_XFRM_SUB_POLICY
84835 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84836 #endif
84837 }
84838
84839 -static int inline
84840 +static inline int
84841 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
84842 {
84843 #ifdef CONFIG_XFRM_SUB_POLICY
84844 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
84845 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
84846
84847 restart:
84848 - genid = atomic_read(&flow_cache_genid);
84849 + genid = atomic_read_unchecked(&flow_cache_genid);
84850 policy = NULL;
84851 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
84852 pols[pi] = NULL;
84853 @@ -1680,7 +1680,7 @@ restart:
84854 goto error;
84855 }
84856 if (nx == -EAGAIN ||
84857 - genid != atomic_read(&flow_cache_genid)) {
84858 + genid != atomic_read_unchecked(&flow_cache_genid)) {
84859 xfrm_pols_put(pols, npols);
84860 goto restart;
84861 }
84862 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
84863 index b95a2d6..85c4d78 100644
84864 --- a/net/xfrm/xfrm_user.c
84865 +++ b/net/xfrm/xfrm_user.c
84866 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
84867 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
84868 int i;
84869
84870 + pax_track_stack();
84871 +
84872 if (xp->xfrm_nr == 0)
84873 return 0;
84874
84875 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
84876 int err;
84877 int n = 0;
84878
84879 + pax_track_stack();
84880 +
84881 if (attrs[XFRMA_MIGRATE] == NULL)
84882 return -EINVAL;
84883
84884 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
84885 index 45b7d56..19e828c 100644
84886 --- a/samples/kobject/kset-example.c
84887 +++ b/samples/kobject/kset-example.c
84888 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
84889 }
84890
84891 /* Our custom sysfs_ops that we will associate with our ktype later on */
84892 -static struct sysfs_ops foo_sysfs_ops = {
84893 +static const struct sysfs_ops foo_sysfs_ops = {
84894 .show = foo_attr_show,
84895 .store = foo_attr_store,
84896 };
84897 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
84898 index 341b589..405aed3 100644
84899 --- a/scripts/Makefile.build
84900 +++ b/scripts/Makefile.build
84901 @@ -59,7 +59,7 @@ endif
84902 endif
84903
84904 # Do not include host rules unless needed
84905 -ifneq ($(hostprogs-y)$(hostprogs-m),)
84906 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
84907 include scripts/Makefile.host
84908 endif
84909
84910 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
84911 index 6f89fbb..53adc9c 100644
84912 --- a/scripts/Makefile.clean
84913 +++ b/scripts/Makefile.clean
84914 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
84915 __clean-files := $(extra-y) $(always) \
84916 $(targets) $(clean-files) \
84917 $(host-progs) \
84918 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
84919 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
84920 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
84921
84922 # as clean-files is given relative to the current directory, this adds
84923 # a $(obj) prefix, except for absolute paths
84924 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
84925 index 1ac414f..a1c1451 100644
84926 --- a/scripts/Makefile.host
84927 +++ b/scripts/Makefile.host
84928 @@ -31,6 +31,7 @@
84929 # Note: Shared libraries consisting of C++ files are not supported
84930
84931 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
84932 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
84933
84934 # C code
84935 # Executables compiled from a single .c file
84936 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
84937 # Shared libaries (only .c supported)
84938 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
84939 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
84940 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
84941 # Remove .so files from "xxx-objs"
84942 host-cobjs := $(filter-out %.so,$(host-cobjs))
84943
84944 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
84945 index 6bf21f8..c0546b3 100644
84946 --- a/scripts/basic/fixdep.c
84947 +++ b/scripts/basic/fixdep.c
84948 @@ -162,7 +162,7 @@ static void grow_config(int len)
84949 /*
84950 * Lookup a value in the configuration string.
84951 */
84952 -static int is_defined_config(const char * name, int len)
84953 +static int is_defined_config(const char * name, unsigned int len)
84954 {
84955 const char * pconfig;
84956 const char * plast = str_config + len_config - len;
84957 @@ -199,7 +199,7 @@ static void clear_config(void)
84958 /*
84959 * Record the use of a CONFIG_* word.
84960 */
84961 -static void use_config(char *m, int slen)
84962 +static void use_config(char *m, unsigned int slen)
84963 {
84964 char s[PATH_MAX];
84965 char *p;
84966 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
84967
84968 static void parse_config_file(char *map, size_t len)
84969 {
84970 - int *end = (int *) (map + len);
84971 + unsigned int *end = (unsigned int *) (map + len);
84972 /* start at +1, so that p can never be < map */
84973 - int *m = (int *) map + 1;
84974 + unsigned int *m = (unsigned int *) map + 1;
84975 char *p, *q;
84976
84977 for (; m < end; m++) {
84978 @@ -371,7 +371,7 @@ static void print_deps(void)
84979 static void traps(void)
84980 {
84981 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
84982 - int *p = (int *)test;
84983 + unsigned int *p = (unsigned int *)test;
84984
84985 if (*p != INT_CONF) {
84986 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
84987 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
84988 new file mode 100644
84989 index 0000000..8729101
84990 --- /dev/null
84991 +++ b/scripts/gcc-plugin.sh
84992 @@ -0,0 +1,2 @@
84993 +#!/bin/sh
84994 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
84995 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
84996 index 62a9025..65b82ad 100644
84997 --- a/scripts/mod/file2alias.c
84998 +++ b/scripts/mod/file2alias.c
84999 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
85000 unsigned long size, unsigned long id_size,
85001 void *symval)
85002 {
85003 - int i;
85004 + unsigned int i;
85005
85006 if (size % id_size || size < id_size) {
85007 if (cross_build != 0)
85008 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
85009 /* USB is special because the bcdDevice can be matched against a numeric range */
85010 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
85011 static void do_usb_entry(struct usb_device_id *id,
85012 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
85013 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
85014 unsigned char range_lo, unsigned char range_hi,
85015 struct module *mod)
85016 {
85017 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
85018 {
85019 unsigned int devlo, devhi;
85020 unsigned char chi, clo;
85021 - int ndigits;
85022 + unsigned int ndigits;
85023
85024 id->match_flags = TO_NATIVE(id->match_flags);
85025 id->idVendor = TO_NATIVE(id->idVendor);
85026 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
85027 for (i = 0; i < count; i++) {
85028 const char *id = (char *)devs[i].id;
85029 char acpi_id[sizeof(devs[0].id)];
85030 - int j;
85031 + unsigned int j;
85032
85033 buf_printf(&mod->dev_table_buf,
85034 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85035 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85036
85037 for (j = 0; j < PNP_MAX_DEVICES; j++) {
85038 const char *id = (char *)card->devs[j].id;
85039 - int i2, j2;
85040 + unsigned int i2, j2;
85041 int dup = 0;
85042
85043 if (!id[0])
85044 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85045 /* add an individual alias for every device entry */
85046 if (!dup) {
85047 char acpi_id[sizeof(card->devs[0].id)];
85048 - int k;
85049 + unsigned int k;
85050
85051 buf_printf(&mod->dev_table_buf,
85052 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85053 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
85054 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
85055 char *alias)
85056 {
85057 - int i, j;
85058 + unsigned int i, j;
85059
85060 sprintf(alias, "dmi*");
85061
85062 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
85063 index 03efeab..0888989 100644
85064 --- a/scripts/mod/modpost.c
85065 +++ b/scripts/mod/modpost.c
85066 @@ -835,6 +835,7 @@ enum mismatch {
85067 INIT_TO_EXIT,
85068 EXIT_TO_INIT,
85069 EXPORT_TO_INIT_EXIT,
85070 + DATA_TO_TEXT
85071 };
85072
85073 struct sectioncheck {
85074 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
85075 .fromsec = { "__ksymtab*", NULL },
85076 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
85077 .mismatch = EXPORT_TO_INIT_EXIT
85078 +},
85079 +/* Do not reference code from writable data */
85080 +{
85081 + .fromsec = { DATA_SECTIONS, NULL },
85082 + .tosec = { TEXT_SECTIONS, NULL },
85083 + .mismatch = DATA_TO_TEXT
85084 }
85085 };
85086
85087 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
85088 continue;
85089 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
85090 continue;
85091 - if (sym->st_value == addr)
85092 - return sym;
85093 /* Find a symbol nearby - addr are maybe negative */
85094 d = sym->st_value - addr;
85095 + if (d == 0)
85096 + return sym;
85097 if (d < 0)
85098 d = addr - sym->st_value;
85099 if (d < distance) {
85100 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
85101 "Fix this by removing the %sannotation of %s "
85102 "or drop the export.\n",
85103 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
85104 + case DATA_TO_TEXT:
85105 +/*
85106 + fprintf(stderr,
85107 + "The variable %s references\n"
85108 + "the %s %s%s%s\n",
85109 + fromsym, to, sec2annotation(tosec), tosym, to_p);
85110 +*/
85111 + break;
85112 case NO_MISMATCH:
85113 /* To get warnings on missing members */
85114 break;
85115 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
85116 static void check_sec_ref(struct module *mod, const char *modname,
85117 struct elf_info *elf)
85118 {
85119 - int i;
85120 + unsigned int i;
85121 Elf_Shdr *sechdrs = elf->sechdrs;
85122
85123 /* Walk through all sections */
85124 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
85125 va_end(ap);
85126 }
85127
85128 -void buf_write(struct buffer *buf, const char *s, int len)
85129 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
85130 {
85131 if (buf->size - buf->pos < len) {
85132 buf->size += len + SZ;
85133 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
85134 if (fstat(fileno(file), &st) < 0)
85135 goto close_write;
85136
85137 - if (st.st_size != b->pos)
85138 + if (st.st_size != (off_t)b->pos)
85139 goto close_write;
85140
85141 tmp = NOFAIL(malloc(b->pos));
85142 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
85143 index 09f58e3..4b66092 100644
85144 --- a/scripts/mod/modpost.h
85145 +++ b/scripts/mod/modpost.h
85146 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
85147
85148 struct buffer {
85149 char *p;
85150 - int pos;
85151 - int size;
85152 + unsigned int pos;
85153 + unsigned int size;
85154 };
85155
85156 void __attribute__((format(printf, 2, 3)))
85157 buf_printf(struct buffer *buf, const char *fmt, ...);
85158
85159 void
85160 -buf_write(struct buffer *buf, const char *s, int len);
85161 +buf_write(struct buffer *buf, const char *s, unsigned int len);
85162
85163 struct module {
85164 struct module *next;
85165 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
85166 index ecf9c7d..d52b38e 100644
85167 --- a/scripts/mod/sumversion.c
85168 +++ b/scripts/mod/sumversion.c
85169 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
85170 goto out;
85171 }
85172
85173 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
85174 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
85175 warn("writing sum in %s failed: %s\n",
85176 filename, strerror(errno));
85177 goto out;
85178 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
85179 index 47bdd2f..d4d4e93 100755
85180 --- a/scripts/package/mkspec
85181 +++ b/scripts/package/mkspec
85182 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
85183 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
85184 echo "%endif"
85185
85186 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
85187 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
85188 echo "%ifarch ia64"
85189 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
85190 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
85191 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
85192 index 5c11312..72742b5 100644
85193 --- a/scripts/pnmtologo.c
85194 +++ b/scripts/pnmtologo.c
85195 @@ -237,14 +237,14 @@ static void write_header(void)
85196 fprintf(out, " * Linux logo %s\n", logoname);
85197 fputs(" */\n\n", out);
85198 fputs("#include <linux/linux_logo.h>\n\n", out);
85199 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
85200 + fprintf(out, "static unsigned char %s_data[] = {\n",
85201 logoname);
85202 }
85203
85204 static void write_footer(void)
85205 {
85206 fputs("\n};\n\n", out);
85207 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
85208 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
85209 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
85210 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
85211 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
85212 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
85213 fputs("\n};\n\n", out);
85214
85215 /* write logo clut */
85216 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
85217 + fprintf(out, "static unsigned char %s_clut[] = {\n",
85218 logoname);
85219 write_hex_cnt = 0;
85220 for (i = 0; i < logo_clutsize; i++) {
85221 diff --git a/scripts/tags.sh b/scripts/tags.sh
85222 index d52f7a0..269eb1b 100755
85223 --- a/scripts/tags.sh
85224 +++ b/scripts/tags.sh
85225 @@ -93,6 +93,11 @@ docscope()
85226 cscope -b -f cscope.out
85227 }
85228
85229 +dogtags()
85230 +{
85231 + all_sources | gtags -f -
85232 +}
85233 +
85234 exuberant()
85235 {
85236 all_sources | xargs $1 -a \
85237 @@ -164,6 +169,10 @@ case "$1" in
85238 docscope
85239 ;;
85240
85241 + "gtags")
85242 + dogtags
85243 + ;;
85244 +
85245 "tags")
85246 rm -f tags
85247 xtags ctags
85248 diff --git a/security/Kconfig b/security/Kconfig
85249 index fb363cd..886ace4 100644
85250 --- a/security/Kconfig
85251 +++ b/security/Kconfig
85252 @@ -4,6 +4,626 @@
85253
85254 menu "Security options"
85255
85256 +source grsecurity/Kconfig
85257 +
85258 +menu "PaX"
85259 +
85260 + config ARCH_TRACK_EXEC_LIMIT
85261 + bool
85262 +
85263 + config PAX_KERNEXEC_PLUGIN
85264 + bool
85265 +
85266 + config PAX_PER_CPU_PGD
85267 + bool
85268 +
85269 + config TASK_SIZE_MAX_SHIFT
85270 + int
85271 + depends on X86_64
85272 + default 47 if !PAX_PER_CPU_PGD
85273 + default 42 if PAX_PER_CPU_PGD
85274 +
85275 + config PAX_ENABLE_PAE
85276 + bool
85277 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
85278 +
85279 +config PAX
85280 + bool "Enable various PaX features"
85281 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
85282 + help
85283 + This allows you to enable various PaX features. PaX adds
85284 + intrusion prevention mechanisms to the kernel that reduce
85285 + the risks posed by exploitable memory corruption bugs.
85286 +
85287 +menu "PaX Control"
85288 + depends on PAX
85289 +
85290 +config PAX_SOFTMODE
85291 + bool 'Support soft mode'
85292 + help
85293 + Enabling this option will allow you to run PaX in soft mode, that
85294 + is, PaX features will not be enforced by default, only on executables
85295 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
85296 + support as they are the only way to mark executables for soft mode use.
85297 +
85298 + Soft mode can be activated by using the "pax_softmode=1" kernel command
85299 + line option on boot. Furthermore you can control various PaX features
85300 + at runtime via the entries in /proc/sys/kernel/pax.
85301 +
85302 +config PAX_EI_PAX
85303 + bool 'Use legacy ELF header marking'
85304 + help
85305 + Enabling this option will allow you to control PaX features on
85306 + a per executable basis via the 'chpax' utility available at
85307 + http://pax.grsecurity.net/. The control flags will be read from
85308 + an otherwise reserved part of the ELF header. This marking has
85309 + numerous drawbacks (no support for soft-mode, toolchain does not
85310 + know about the non-standard use of the ELF header) therefore it
85311 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
85312 + support.
85313 +
85314 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85315 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
85316 + option otherwise they will not get any protection.
85317 +
85318 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
85319 + support as well, they will override the legacy EI_PAX marks.
85320 +
85321 +config PAX_PT_PAX_FLAGS
85322 + bool 'Use ELF program header marking'
85323 + help
85324 + Enabling this option will allow you to control PaX features on
85325 + a per executable basis via the 'paxctl' utility available at
85326 + http://pax.grsecurity.net/. The control flags will be read from
85327 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
85328 + has the benefits of supporting both soft mode and being fully
85329 + integrated into the toolchain (the binutils patch is available
85330 + from http://pax.grsecurity.net).
85331 +
85332 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85333 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85334 + support otherwise they will not get any protection.
85335 +
85336 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85337 + must make sure that the marks are the same if a binary has both marks.
85338 +
85339 + Note that if you enable the legacy EI_PAX marking support as well,
85340 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
85341 +
85342 +config PAX_XATTR_PAX_FLAGS
85343 + bool 'Use filesystem extended attributes marking'
85344 + depends on EXPERT
85345 + select CIFS_XATTR if CIFS
85346 + select EXT2_FS_XATTR if EXT2_FS
85347 + select EXT3_FS_XATTR if EXT3_FS
85348 + select EXT4_FS_XATTR if EXT4_FS
85349 + select JFFS2_FS_XATTR if JFFS2_FS
85350 + select REISERFS_FS_XATTR if REISERFS_FS
85351 + select UBIFS_FS_XATTR if UBIFS_FS
85352 + help
85353 + Enabling this option will allow you to control PaX features on
85354 + a per executable basis via the 'setfattr' utility. The control
85355 + flags will be read from the user.pax.flags extended attribute of
85356 + the file. This marking has the benefit of supporting binary-only
85357 + applications that self-check themselves (e.g., skype) and would
85358 + not tolerate chpax/paxctl changes. The main drawback is that
85359 + extended attributes are not supported by some filesystems (e.g.,
85360 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
85361 + filesystems will lose the extended attributes and these PaX markings.
85362 +
85363 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85364 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85365 + support otherwise they will not get any protection.
85366 +
85367 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85368 + must make sure that the marks are the same if a binary has both marks.
85369 +
85370 + Note that if you enable the legacy EI_PAX marking support as well,
85371 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
85372 +
85373 +choice
85374 + prompt 'MAC system integration'
85375 + default PAX_HAVE_ACL_FLAGS
85376 + help
85377 + Mandatory Access Control systems have the option of controlling
85378 + PaX flags on a per executable basis, choose the method supported
85379 + by your particular system.
85380 +
85381 + - "none": if your MAC system does not interact with PaX,
85382 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
85383 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
85384 +
85385 + NOTE: this option is for developers/integrators only.
85386 +
85387 + config PAX_NO_ACL_FLAGS
85388 + bool 'none'
85389 +
85390 + config PAX_HAVE_ACL_FLAGS
85391 + bool 'direct'
85392 +
85393 + config PAX_HOOK_ACL_FLAGS
85394 + bool 'hook'
85395 +endchoice
85396 +
85397 +endmenu
85398 +
85399 +menu "Non-executable pages"
85400 + depends on PAX
85401 +
85402 +config PAX_NOEXEC
85403 + bool "Enforce non-executable pages"
85404 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
85405 + help
85406 + By design some architectures do not allow for protecting memory
85407 + pages against execution or even if they do, Linux does not make
85408 + use of this feature. In practice this means that if a page is
85409 + readable (such as the stack or heap) it is also executable.
85410 +
85411 + There is a well known exploit technique that makes use of this
85412 + fact and a common programming mistake where an attacker can
85413 + introduce code of his choice somewhere in the attacked program's
85414 + memory (typically the stack or the heap) and then execute it.
85415 +
85416 + If the attacked program was running with different (typically
85417 + higher) privileges than that of the attacker, then he can elevate
85418 + his own privilege level (e.g. get a root shell, write to files for
85419 + which he does not have write access to, etc).
85420 +
85421 + Enabling this option will let you choose from various features
85422 + that prevent the injection and execution of 'foreign' code in
85423 + a program.
85424 +
85425 + This will also break programs that rely on the old behaviour and
85426 + expect that dynamically allocated memory via the malloc() family
85427 + of functions is executable (which it is not). Notable examples
85428 + are the XFree86 4.x server, the java runtime and wine.
85429 +
85430 +config PAX_PAGEEXEC
85431 + bool "Paging based non-executable pages"
85432 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
85433 + select S390_SWITCH_AMODE if S390
85434 + select S390_EXEC_PROTECT if S390
85435 + select ARCH_TRACK_EXEC_LIMIT if X86_32
85436 + help
85437 + This implementation is based on the paging feature of the CPU.
85438 + On i386 without hardware non-executable bit support there is a
85439 + variable but usually low performance impact, however on Intel's
85440 + P4 core based CPUs it is very high so you should not enable this
85441 + for kernels meant to be used on such CPUs.
85442 +
85443 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
85444 + with hardware non-executable bit support there is no performance
85445 + impact, on ppc the impact is negligible.
85446 +
85447 + Note that several architectures require various emulations due to
85448 + badly designed userland ABIs, this will cause a performance impact
85449 + but will disappear as soon as userland is fixed. For example, ppc
85450 + userland MUST have been built with secure-plt by a recent toolchain.
85451 +
85452 +config PAX_SEGMEXEC
85453 + bool "Segmentation based non-executable pages"
85454 + depends on PAX_NOEXEC && X86_32
85455 + help
85456 + This implementation is based on the segmentation feature of the
85457 + CPU and has a very small performance impact, however applications
85458 + will be limited to a 1.5 GB address space instead of the normal
85459 + 3 GB.
85460 +
85461 +config PAX_EMUTRAMP
85462 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
85463 + default y if PARISC
85464 + help
85465 + There are some programs and libraries that for one reason or
85466 + another attempt to execute special small code snippets from
85467 + non-executable memory pages. Most notable examples are the
85468 + signal handler return code generated by the kernel itself and
85469 + the GCC trampolines.
85470 +
85471 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
85472 + such programs will no longer work under your kernel.
85473 +
85474 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
85475 + utilities to enable trampoline emulation for the affected programs
85476 + yet still have the protection provided by the non-executable pages.
85477 +
85478 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
85479 + your system will not even boot.
85480 +
85481 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
85482 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
85483 + for the affected files.
85484 +
85485 + NOTE: enabling this feature *may* open up a loophole in the
85486 + protection provided by non-executable pages that an attacker
85487 + could abuse. Therefore the best solution is to not have any
85488 + files on your system that would require this option. This can
85489 + be achieved by not using libc5 (which relies on the kernel
85490 + signal handler return code) and not using or rewriting programs
85491 + that make use of the nested function implementation of GCC.
85492 + Skilled users can just fix GCC itself so that it implements
85493 + nested function calls in a way that does not interfere with PaX.
85494 +
85495 +config PAX_EMUSIGRT
85496 + bool "Automatically emulate sigreturn trampolines"
85497 + depends on PAX_EMUTRAMP && PARISC
85498 + default y
85499 + help
85500 + Enabling this option will have the kernel automatically detect
85501 + and emulate signal return trampolines executing on the stack
85502 + that would otherwise lead to task termination.
85503 +
85504 + This solution is intended as a temporary one for users with
85505 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
85506 + Modula-3 runtime, etc) or executables linked to such, basically
85507 + everything that does not specify its own SA_RESTORER function in
85508 + normal executable memory like glibc 2.1+ does.
85509 +
85510 + On parisc you MUST enable this option, otherwise your system will
85511 + not even boot.
85512 +
85513 + NOTE: this feature cannot be disabled on a per executable basis
85514 + and since it *does* open up a loophole in the protection provided
85515 + by non-executable pages, the best solution is to not have any
85516 + files on your system that would require this option.
85517 +
85518 +config PAX_MPROTECT
85519 + bool "Restrict mprotect()"
85520 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
85521 + help
85522 + Enabling this option will prevent programs from
85523 + - changing the executable status of memory pages that were
85524 + not originally created as executable,
85525 + - making read-only executable pages writable again,
85526 + - creating executable pages from anonymous memory,
85527 + - making read-only-after-relocations (RELRO) data pages writable again.
85528 +
85529 + You should say Y here to complete the protection provided by
85530 + the enforcement of non-executable pages.
85531 +
85532 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85533 + this feature on a per file basis.
85534 +
85535 +config PAX_MPROTECT_COMPAT
85536 + bool "Use legacy/compat protection demoting (read help)"
85537 + depends on PAX_MPROTECT
85538 + default n
85539 + help
85540 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
85541 + by sending the proper error code to the application. For some broken
85542 + userland, this can cause problems with Python or other applications. The
85543 + current implementation however allows for applications like clamav to
85544 + detect if JIT compilation/execution is allowed and to fall back gracefully
85545 + to an interpreter-based mode if it does not. While we encourage everyone
85546 + to use the current implementation as-is and push upstream to fix broken
85547 + userland (note that the RWX logging option can assist with this), in some
85548 + environments this may not be possible. Having to disable MPROTECT
85549 + completely on certain binaries reduces the security benefit of PaX,
85550 + so this option is provided for those environments to revert to the old
85551 + behavior.
85552 +
85553 +config PAX_ELFRELOCS
85554 + bool "Allow ELF text relocations (read help)"
85555 + depends on PAX_MPROTECT
85556 + default n
85557 + help
85558 + Non-executable pages and mprotect() restrictions are effective
85559 + in preventing the introduction of new executable code into an
85560 + attacked task's address space. There remain only two venues
85561 + for this kind of attack: if the attacker can execute already
85562 + existing code in the attacked task then he can either have it
85563 + create and mmap() a file containing his code or have it mmap()
85564 + an already existing ELF library that does not have position
85565 + independent code in it and use mprotect() on it to make it
85566 + writable and copy his code there. While protecting against
85567 + the former approach is beyond PaX, the latter can be prevented
85568 + by having only PIC ELF libraries on one's system (which do not
85569 + need to relocate their code). If you are sure this is your case,
85570 + as is the case with all modern Linux distributions, then leave
85571 + this option disabled. You should say 'n' here.
85572 +
85573 +config PAX_ETEXECRELOCS
85574 + bool "Allow ELF ET_EXEC text relocations"
85575 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
85576 + select PAX_ELFRELOCS
85577 + default y
85578 + help
85579 + On some architectures there are incorrectly created applications
85580 + that require text relocations and would not work without enabling
85581 + this option. If you are an alpha, ia64 or parisc user, you should
85582 + enable this option and disable it once you have made sure that
85583 + none of your applications need it.
85584 +
85585 +config PAX_EMUPLT
85586 + bool "Automatically emulate ELF PLT"
85587 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
85588 + default y
85589 + help
85590 + Enabling this option will have the kernel automatically detect
85591 + and emulate the Procedure Linkage Table entries in ELF files.
85592 + On some architectures such entries are in writable memory, and
85593 + become non-executable leading to task termination. Therefore
85594 + it is mandatory that you enable this option on alpha, parisc,
85595 + sparc and sparc64, otherwise your system would not even boot.
85596 +
85597 + NOTE: this feature *does* open up a loophole in the protection
85598 + provided by the non-executable pages, therefore the proper
85599 + solution is to modify the toolchain to produce a PLT that does
85600 + not need to be writable.
85601 +
85602 +config PAX_DLRESOLVE
85603 + bool 'Emulate old glibc resolver stub'
85604 + depends on PAX_EMUPLT && SPARC
85605 + default n
85606 + help
85607 + This option is needed if userland has an old glibc (before 2.4)
85608 + that puts a 'save' instruction into the runtime generated resolver
85609 + stub that needs special emulation.
85610 +
85611 +config PAX_KERNEXEC
85612 + bool "Enforce non-executable kernel pages"
85613 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
85614 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
85615 + select PAX_KERNEXEC_PLUGIN if X86_64
85616 + help
85617 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
85618 + that is, enabling this option will make it harder to inject
85619 + and execute 'foreign' code in kernel memory itself.
85620 +
85621 + Note that on x86_64 kernels there is a known regression when
85622 + this feature and KVM/VMX are both enabled in the host kernel.
85623 +
85624 +choice
85625 + prompt "Return Address Instrumentation Method"
85626 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
85627 + depends on PAX_KERNEXEC_PLUGIN
85628 + help
85629 + Select the method used to instrument function pointer dereferences.
85630 + Note that binary modules cannot be instrumented by this approach.
85631 +
85632 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
85633 + bool "bts"
85634 + help
85635 + This method is compatible with binary only modules but has
85636 + a higher runtime overhead.
85637 +
85638 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
85639 + bool "or"
85640 + depends on !PARAVIRT
85641 + help
85642 + This method is incompatible with binary only modules but has
85643 + a lower runtime overhead.
85644 +endchoice
85645 +
85646 +config PAX_KERNEXEC_PLUGIN_METHOD
85647 + string
85648 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
85649 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
85650 + default ""
85651 +
85652 +config PAX_KERNEXEC_MODULE_TEXT
85653 + int "Minimum amount of memory reserved for module code"
85654 + default "4"
85655 + depends on PAX_KERNEXEC && X86_32 && MODULES
85656 + help
85657 + Due to implementation details the kernel must reserve a fixed
85658 + amount of memory for module code at compile time that cannot be
85659 + changed at runtime. Here you can specify the minimum amount
85660 + in MB that will be reserved. Due to the same implementation
85661 + details this size will always be rounded up to the next 2/4 MB
85662 + boundary (depends on PAE) so the actually available memory for
85663 + module code will usually be more than this minimum.
85664 +
85665 + The default 4 MB should be enough for most users but if you have
85666 + an excessive number of modules (e.g., most distribution configs
85667 + compile many drivers as modules) or use huge modules such as
85668 + nvidia's kernel driver, you will need to adjust this amount.
85669 + A good rule of thumb is to look at your currently loaded kernel
85670 + modules and add up their sizes.
85671 +
85672 +endmenu
85673 +
85674 +menu "Address Space Layout Randomization"
85675 + depends on PAX
85676 +
85677 +config PAX_ASLR
85678 + bool "Address Space Layout Randomization"
85679 + help
85680 + Many if not most exploit techniques rely on the knowledge of
85681 + certain addresses in the attacked program. The following options
85682 + will allow the kernel to apply a certain amount of randomization
85683 + to specific parts of the program thereby forcing an attacker to
85684 + guess them in most cases. Any failed guess will most likely crash
85685 + the attacked program which allows the kernel to detect such attempts
85686 + and react on them. PaX itself provides no reaction mechanisms,
85687 + instead it is strongly encouraged that you make use of Nergal's
85688 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
85689 + (http://www.grsecurity.net/) built-in crash detection features or
85690 + develop one yourself.
85691 +
85692 + By saying Y here you can choose to randomize the following areas:
85693 + - top of the task's kernel stack
85694 + - top of the task's userland stack
85695 + - base address for mmap() requests that do not specify one
85696 + (this includes all libraries)
85697 + - base address of the main executable
85698 +
85699 + It is strongly recommended to say Y here as address space layout
85700 + randomization has negligible impact on performance yet it provides
85701 + a very effective protection.
85702 +
85703 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85704 + this feature on a per file basis.
85705 +
85706 +config PAX_RANDKSTACK
85707 + bool "Randomize kernel stack base"
85708 + depends on X86_TSC && X86
85709 + help
85710 + By saying Y here the kernel will randomize every task's kernel
85711 + stack on every system call. This will not only force an attacker
85712 + to guess it but also prevent him from making use of possible
85713 + leaked information about it.
85714 +
85715 + Since the kernel stack is a rather scarce resource, randomization
85716 + may cause unexpected stack overflows, therefore you should very
85717 + carefully test your system. Note that once enabled in the kernel
85718 + configuration, this feature cannot be disabled on a per file basis.
85719 +
85720 +config PAX_RANDUSTACK
85721 + bool "Randomize user stack base"
85722 + depends on PAX_ASLR
85723 + help
85724 + By saying Y here the kernel will randomize every task's userland
85725 + stack. The randomization is done in two steps where the second
85726 + one may apply a big amount of shift to the top of the stack and
85727 + cause problems for programs that want to use lots of memory (more
85728 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
85729 + For this reason the second step can be controlled by 'chpax' or
85730 + 'paxctl' on a per file basis.
85731 +
85732 +config PAX_RANDMMAP
85733 + bool "Randomize mmap() base"
85734 + depends on PAX_ASLR
85735 + help
85736 + By saying Y here the kernel will use a randomized base address for
85737 + mmap() requests that do not specify one themselves. As a result
85738 + all dynamically loaded libraries will appear at random addresses
85739 + and therefore be harder to exploit by a technique where an attacker
85740 + attempts to execute library code for his purposes (e.g. spawn a
85741 + shell from an exploited program that is running at an elevated
85742 + privilege level).
85743 +
85744 + Furthermore, if a program is relinked as a dynamic ELF file, its
85745 + base address will be randomized as well, completing the full
85746 + randomization of the address space layout. Attacking such programs
85747 + becomes a guess game. You can find an example of doing this at
85748 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
85749 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
85750 +
85751 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
85752 + feature on a per file basis.
85753 +
85754 +endmenu
85755 +
85756 +menu "Miscellaneous hardening features"
85757 +
85758 +config PAX_MEMORY_SANITIZE
85759 + bool "Sanitize all freed memory"
85760 + depends on !HIBERNATION
85761 + help
85762 + By saying Y here the kernel will erase memory pages as soon as they
85763 + are freed. This in turn reduces the lifetime of data stored in the
85764 + pages, making it less likely that sensitive information such as
85765 + passwords, cryptographic secrets, etc stay in memory for too long.
85766 +
85767 + This is especially useful for programs whose runtime is short, long
85768 + lived processes and the kernel itself benefit from this as long as
85769 + they operate on whole memory pages and ensure timely freeing of pages
85770 + that may hold sensitive information.
85771 +
85772 + The tradeoff is performance impact, on a single CPU system kernel
85773 + compilation sees a 3% slowdown, other systems and workloads may vary
85774 + and you are advised to test this feature on your expected workload
85775 + before deploying it.
85776 +
85777 + Note that this feature does not protect data stored in live pages,
85778 + e.g., process memory swapped to disk may stay there for a long time.
85779 +
85780 +config PAX_MEMORY_STACKLEAK
85781 + bool "Sanitize kernel stack"
85782 + depends on X86
85783 + help
85784 + By saying Y here the kernel will erase the kernel stack before it
85785 + returns from a system call. This in turn reduces the information
85786 + that a kernel stack leak bug can reveal.
85787 +
85788 + Note that such a bug can still leak information that was put on
85789 + the stack by the current system call (the one eventually triggering
85790 + the bug) but traces of earlier system calls on the kernel stack
85791 + cannot leak anymore.
85792 +
85793 + The tradeoff is performance impact, on a single CPU system kernel
85794 + compilation sees a 1% slowdown, other systems and workloads may vary
85795 + and you are advised to test this feature on your expected workload
85796 + before deploying it.
85797 +
85798 + Note: full support for this feature requires gcc with plugin support
85799 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
85800 + versions means that functions with large enough stack frames may
85801 + leave uninitialized memory behind that may be exposed to a later
85802 + syscall leaking the stack.
85803 +
85804 +config PAX_MEMORY_UDEREF
85805 + bool "Prevent invalid userland pointer dereference"
85806 + depends on X86 && !UML_X86 && !XEN
85807 + select PAX_PER_CPU_PGD if X86_64
85808 + help
85809 + By saying Y here the kernel will be prevented from dereferencing
85810 + userland pointers in contexts where the kernel expects only kernel
85811 + pointers. This is both a useful runtime debugging feature and a
85812 + security measure that prevents exploiting a class of kernel bugs.
85813 +
85814 + The tradeoff is that some virtualization solutions may experience
85815 + a huge slowdown and therefore you should not enable this feature
85816 + for kernels meant to run in such environments. Whether a given VM
85817 + solution is affected or not is best determined by simply trying it
85818 + out, the performance impact will be obvious right on boot as this
85819 + mechanism engages from very early on. A good rule of thumb is that
85820 + VMs running on CPUs without hardware virtualization support (i.e.,
85821 + the majority of IA-32 CPUs) will likely experience the slowdown.
85822 +
85823 +config PAX_REFCOUNT
85824 + bool "Prevent various kernel object reference counter overflows"
85825 + depends on GRKERNSEC && (X86 || SPARC64)
85826 + help
85827 + By saying Y here the kernel will detect and prevent overflowing
85828 + various (but not all) kinds of object reference counters. Such
85829 + overflows can normally occur due to bugs only and are often, if
85830 + not always, exploitable.
85831 +
85832 + The tradeoff is that data structures protected by an overflowed
85833 + refcount will never be freed and therefore will leak memory. Note
85834 + that this leak also happens even without this protection but in
85835 + that case the overflow can eventually trigger the freeing of the
85836 + data structure while it is still being used elsewhere, resulting
85837 + in the exploitable situation that this feature prevents.
85838 +
85839 + Since this has a negligible performance impact, you should enable
85840 + this feature.
85841 +
85842 +config PAX_USERCOPY
85843 + bool "Harden heap object copies between kernel and userland"
85844 + depends on X86 || PPC || SPARC || ARM
85845 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
85846 + help
85847 + By saying Y here the kernel will enforce the size of heap objects
85848 + when they are copied in either direction between the kernel and
85849 + userland, even if only a part of the heap object is copied.
85850 +
85851 + Specifically, this checking prevents information leaking from the
85852 + kernel heap during kernel to userland copies (if the kernel heap
85853 + object is otherwise fully initialized) and prevents kernel heap
85854 + overflows during userland to kernel copies.
85855 +
85856 + Note that the current implementation provides the strictest bounds
85857 + checks for the SLUB allocator.
85858 +
85859 + Enabling this option also enables per-slab cache protection against
85860 + data in a given cache being copied into/out of via userland
85861 + accessors. Though the whitelist of regions will be reduced over
85862 + time, it notably protects important data structures like task structs.
85863 +
85864 +
85865 + If frame pointers are enabled on x86, this option will also
85866 + restrict copies into and out of the kernel stack to local variables
85867 + within a single frame.
85868 +
85869 + Since this has a negligible performance impact, you should enable
85870 + this feature.
85871 +
85872 +endmenu
85873 +
85874 +endmenu
85875 +
85876 config KEYS
85877 bool "Enable access key retention support"
85878 help
85879 @@ -146,7 +766,7 @@ config INTEL_TXT
85880 config LSM_MMAP_MIN_ADDR
85881 int "Low address space for LSM to protect from user allocation"
85882 depends on SECURITY && SECURITY_SELINUX
85883 - default 65536
85884 + default 32768
85885 help
85886 This is the portion of low virtual memory which should be protected
85887 from userspace allocation. Keeping a user from writing to low pages
85888 diff --git a/security/capability.c b/security/capability.c
85889 index fce07a7..5f12858 100644
85890 --- a/security/capability.c
85891 +++ b/security/capability.c
85892 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
85893 }
85894 #endif /* CONFIG_AUDIT */
85895
85896 -struct security_operations default_security_ops = {
85897 +struct security_operations default_security_ops __read_only = {
85898 .name = "default",
85899 };
85900
85901 diff --git a/security/commoncap.c b/security/commoncap.c
85902 index fe30751..aaba312 100644
85903 --- a/security/commoncap.c
85904 +++ b/security/commoncap.c
85905 @@ -27,6 +27,8 @@
85906 #include <linux/sched.h>
85907 #include <linux/prctl.h>
85908 #include <linux/securebits.h>
85909 +#include <linux/syslog.h>
85910 +#include <net/sock.h>
85911
85912 /*
85913 * If a non-root user executes a setuid-root binary in
85914 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
85915 }
85916 }
85917
85918 +#ifdef CONFIG_NET
85919 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
85920 +#endif
85921 +
85922 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
85923 {
85924 +#ifdef CONFIG_NET
85925 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
85926 +#else
85927 NETLINK_CB(skb).eff_cap = current_cap();
85928 +#endif
85929 +
85930 return 0;
85931 }
85932
85933 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
85934 {
85935 const struct cred *cred = current_cred();
85936
85937 + if (gr_acl_enable_at_secure())
85938 + return 1;
85939 +
85940 if (cred->uid != 0) {
85941 if (bprm->cap_effective)
85942 return 1;
85943 @@ -956,13 +970,18 @@ error:
85944 /**
85945 * cap_syslog - Determine whether syslog function is permitted
85946 * @type: Function requested
85947 + * @from_file: Whether this request came from an open file (i.e. /proc)
85948 *
85949 * Determine whether the current process is permitted to use a particular
85950 * syslog function, returning 0 if permission is granted, -ve if not.
85951 */
85952 -int cap_syslog(int type)
85953 +int cap_syslog(int type, bool from_file)
85954 {
85955 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
85956 + /* /proc/kmsg can be opened by CAP_SYS_ADMIN */
85957 + if (type != SYSLOG_ACTION_OPEN && from_file)
85958 + return 0;
85959 + if ((type != SYSLOG_ACTION_READ_ALL &&
85960 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
85961 return -EPERM;
85962 return 0;
85963 }
85964 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
85965 index 165eb53..b1db4eb 100644
85966 --- a/security/integrity/ima/ima.h
85967 +++ b/security/integrity/ima/ima.h
85968 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85969 extern spinlock_t ima_queue_lock;
85970
85971 struct ima_h_table {
85972 - atomic_long_t len; /* number of stored measurements in the list */
85973 - atomic_long_t violations;
85974 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
85975 + atomic_long_unchecked_t violations;
85976 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
85977 };
85978 extern struct ima_h_table ima_htable;
85979 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
85980 index 852bf85..35d6df3 100644
85981 --- a/security/integrity/ima/ima_api.c
85982 +++ b/security/integrity/ima/ima_api.c
85983 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85984 int result;
85985
85986 /* can overflow, only indicator */
85987 - atomic_long_inc(&ima_htable.violations);
85988 + atomic_long_inc_unchecked(&ima_htable.violations);
85989
85990 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
85991 if (!entry) {
85992 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
85993 index 0c72c9c..433e29b 100644
85994 --- a/security/integrity/ima/ima_fs.c
85995 +++ b/security/integrity/ima/ima_fs.c
85996 @@ -27,12 +27,12 @@
85997 static int valid_policy = 1;
85998 #define TMPBUFLEN 12
85999 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
86000 - loff_t *ppos, atomic_long_t *val)
86001 + loff_t *ppos, atomic_long_unchecked_t *val)
86002 {
86003 char tmpbuf[TMPBUFLEN];
86004 ssize_t len;
86005
86006 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
86007 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
86008 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
86009 }
86010
86011 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
86012 index e19316d..339f7ae 100644
86013 --- a/security/integrity/ima/ima_queue.c
86014 +++ b/security/integrity/ima/ima_queue.c
86015 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
86016 INIT_LIST_HEAD(&qe->later);
86017 list_add_tail_rcu(&qe->later, &ima_measurements);
86018
86019 - atomic_long_inc(&ima_htable.len);
86020 + atomic_long_inc_unchecked(&ima_htable.len);
86021 key = ima_hash_key(entry->digest);
86022 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
86023 return 0;
86024 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
86025 index e031952..c9a535d 100644
86026 --- a/security/keys/keyring.c
86027 +++ b/security/keys/keyring.c
86028 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
86029 ret = -EFAULT;
86030
86031 for (loop = 0; loop < klist->nkeys; loop++) {
86032 + key_serial_t serial;
86033 key = klist->keys[loop];
86034 + serial = key->serial;
86035
86036 tmp = sizeof(key_serial_t);
86037 if (tmp > buflen)
86038 tmp = buflen;
86039
86040 - if (copy_to_user(buffer,
86041 - &key->serial,
86042 - tmp) != 0)
86043 + if (copy_to_user(buffer, &serial, tmp))
86044 goto error;
86045
86046 buflen -= tmp;
86047 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
86048 index 931cfda..e71808a 100644
86049 --- a/security/keys/process_keys.c
86050 +++ b/security/keys/process_keys.c
86051 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
86052 ret = install_process_keyring_to_cred(new);
86053 if (ret < 0) {
86054 abort_creds(new);
86055 - return ret != -EEXIST ?: 0;
86056 + return ret != -EEXIST ? ret : 0;
86057 }
86058
86059 return commit_creds(new);
86060 diff --git a/security/min_addr.c b/security/min_addr.c
86061 index d9f9425..c28cef4 100644
86062 --- a/security/min_addr.c
86063 +++ b/security/min_addr.c
86064 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
86065 */
86066 static void update_mmap_min_addr(void)
86067 {
86068 +#ifndef SPARC
86069 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
86070 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
86071 mmap_min_addr = dac_mmap_min_addr;
86072 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
86073 #else
86074 mmap_min_addr = dac_mmap_min_addr;
86075 #endif
86076 +#endif
86077 }
86078
86079 /*
86080 diff --git a/security/root_plug.c b/security/root_plug.c
86081 index 2f7ffa6..0455400 100644
86082 --- a/security/root_plug.c
86083 +++ b/security/root_plug.c
86084 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
86085 return 0;
86086 }
86087
86088 -static struct security_operations rootplug_security_ops = {
86089 +static struct security_operations rootplug_security_ops __read_only = {
86090 .bprm_check_security = rootplug_bprm_check_security,
86091 };
86092
86093 diff --git a/security/security.c b/security/security.c
86094 index c4c6732..7abf13b 100644
86095 --- a/security/security.c
86096 +++ b/security/security.c
86097 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
86098 extern struct security_operations default_security_ops;
86099 extern void security_fixup_ops(struct security_operations *ops);
86100
86101 -struct security_operations *security_ops; /* Initialized to NULL */
86102 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
86103
86104 static inline int verify(struct security_operations *ops)
86105 {
86106 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
86107 * If there is already a security module registered with the kernel,
86108 * an error will be returned. Otherwise %0 is returned on success.
86109 */
86110 -int register_security(struct security_operations *ops)
86111 +int __init register_security(struct security_operations *ops)
86112 {
86113 if (verify(ops)) {
86114 printk(KERN_DEBUG "%s could not verify "
86115 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
86116 return security_ops->quota_on(dentry);
86117 }
86118
86119 -int security_syslog(int type)
86120 +int security_syslog(int type, bool from_file)
86121 {
86122 - return security_ops->syslog(type);
86123 + return security_ops->syslog(type, from_file);
86124 }
86125
86126 int security_settime(struct timespec *ts, struct timezone *tz)
86127 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
86128 index a106754..ca3a589 100644
86129 --- a/security/selinux/hooks.c
86130 +++ b/security/selinux/hooks.c
86131 @@ -76,6 +76,7 @@
86132 #include <linux/selinux.h>
86133 #include <linux/mutex.h>
86134 #include <linux/posix-timers.h>
86135 +#include <linux/syslog.h>
86136
86137 #include "avc.h"
86138 #include "objsec.h"
86139 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
86140 * Minimal support for a secondary security module,
86141 * just to allow the use of the capability module.
86142 */
86143 -static struct security_operations *secondary_ops;
86144 +static struct security_operations *secondary_ops __read_only;
86145
86146 /* Lists of inode and superblock security structures initialized
86147 before the policy was loaded. */
86148 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
86149 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
86150 }
86151
86152 -static int selinux_syslog(int type)
86153 +static int selinux_syslog(int type, bool from_file)
86154 {
86155 int rc;
86156
86157 - rc = cap_syslog(type);
86158 + rc = cap_syslog(type, from_file);
86159 if (rc)
86160 return rc;
86161
86162 switch (type) {
86163 - case 3: /* Read last kernel messages */
86164 - case 10: /* Return size of the log buffer */
86165 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
86166 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
86167 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
86168 break;
86169 - case 6: /* Disable logging to console */
86170 - case 7: /* Enable logging to console */
86171 - case 8: /* Set level of messages printed to console */
86172 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
86173 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
86174 + /* Set level of messages printed to console */
86175 + case SYSLOG_ACTION_CONSOLE_LEVEL:
86176 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
86177 break;
86178 - case 0: /* Close log */
86179 - case 1: /* Open log */
86180 - case 2: /* Read from log */
86181 - case 4: /* Read/clear last kernel messages */
86182 - case 5: /* Clear ring buffer */
86183 + case SYSLOG_ACTION_CLOSE: /* Close log */
86184 + case SYSLOG_ACTION_OPEN: /* Open log */
86185 + case SYSLOG_ACTION_READ: /* Read from log */
86186 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
86187 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
86188 default:
86189 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
86190 break;
86191 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
86192
86193 #endif
86194
86195 -static struct security_operations selinux_ops = {
86196 +static struct security_operations selinux_ops __read_only = {
86197 .name = "selinux",
86198
86199 .ptrace_access_check = selinux_ptrace_access_check,
86200 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
86201 avc_disable();
86202
86203 /* Reset security_ops to the secondary module, dummy or capability. */
86204 + pax_open_kernel();
86205 security_ops = secondary_ops;
86206 + pax_close_kernel();
86207
86208 /* Unregister netfilter hooks. */
86209 selinux_nf_ip_exit();
86210 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
86211 index 13128f9..c23c736 100644
86212 --- a/security/selinux/include/xfrm.h
86213 +++ b/security/selinux/include/xfrm.h
86214 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
86215
86216 static inline void selinux_xfrm_notify_policyload(void)
86217 {
86218 - atomic_inc(&flow_cache_genid);
86219 + atomic_inc_unchecked(&flow_cache_genid);
86220 }
86221 #else
86222 static inline int selinux_xfrm_enabled(void)
86223 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
86224 index ff17820..d68084c 100644
86225 --- a/security/selinux/ss/services.c
86226 +++ b/security/selinux/ss/services.c
86227 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
86228 int rc = 0;
86229 struct policy_file file = { data, len }, *fp = &file;
86230
86231 + pax_track_stack();
86232 +
86233 if (!ss_initialized) {
86234 avtab_cache_init();
86235 if (policydb_read(&policydb, fp)) {
86236 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
86237 index c33b6bb..b51f19e 100644
86238 --- a/security/smack/smack_lsm.c
86239 +++ b/security/smack/smack_lsm.c
86240 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
86241 *
86242 * Returns 0 on success, error code otherwise.
86243 */
86244 -static int smack_syslog(int type)
86245 +static int smack_syslog(int type, bool from_file)
86246 {
86247 int rc;
86248 char *sp = current_security();
86249
86250 - rc = cap_syslog(type);
86251 + rc = cap_syslog(type, from_file);
86252 if (rc != 0)
86253 return rc;
86254
86255 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
86256 return 0;
86257 }
86258
86259 -struct security_operations smack_ops = {
86260 +struct security_operations smack_ops __read_only = {
86261 .name = "smack",
86262
86263 .ptrace_access_check = smack_ptrace_access_check,
86264 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
86265 index 9548a09..9a5f384 100644
86266 --- a/security/tomoyo/tomoyo.c
86267 +++ b/security/tomoyo/tomoyo.c
86268 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
86269 * tomoyo_security_ops is a "struct security_operations" which is used for
86270 * registering TOMOYO.
86271 */
86272 -static struct security_operations tomoyo_security_ops = {
86273 +static struct security_operations tomoyo_security_ops __read_only = {
86274 .name = "tomoyo",
86275 .cred_alloc_blank = tomoyo_cred_alloc_blank,
86276 .cred_prepare = tomoyo_cred_prepare,
86277 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
86278 index 84bb07d..c2ab6b6 100644
86279 --- a/sound/aoa/codecs/onyx.c
86280 +++ b/sound/aoa/codecs/onyx.c
86281 @@ -53,7 +53,7 @@ struct onyx {
86282 spdif_locked:1,
86283 analog_locked:1,
86284 original_mute:2;
86285 - int open_count;
86286 + local_t open_count;
86287 struct codec_info *codec_info;
86288
86289 /* mutex serializes concurrent access to the device
86290 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
86291 struct onyx *onyx = cii->codec_data;
86292
86293 mutex_lock(&onyx->mutex);
86294 - onyx->open_count++;
86295 + local_inc(&onyx->open_count);
86296 mutex_unlock(&onyx->mutex);
86297
86298 return 0;
86299 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
86300 struct onyx *onyx = cii->codec_data;
86301
86302 mutex_lock(&onyx->mutex);
86303 - onyx->open_count--;
86304 - if (!onyx->open_count)
86305 + if (local_dec_and_test(&onyx->open_count))
86306 onyx->spdif_locked = onyx->analog_locked = 0;
86307 mutex_unlock(&onyx->mutex);
86308
86309 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
86310 index ffd2025..df062c9 100644
86311 --- a/sound/aoa/codecs/onyx.h
86312 +++ b/sound/aoa/codecs/onyx.h
86313 @@ -11,6 +11,7 @@
86314 #include <linux/i2c.h>
86315 #include <asm/pmac_low_i2c.h>
86316 #include <asm/prom.h>
86317 +#include <asm/local.h>
86318
86319 /* PCM3052 register definitions */
86320
86321 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
86322 index d9c9635..bc0a5a2 100644
86323 --- a/sound/core/oss/pcm_oss.c
86324 +++ b/sound/core/oss/pcm_oss.c
86325 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
86326 }
86327 } else {
86328 tmp = snd_pcm_oss_write2(substream,
86329 - (const char __force *)buf,
86330 + (const char __force_kernel *)buf,
86331 runtime->oss.period_bytes, 0);
86332 if (tmp <= 0)
86333 goto err;
86334 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
86335 xfer += tmp;
86336 runtime->oss.buffer_used -= tmp;
86337 } else {
86338 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
86339 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
86340 runtime->oss.period_bytes, 0);
86341 if (tmp <= 0)
86342 goto err;
86343 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
86344 index 038232d..7dd9e5c 100644
86345 --- a/sound/core/pcm_compat.c
86346 +++ b/sound/core/pcm_compat.c
86347 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
86348 int err;
86349
86350 fs = snd_enter_user();
86351 - err = snd_pcm_delay(substream, &delay);
86352 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
86353 snd_leave_user(fs);
86354 if (err < 0)
86355 return err;
86356 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
86357 index e6d2d97..4843949 100644
86358 --- a/sound/core/pcm_native.c
86359 +++ b/sound/core/pcm_native.c
86360 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
86361 switch (substream->stream) {
86362 case SNDRV_PCM_STREAM_PLAYBACK:
86363 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
86364 - (void __user *)arg);
86365 + (void __force_user *)arg);
86366 break;
86367 case SNDRV_PCM_STREAM_CAPTURE:
86368 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
86369 - (void __user *)arg);
86370 + (void __force_user *)arg);
86371 break;
86372 default:
86373 result = -EINVAL;
86374 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
86375 index 1f99767..14636533 100644
86376 --- a/sound/core/seq/seq_device.c
86377 +++ b/sound/core/seq/seq_device.c
86378 @@ -63,7 +63,7 @@ struct ops_list {
86379 int argsize; /* argument size */
86380
86381 /* operators */
86382 - struct snd_seq_dev_ops ops;
86383 + struct snd_seq_dev_ops *ops;
86384
86385 /* registred devices */
86386 struct list_head dev_list; /* list of devices */
86387 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
86388
86389 mutex_lock(&ops->reg_mutex);
86390 /* copy driver operators */
86391 - ops->ops = *entry;
86392 + ops->ops = entry;
86393 ops->driver |= DRIVER_LOADED;
86394 ops->argsize = argsize;
86395
86396 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
86397 dev->name, ops->id, ops->argsize, dev->argsize);
86398 return -EINVAL;
86399 }
86400 - if (ops->ops.init_device(dev) >= 0) {
86401 + if (ops->ops->init_device(dev) >= 0) {
86402 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
86403 ops->num_init_devices++;
86404 } else {
86405 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
86406 dev->name, ops->id, ops->argsize, dev->argsize);
86407 return -EINVAL;
86408 }
86409 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
86410 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
86411 dev->status = SNDRV_SEQ_DEVICE_FREE;
86412 dev->driver_data = NULL;
86413 ops->num_init_devices--;
86414 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
86415 index 9284829..ac8e8b2 100644
86416 --- a/sound/drivers/mts64.c
86417 +++ b/sound/drivers/mts64.c
86418 @@ -27,6 +27,7 @@
86419 #include <sound/initval.h>
86420 #include <sound/rawmidi.h>
86421 #include <sound/control.h>
86422 +#include <asm/local.h>
86423
86424 #define CARD_NAME "Miditerminal 4140"
86425 #define DRIVER_NAME "MTS64"
86426 @@ -65,7 +66,7 @@ struct mts64 {
86427 struct pardevice *pardev;
86428 int pardev_claimed;
86429
86430 - int open_count;
86431 + local_t open_count;
86432 int current_midi_output_port;
86433 int current_midi_input_port;
86434 u8 mode[MTS64_NUM_INPUT_PORTS];
86435 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86436 {
86437 struct mts64 *mts = substream->rmidi->private_data;
86438
86439 - if (mts->open_count == 0) {
86440 + if (local_read(&mts->open_count) == 0) {
86441 /* We don't need a spinlock here, because this is just called
86442 if the device has not been opened before.
86443 So there aren't any IRQs from the device */
86444 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86445
86446 msleep(50);
86447 }
86448 - ++(mts->open_count);
86449 + local_inc(&mts->open_count);
86450
86451 return 0;
86452 }
86453 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86454 struct mts64 *mts = substream->rmidi->private_data;
86455 unsigned long flags;
86456
86457 - --(mts->open_count);
86458 - if (mts->open_count == 0) {
86459 + if (local_dec_return(&mts->open_count) == 0) {
86460 /* We need the spinlock_irqsave here because we can still
86461 have IRQs at this point */
86462 spin_lock_irqsave(&mts->lock, flags);
86463 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86464
86465 msleep(500);
86466
86467 - } else if (mts->open_count < 0)
86468 - mts->open_count = 0;
86469 + } else if (local_read(&mts->open_count) < 0)
86470 + local_set(&mts->open_count, 0);
86471
86472 return 0;
86473 }
86474 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
86475 index 01997f2..cbc1195 100644
86476 --- a/sound/drivers/opl4/opl4_lib.c
86477 +++ b/sound/drivers/opl4/opl4_lib.c
86478 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
86479 MODULE_DESCRIPTION("OPL4 driver");
86480 MODULE_LICENSE("GPL");
86481
86482 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
86483 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
86484 {
86485 int timeout = 10;
86486 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
86487 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
86488 index 60158e2..0a0cc1a 100644
86489 --- a/sound/drivers/portman2x4.c
86490 +++ b/sound/drivers/portman2x4.c
86491 @@ -46,6 +46,7 @@
86492 #include <sound/initval.h>
86493 #include <sound/rawmidi.h>
86494 #include <sound/control.h>
86495 +#include <asm/local.h>
86496
86497 #define CARD_NAME "Portman 2x4"
86498 #define DRIVER_NAME "portman"
86499 @@ -83,7 +84,7 @@ struct portman {
86500 struct pardevice *pardev;
86501 int pardev_claimed;
86502
86503 - int open_count;
86504 + local_t open_count;
86505 int mode[PORTMAN_NUM_INPUT_PORTS];
86506 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
86507 };
86508 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
86509 index 02f79d2..8691d43 100644
86510 --- a/sound/isa/cmi8330.c
86511 +++ b/sound/isa/cmi8330.c
86512 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
86513
86514 struct snd_pcm *pcm;
86515 struct snd_cmi8330_stream {
86516 - struct snd_pcm_ops ops;
86517 + snd_pcm_ops_no_const ops;
86518 snd_pcm_open_callback_t open;
86519 void *private_data; /* sb or wss */
86520 } streams[2];
86521 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
86522 index 733b014..56ce96f 100644
86523 --- a/sound/oss/sb_audio.c
86524 +++ b/sound/oss/sb_audio.c
86525 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
86526 buf16 = (signed short *)(localbuf + localoffs);
86527 while (c)
86528 {
86529 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86530 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86531 if (copy_from_user(lbuf8,
86532 userbuf+useroffs + p,
86533 locallen))
86534 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
86535 index 3136c88..28ad950 100644
86536 --- a/sound/oss/swarm_cs4297a.c
86537 +++ b/sound/oss/swarm_cs4297a.c
86538 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
86539 {
86540 struct cs4297a_state *s;
86541 u32 pwr, id;
86542 - mm_segment_t fs;
86543 int rval;
86544 #ifndef CONFIG_BCM_CS4297A_CSWARM
86545 u64 cfg;
86546 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
86547 if (!rval) {
86548 char *sb1250_duart_present;
86549
86550 +#if 0
86551 + mm_segment_t fs;
86552 fs = get_fs();
86553 set_fs(KERNEL_DS);
86554 -#if 0
86555 val = SOUND_MASK_LINE;
86556 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
86557 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
86558 val = initvol[i].vol;
86559 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
86560 }
86561 + set_fs(fs);
86562 // cs4297a_write_ac97(s, 0x18, 0x0808);
86563 #else
86564 // cs4297a_write_ac97(s, 0x5e, 0x180);
86565 cs4297a_write_ac97(s, 0x02, 0x0808);
86566 cs4297a_write_ac97(s, 0x18, 0x0808);
86567 #endif
86568 - set_fs(fs);
86569
86570 list_add(&s->list, &cs4297a_devs);
86571
86572 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
86573 index 78288db..0406809 100644
86574 --- a/sound/pci/ac97/ac97_codec.c
86575 +++ b/sound/pci/ac97/ac97_codec.c
86576 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
86577 }
86578
86579 /* build_ops to do nothing */
86580 -static struct snd_ac97_build_ops null_build_ops;
86581 +static const struct snd_ac97_build_ops null_build_ops;
86582
86583 #ifdef CONFIG_SND_AC97_POWER_SAVE
86584 static void do_update_power(struct work_struct *work)
86585 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
86586 index eeb2e23..82bf625 100644
86587 --- a/sound/pci/ac97/ac97_patch.c
86588 +++ b/sound/pci/ac97/ac97_patch.c
86589 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
86590 return 0;
86591 }
86592
86593 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86594 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86595 .build_spdif = patch_yamaha_ymf743_build_spdif,
86596 .build_3d = patch_yamaha_ymf7x3_3d,
86597 };
86598 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
86599 return 0;
86600 }
86601
86602 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86603 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86604 .build_3d = patch_yamaha_ymf7x3_3d,
86605 .build_post_spdif = patch_yamaha_ymf753_post_spdif
86606 };
86607 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
86608 return 0;
86609 }
86610
86611 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86612 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86613 .build_specific = patch_wolfson_wm9703_specific,
86614 };
86615
86616 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
86617 return 0;
86618 }
86619
86620 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86621 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86622 .build_specific = patch_wolfson_wm9704_specific,
86623 };
86624
86625 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
86626 return 0;
86627 }
86628
86629 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86630 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86631 .build_specific = patch_wolfson_wm9705_specific,
86632 };
86633
86634 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
86635 return 0;
86636 }
86637
86638 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86639 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86640 .build_specific = patch_wolfson_wm9711_specific,
86641 };
86642
86643 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
86644 }
86645 #endif
86646
86647 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86648 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86649 .build_specific = patch_wolfson_wm9713_specific,
86650 .build_3d = patch_wolfson_wm9713_3d,
86651 #ifdef CONFIG_PM
86652 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
86653 return 0;
86654 }
86655
86656 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86657 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86658 .build_3d = patch_sigmatel_stac9700_3d,
86659 .build_specific = patch_sigmatel_stac97xx_specific
86660 };
86661 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
86662 return patch_sigmatel_stac97xx_specific(ac97);
86663 }
86664
86665 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86666 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86667 .build_3d = patch_sigmatel_stac9708_3d,
86668 .build_specific = patch_sigmatel_stac9708_specific
86669 };
86670 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
86671 return 0;
86672 }
86673
86674 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86675 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86676 .build_3d = patch_sigmatel_stac9700_3d,
86677 .build_specific = patch_sigmatel_stac9758_specific
86678 };
86679 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
86680 return 0;
86681 }
86682
86683 -static struct snd_ac97_build_ops patch_cirrus_ops = {
86684 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
86685 .build_spdif = patch_cirrus_build_spdif
86686 };
86687
86688 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
86689 return 0;
86690 }
86691
86692 -static struct snd_ac97_build_ops patch_conexant_ops = {
86693 +static const struct snd_ac97_build_ops patch_conexant_ops = {
86694 .build_spdif = patch_conexant_build_spdif
86695 };
86696
86697 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
86698 }
86699 }
86700
86701 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
86702 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
86703 #ifdef CONFIG_PM
86704 .resume = ad18xx_resume
86705 #endif
86706 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
86707 return 0;
86708 }
86709
86710 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
86711 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
86712 .build_specific = &patch_ad1885_specific,
86713 #ifdef CONFIG_PM
86714 .resume = ad18xx_resume
86715 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
86716 return 0;
86717 }
86718
86719 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
86720 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
86721 .build_specific = &patch_ad1886_specific,
86722 #ifdef CONFIG_PM
86723 .resume = ad18xx_resume
86724 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
86725 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86726 }
86727
86728 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86729 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86730 .build_post_spdif = patch_ad198x_post_spdif,
86731 .build_specific = patch_ad1981a_specific,
86732 #ifdef CONFIG_PM
86733 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
86734 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86735 }
86736
86737 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86738 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86739 .build_post_spdif = patch_ad198x_post_spdif,
86740 .build_specific = patch_ad1981b_specific,
86741 #ifdef CONFIG_PM
86742 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
86743 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
86744 }
86745
86746 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
86747 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
86748 .build_post_spdif = patch_ad198x_post_spdif,
86749 .build_specific = patch_ad1888_specific,
86750 #ifdef CONFIG_PM
86751 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
86752 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
86753 }
86754
86755 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
86756 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
86757 .build_post_spdif = patch_ad198x_post_spdif,
86758 .build_specific = patch_ad1980_specific,
86759 #ifdef CONFIG_PM
86760 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
86761 ARRAY_SIZE(snd_ac97_ad1985_controls));
86762 }
86763
86764 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
86765 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
86766 .build_post_spdif = patch_ad198x_post_spdif,
86767 .build_specific = patch_ad1985_specific,
86768 #ifdef CONFIG_PM
86769 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
86770 ARRAY_SIZE(snd_ac97_ad1985_controls));
86771 }
86772
86773 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
86774 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
86775 .build_post_spdif = patch_ad198x_post_spdif,
86776 .build_specific = patch_ad1986_specific,
86777 #ifdef CONFIG_PM
86778 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
86779 return 0;
86780 }
86781
86782 -static struct snd_ac97_build_ops patch_alc650_ops = {
86783 +static const struct snd_ac97_build_ops patch_alc650_ops = {
86784 .build_specific = patch_alc650_specific,
86785 .update_jacks = alc650_update_jacks
86786 };
86787 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
86788 return 0;
86789 }
86790
86791 -static struct snd_ac97_build_ops patch_alc655_ops = {
86792 +static const struct snd_ac97_build_ops patch_alc655_ops = {
86793 .build_specific = patch_alc655_specific,
86794 .update_jacks = alc655_update_jacks
86795 };
86796 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
86797 return 0;
86798 }
86799
86800 -static struct snd_ac97_build_ops patch_alc850_ops = {
86801 +static const struct snd_ac97_build_ops patch_alc850_ops = {
86802 .build_specific = patch_alc850_specific,
86803 .update_jacks = alc850_update_jacks
86804 };
86805 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
86806 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
86807 }
86808
86809 -static struct snd_ac97_build_ops patch_cm9738_ops = {
86810 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
86811 .build_specific = patch_cm9738_specific,
86812 .update_jacks = cm9738_update_jacks
86813 };
86814 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
86815 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
86816 }
86817
86818 -static struct snd_ac97_build_ops patch_cm9739_ops = {
86819 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
86820 .build_specific = patch_cm9739_specific,
86821 .build_post_spdif = patch_cm9739_post_spdif,
86822 .update_jacks = cm9739_update_jacks
86823 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
86824 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
86825 }
86826
86827 -static struct snd_ac97_build_ops patch_cm9761_ops = {
86828 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
86829 .build_specific = patch_cm9761_specific,
86830 .build_post_spdif = patch_cm9761_post_spdif,
86831 .update_jacks = cm9761_update_jacks
86832 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
86833 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
86834 }
86835
86836 -static struct snd_ac97_build_ops patch_cm9780_ops = {
86837 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
86838 .build_specific = patch_cm9780_specific,
86839 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
86840 };
86841 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
86842 return 0;
86843 }
86844
86845 -static struct snd_ac97_build_ops patch_vt1616_ops = {
86846 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
86847 .build_specific = patch_vt1616_specific
86848 };
86849
86850 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
86851 return 0;
86852 }
86853
86854 -static struct snd_ac97_build_ops patch_it2646_ops = {
86855 +static const struct snd_ac97_build_ops patch_it2646_ops = {
86856 .build_specific = patch_it2646_specific,
86857 .update_jacks = it2646_update_jacks
86858 };
86859 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
86860 return 0;
86861 }
86862
86863 -static struct snd_ac97_build_ops patch_si3036_ops = {
86864 +static const struct snd_ac97_build_ops patch_si3036_ops = {
86865 .build_specific = patch_si3036_specific,
86866 };
86867
86868 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
86869 return 0;
86870 }
86871
86872 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
86873 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
86874 .build_specific = patch_ucb1400_specific,
86875 };
86876
86877 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
86878 index 99552fb..4dcc2c5 100644
86879 --- a/sound/pci/hda/hda_codec.h
86880 +++ b/sound/pci/hda/hda_codec.h
86881 @@ -580,7 +580,7 @@ struct hda_bus_ops {
86882 /* notify power-up/down from codec to controller */
86883 void (*pm_notify)(struct hda_bus *bus);
86884 #endif
86885 -};
86886 +} __no_const;
86887
86888 /* template to pass to the bus constructor */
86889 struct hda_bus_template {
86890 @@ -675,6 +675,7 @@ struct hda_codec_ops {
86891 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
86892 #endif
86893 };
86894 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
86895
86896 /* record for amp information cache */
86897 struct hda_cache_head {
86898 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
86899 struct snd_pcm_substream *substream);
86900 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
86901 struct snd_pcm_substream *substream);
86902 -};
86903 +} __no_const;
86904
86905 /* PCM information for each substream */
86906 struct hda_pcm_stream {
86907 @@ -760,7 +761,7 @@ struct hda_codec {
86908 const char *modelname; /* model name for preset */
86909
86910 /* set by patch */
86911 - struct hda_codec_ops patch_ops;
86912 + hda_codec_ops_no_const patch_ops;
86913
86914 /* PCM to create, set by patch_ops.build_pcms callback */
86915 unsigned int num_pcms;
86916 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
86917 index fb684f0..2b11cea 100644
86918 --- a/sound/pci/hda/patch_atihdmi.c
86919 +++ b/sound/pci/hda/patch_atihdmi.c
86920 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
86921 */
86922 spec->multiout.dig_out_nid = CVT_NID;
86923
86924 - codec->patch_ops = atihdmi_patch_ops;
86925 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
86926
86927 return 0;
86928 }
86929 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
86930 index 7c23016..c5bfdd7 100644
86931 --- a/sound/pci/hda/patch_intelhdmi.c
86932 +++ b/sound/pci/hda/patch_intelhdmi.c
86933 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
86934 cp_ready);
86935
86936 /* TODO */
86937 - if (cp_state)
86938 - ;
86939 - if (cp_ready)
86940 - ;
86941 + if (cp_state) {
86942 + }
86943 + if (cp_ready) {
86944 + }
86945 }
86946
86947
86948 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
86949 spec->multiout.dig_out_nid = cvt_nid;
86950
86951 codec->spec = spec;
86952 - codec->patch_ops = intel_hdmi_patch_ops;
86953 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
86954
86955 snd_hda_eld_proc_new(codec, &spec->sink_eld);
86956
86957 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
86958 index 6afdab0..68ed352 100644
86959 --- a/sound/pci/hda/patch_nvhdmi.c
86960 +++ b/sound/pci/hda/patch_nvhdmi.c
86961 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
86962 spec->multiout.max_channels = 8;
86963 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86964
86965 - codec->patch_ops = nvhdmi_patch_ops_8ch;
86966 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
86967
86968 return 0;
86969 }
86970 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
86971 spec->multiout.max_channels = 2;
86972 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86973
86974 - codec->patch_ops = nvhdmi_patch_ops_2ch;
86975 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
86976
86977 return 0;
86978 }
86979 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
86980 index 2fcd70d..a143eaf 100644
86981 --- a/sound/pci/hda/patch_sigmatel.c
86982 +++ b/sound/pci/hda/patch_sigmatel.c
86983 @@ -5220,7 +5220,7 @@ again:
86984 snd_hda_codec_write_cache(codec, nid, 0,
86985 AC_VERB_SET_CONNECT_SEL, num_dacs);
86986
86987 - codec->patch_ops = stac92xx_patch_ops;
86988 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86989
86990 codec->proc_widget_hook = stac92hd_proc_hook;
86991
86992 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
86993 return -ENOMEM;
86994
86995 codec->spec = spec;
86996 - codec->patch_ops = stac92xx_patch_ops;
86997 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86998 spec->num_pins = STAC92HD71BXX_NUM_PINS;
86999 switch (codec->vendor_id) {
87000 case 0x111d76b6:
87001 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
87002 index d063149..01599a4 100644
87003 --- a/sound/pci/ice1712/ice1712.h
87004 +++ b/sound/pci/ice1712/ice1712.h
87005 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
87006 unsigned int mask_flags; /* total mask bits */
87007 struct snd_akm4xxx_ops {
87008 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
87009 - } ops;
87010 + } __no_const ops;
87011 };
87012
87013 struct snd_ice1712_spdif {
87014 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
87015 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87016 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87017 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87018 - } ops;
87019 + } __no_const ops;
87020 };
87021
87022
87023 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
87024 index 9e7d12e..3e3bc64 100644
87025 --- a/sound/pci/intel8x0m.c
87026 +++ b/sound/pci/intel8x0m.c
87027 @@ -1264,7 +1264,7 @@ static struct shortname_table {
87028 { 0x5455, "ALi M5455" },
87029 { 0x746d, "AMD AMD8111" },
87030 #endif
87031 - { 0 },
87032 + { 0, },
87033 };
87034
87035 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
87036 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
87037 index 5518371..45cf7ac 100644
87038 --- a/sound/pci/ymfpci/ymfpci_main.c
87039 +++ b/sound/pci/ymfpci/ymfpci_main.c
87040 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
87041 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
87042 break;
87043 }
87044 - if (atomic_read(&chip->interrupt_sleep_count)) {
87045 - atomic_set(&chip->interrupt_sleep_count, 0);
87046 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87047 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87048 wake_up(&chip->interrupt_sleep);
87049 }
87050 __end:
87051 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
87052 continue;
87053 init_waitqueue_entry(&wait, current);
87054 add_wait_queue(&chip->interrupt_sleep, &wait);
87055 - atomic_inc(&chip->interrupt_sleep_count);
87056 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
87057 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
87058 remove_wait_queue(&chip->interrupt_sleep, &wait);
87059 }
87060 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
87061 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
87062 spin_unlock(&chip->reg_lock);
87063
87064 - if (atomic_read(&chip->interrupt_sleep_count)) {
87065 - atomic_set(&chip->interrupt_sleep_count, 0);
87066 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87067 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87068 wake_up(&chip->interrupt_sleep);
87069 }
87070 }
87071 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
87072 spin_lock_init(&chip->reg_lock);
87073 spin_lock_init(&chip->voice_lock);
87074 init_waitqueue_head(&chip->interrupt_sleep);
87075 - atomic_set(&chip->interrupt_sleep_count, 0);
87076 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87077 chip->card = card;
87078 chip->pci = pci;
87079 chip->irq = -1;
87080 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
87081 index 0a1b2f6..776bb19 100644
87082 --- a/sound/soc/soc-core.c
87083 +++ b/sound/soc/soc-core.c
87084 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
87085 }
87086
87087 /* ASoC PCM operations */
87088 -static struct snd_pcm_ops soc_pcm_ops = {
87089 +static snd_pcm_ops_no_const soc_pcm_ops = {
87090 .open = soc_pcm_open,
87091 .close = soc_codec_close,
87092 .hw_params = soc_pcm_hw_params,
87093 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
87094 index 79633ea..9732e90 100644
87095 --- a/sound/usb/usbaudio.c
87096 +++ b/sound/usb/usbaudio.c
87097 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
87098 switch (cmd) {
87099 case SNDRV_PCM_TRIGGER_START:
87100 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87101 - subs->ops.prepare = prepare_playback_urb;
87102 + *(void **)&subs->ops.prepare = prepare_playback_urb;
87103 return 0;
87104 case SNDRV_PCM_TRIGGER_STOP:
87105 return deactivate_urbs(subs, 0, 0);
87106 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87107 - subs->ops.prepare = prepare_nodata_playback_urb;
87108 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87109 return 0;
87110 default:
87111 return -EINVAL;
87112 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
87113
87114 switch (cmd) {
87115 case SNDRV_PCM_TRIGGER_START:
87116 - subs->ops.retire = retire_capture_urb;
87117 + *(void **)&subs->ops.retire = retire_capture_urb;
87118 return start_urbs(subs, substream->runtime);
87119 case SNDRV_PCM_TRIGGER_STOP:
87120 return deactivate_urbs(subs, 0, 0);
87121 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87122 - subs->ops.retire = retire_paused_capture_urb;
87123 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
87124 return 0;
87125 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87126 - subs->ops.retire = retire_capture_urb;
87127 + *(void **)&subs->ops.retire = retire_capture_urb;
87128 return 0;
87129 default:
87130 return -EINVAL;
87131 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
87132 /* for playback, submit the URBs now; otherwise, the first hwptr_done
87133 * updates for all URBs would happen at the same time when starting */
87134 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
87135 - subs->ops.prepare = prepare_nodata_playback_urb;
87136 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87137 return start_urbs(subs, runtime);
87138 } else
87139 return 0;
87140 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
87141 subs->direction = stream;
87142 subs->dev = as->chip->dev;
87143 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
87144 - subs->ops = audio_urb_ops[stream];
87145 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
87146 } else {
87147 - subs->ops = audio_urb_ops_high_speed[stream];
87148 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
87149 switch (as->chip->usb_id) {
87150 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
87151 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
87152 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
87153 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87154 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87155 break;
87156 }
87157 }
87158 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
87159 new file mode 100644
87160 index 0000000..469b06a
87161 --- /dev/null
87162 +++ b/tools/gcc/Makefile
87163 @@ -0,0 +1,21 @@
87164 +#CC := gcc
87165 +#PLUGIN_SOURCE_FILES := pax_plugin.c
87166 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
87167 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
87168 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
87169 +
87170 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
87171 +
87172 +hostlibs-y := constify_plugin.so
87173 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
87174 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
87175 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
87176 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
87177 +
87178 +always := $(hostlibs-y)
87179 +
87180 +constify_plugin-objs := constify_plugin.o
87181 +stackleak_plugin-objs := stackleak_plugin.o
87182 +kallocstat_plugin-objs := kallocstat_plugin.o
87183 +kernexec_plugin-objs := kernexec_plugin.o
87184 +checker_plugin-objs := checker_plugin.o
87185 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
87186 new file mode 100644
87187 index 0000000..d41b5af
87188 --- /dev/null
87189 +++ b/tools/gcc/checker_plugin.c
87190 @@ -0,0 +1,171 @@
87191 +/*
87192 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87193 + * Licensed under the GPL v2
87194 + *
87195 + * Note: the choice of the license means that the compilation process is
87196 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87197 + * but for the kernel it doesn't matter since it doesn't link against
87198 + * any of the gcc libraries
87199 + *
87200 + * gcc plugin to implement various sparse (source code checker) features
87201 + *
87202 + * TODO:
87203 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
87204 + *
87205 + * BUGS:
87206 + * - none known
87207 + */
87208 +#include "gcc-plugin.h"
87209 +#include "config.h"
87210 +#include "system.h"
87211 +#include "coretypes.h"
87212 +#include "tree.h"
87213 +#include "tree-pass.h"
87214 +#include "flags.h"
87215 +#include "intl.h"
87216 +#include "toplev.h"
87217 +#include "plugin.h"
87218 +//#include "expr.h" where are you...
87219 +#include "diagnostic.h"
87220 +#include "plugin-version.h"
87221 +#include "tm.h"
87222 +#include "function.h"
87223 +#include "basic-block.h"
87224 +#include "gimple.h"
87225 +#include "rtl.h"
87226 +#include "emit-rtl.h"
87227 +#include "tree-flow.h"
87228 +#include "target.h"
87229 +
87230 +extern void c_register_addr_space (const char *str, addr_space_t as);
87231 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
87232 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
87233 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
87234 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
87235 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
87236 +
87237 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87238 +extern rtx emit_move_insn(rtx x, rtx y);
87239 +
87240 +int plugin_is_GPL_compatible;
87241 +
87242 +static struct plugin_info checker_plugin_info = {
87243 + .version = "201111150100",
87244 +};
87245 +
87246 +#define ADDR_SPACE_KERNEL 0
87247 +#define ADDR_SPACE_FORCE_KERNEL 1
87248 +#define ADDR_SPACE_USER 2
87249 +#define ADDR_SPACE_FORCE_USER 3
87250 +#define ADDR_SPACE_IOMEM 0
87251 +#define ADDR_SPACE_FORCE_IOMEM 0
87252 +#define ADDR_SPACE_PERCPU 0
87253 +#define ADDR_SPACE_FORCE_PERCPU 0
87254 +#define ADDR_SPACE_RCU 0
87255 +#define ADDR_SPACE_FORCE_RCU 0
87256 +
87257 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
87258 +{
87259 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
87260 +}
87261 +
87262 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
87263 +{
87264 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
87265 +}
87266 +
87267 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
87268 +{
87269 + return default_addr_space_valid_pointer_mode(mode, as);
87270 +}
87271 +
87272 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
87273 +{
87274 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
87275 +}
87276 +
87277 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
87278 +{
87279 + return default_addr_space_legitimize_address(x, oldx, mode, as);
87280 +}
87281 +
87282 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
87283 +{
87284 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
87285 + return true;
87286 +
87287 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
87288 + return true;
87289 +
87290 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
87291 + return true;
87292 +
87293 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
87294 + return true;
87295 +
87296 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
87297 + return true;
87298 +
87299 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
87300 + return true;
87301 +
87302 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
87303 + return true;
87304 +
87305 + return subset == superset;
87306 +}
87307 +
87308 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
87309 +{
87310 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
87311 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
87312 +
87313 + return op;
87314 +}
87315 +
87316 +static void register_checker_address_spaces(void *event_data, void *data)
87317 +{
87318 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
87319 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
87320 + c_register_addr_space("__user", ADDR_SPACE_USER);
87321 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
87322 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
87323 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
87324 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
87325 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
87326 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
87327 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
87328 +
87329 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
87330 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
87331 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
87332 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
87333 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
87334 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
87335 + targetm.addr_space.convert = checker_addr_space_convert;
87336 +}
87337 +
87338 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87339 +{
87340 + const char * const plugin_name = plugin_info->base_name;
87341 + const int argc = plugin_info->argc;
87342 + const struct plugin_argument * const argv = plugin_info->argv;
87343 + int i;
87344 +
87345 + if (!plugin_default_version_check(version, &gcc_version)) {
87346 + error(G_("incompatible gcc/plugin versions"));
87347 + return 1;
87348 + }
87349 +
87350 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
87351 +
87352 + for (i = 0; i < argc; ++i)
87353 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87354 +
87355 + if (TARGET_64BIT == 0)
87356 + return 0;
87357 +
87358 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
87359 +
87360 + return 0;
87361 +}
87362 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
87363 new file mode 100644
87364 index 0000000..704a564
87365 --- /dev/null
87366 +++ b/tools/gcc/constify_plugin.c
87367 @@ -0,0 +1,303 @@
87368 +/*
87369 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
87370 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
87371 + * Licensed under the GPL v2, or (at your option) v3
87372 + *
87373 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
87374 + *
87375 + * Homepage:
87376 + * http://www.grsecurity.net/~ephox/const_plugin/
87377 + *
87378 + * Usage:
87379 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
87380 + * $ gcc -fplugin=constify_plugin.so test.c -O2
87381 + */
87382 +
87383 +#include "gcc-plugin.h"
87384 +#include "config.h"
87385 +#include "system.h"
87386 +#include "coretypes.h"
87387 +#include "tree.h"
87388 +#include "tree-pass.h"
87389 +#include "flags.h"
87390 +#include "intl.h"
87391 +#include "toplev.h"
87392 +#include "plugin.h"
87393 +#include "diagnostic.h"
87394 +#include "plugin-version.h"
87395 +#include "tm.h"
87396 +#include "function.h"
87397 +#include "basic-block.h"
87398 +#include "gimple.h"
87399 +#include "rtl.h"
87400 +#include "emit-rtl.h"
87401 +#include "tree-flow.h"
87402 +
87403 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
87404 +
87405 +int plugin_is_GPL_compatible;
87406 +
87407 +static struct plugin_info const_plugin_info = {
87408 + .version = "201111150100",
87409 + .help = "no-constify\tturn off constification\n",
87410 +};
87411 +
87412 +static void constify_type(tree type);
87413 +static bool walk_struct(tree node);
87414 +
87415 +static tree deconstify_type(tree old_type)
87416 +{
87417 + tree new_type, field;
87418 +
87419 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
87420 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
87421 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
87422 + DECL_FIELD_CONTEXT(field) = new_type;
87423 + TYPE_READONLY(new_type) = 0;
87424 + C_TYPE_FIELDS_READONLY(new_type) = 0;
87425 + return new_type;
87426 +}
87427 +
87428 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87429 +{
87430 + tree type;
87431 +
87432 + *no_add_attrs = true;
87433 + if (TREE_CODE(*node) == FUNCTION_DECL) {
87434 + error("%qE attribute does not apply to functions", name);
87435 + return NULL_TREE;
87436 + }
87437 +
87438 + if (TREE_CODE(*node) == VAR_DECL) {
87439 + error("%qE attribute does not apply to variables", name);
87440 + return NULL_TREE;
87441 + }
87442 +
87443 + if (TYPE_P(*node)) {
87444 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
87445 + *no_add_attrs = false;
87446 + else
87447 + error("%qE attribute applies to struct and union types only", name);
87448 + return NULL_TREE;
87449 + }
87450 +
87451 + type = TREE_TYPE(*node);
87452 +
87453 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
87454 + error("%qE attribute applies to struct and union types only", name);
87455 + return NULL_TREE;
87456 + }
87457 +
87458 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
87459 + error("%qE attribute is already applied to the type", name);
87460 + return NULL_TREE;
87461 + }
87462 +
87463 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
87464 + error("%qE attribute used on type that is not constified", name);
87465 + return NULL_TREE;
87466 + }
87467 +
87468 + if (TREE_CODE(*node) == TYPE_DECL) {
87469 + TREE_TYPE(*node) = deconstify_type(type);
87470 + TREE_READONLY(*node) = 0;
87471 + return NULL_TREE;
87472 + }
87473 +
87474 + return NULL_TREE;
87475 +}
87476 +
87477 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87478 +{
87479 + *no_add_attrs = true;
87480 + if (!TYPE_P(*node)) {
87481 + error("%qE attribute applies to types only", name);
87482 + return NULL_TREE;
87483 + }
87484 +
87485 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
87486 + error("%qE attribute applies to struct and union types only", name);
87487 + return NULL_TREE;
87488 + }
87489 +
87490 + *no_add_attrs = false;
87491 + constify_type(*node);
87492 + return NULL_TREE;
87493 +}
87494 +
87495 +static struct attribute_spec no_const_attr = {
87496 + .name = "no_const",
87497 + .min_length = 0,
87498 + .max_length = 0,
87499 + .decl_required = false,
87500 + .type_required = false,
87501 + .function_type_required = false,
87502 + .handler = handle_no_const_attribute,
87503 +#if BUILDING_GCC_VERSION >= 4007
87504 + .affects_type_identity = true
87505 +#endif
87506 +};
87507 +
87508 +static struct attribute_spec do_const_attr = {
87509 + .name = "do_const",
87510 + .min_length = 0,
87511 + .max_length = 0,
87512 + .decl_required = false,
87513 + .type_required = false,
87514 + .function_type_required = false,
87515 + .handler = handle_do_const_attribute,
87516 +#if BUILDING_GCC_VERSION >= 4007
87517 + .affects_type_identity = true
87518 +#endif
87519 +};
87520 +
87521 +static void register_attributes(void *event_data, void *data)
87522 +{
87523 + register_attribute(&no_const_attr);
87524 + register_attribute(&do_const_attr);
87525 +}
87526 +
87527 +static void constify_type(tree type)
87528 +{
87529 + TYPE_READONLY(type) = 1;
87530 + C_TYPE_FIELDS_READONLY(type) = 1;
87531 +}
87532 +
87533 +static bool is_fptr(tree field)
87534 +{
87535 + tree ptr = TREE_TYPE(field);
87536 +
87537 + if (TREE_CODE(ptr) != POINTER_TYPE)
87538 + return false;
87539 +
87540 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
87541 +}
87542 +
87543 +static bool walk_struct(tree node)
87544 +{
87545 + tree field;
87546 +
87547 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
87548 + return false;
87549 +
87550 + if (TYPE_FIELDS(node) == NULL_TREE)
87551 + return false;
87552 +
87553 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
87554 + tree type = TREE_TYPE(field);
87555 + enum tree_code code = TREE_CODE(type);
87556 + if (code == RECORD_TYPE || code == UNION_TYPE) {
87557 + if (!(walk_struct(type)))
87558 + return false;
87559 + } else if (!is_fptr(field) && !TREE_READONLY(field))
87560 + return false;
87561 + }
87562 + return true;
87563 +}
87564 +
87565 +static void finish_type(void *event_data, void *data)
87566 +{
87567 + tree type = (tree)event_data;
87568 +
87569 + if (type == NULL_TREE)
87570 + return;
87571 +
87572 + if (TYPE_READONLY(type))
87573 + return;
87574 +
87575 + if (walk_struct(type))
87576 + constify_type(type);
87577 +}
87578 +
87579 +static unsigned int check_local_variables(void);
87580 +
87581 +struct gimple_opt_pass pass_local_variable = {
87582 + {
87583 + .type = GIMPLE_PASS,
87584 + .name = "check_local_variables",
87585 + .gate = NULL,
87586 + .execute = check_local_variables,
87587 + .sub = NULL,
87588 + .next = NULL,
87589 + .static_pass_number = 0,
87590 + .tv_id = TV_NONE,
87591 + .properties_required = 0,
87592 + .properties_provided = 0,
87593 + .properties_destroyed = 0,
87594 + .todo_flags_start = 0,
87595 + .todo_flags_finish = 0
87596 + }
87597 +};
87598 +
87599 +static unsigned int check_local_variables(void)
87600 +{
87601 + tree var;
87602 + referenced_var_iterator rvi;
87603 +
87604 +#if BUILDING_GCC_VERSION == 4005
87605 + FOR_EACH_REFERENCED_VAR(var, rvi) {
87606 +#else
87607 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
87608 +#endif
87609 + tree type = TREE_TYPE(var);
87610 +
87611 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
87612 + continue;
87613 +
87614 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
87615 + continue;
87616 +
87617 + if (!TYPE_READONLY(type))
87618 + continue;
87619 +
87620 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
87621 +// continue;
87622 +
87623 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
87624 +// continue;
87625 +
87626 + if (walk_struct(type)) {
87627 + error("constified variable %qE cannot be local", var);
87628 + return 1;
87629 + }
87630 + }
87631 + return 0;
87632 +}
87633 +
87634 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87635 +{
87636 + const char * const plugin_name = plugin_info->base_name;
87637 + const int argc = plugin_info->argc;
87638 + const struct plugin_argument * const argv = plugin_info->argv;
87639 + int i;
87640 + bool constify = true;
87641 +
87642 + struct register_pass_info local_variable_pass_info = {
87643 + .pass = &pass_local_variable.pass,
87644 + .reference_pass_name = "*referenced_vars",
87645 + .ref_pass_instance_number = 0,
87646 + .pos_op = PASS_POS_INSERT_AFTER
87647 + };
87648 +
87649 + if (!plugin_default_version_check(version, &gcc_version)) {
87650 + error(G_("incompatible gcc/plugin versions"));
87651 + return 1;
87652 + }
87653 +
87654 + for (i = 0; i < argc; ++i) {
87655 + if (!(strcmp(argv[i].key, "no-constify"))) {
87656 + constify = false;
87657 + continue;
87658 + }
87659 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87660 + }
87661 +
87662 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
87663 + if (constify) {
87664 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
87665 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
87666 + }
87667 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87668 +
87669 + return 0;
87670 +}
87671 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
87672 new file mode 100644
87673 index 0000000..a5eabce
87674 --- /dev/null
87675 +++ b/tools/gcc/kallocstat_plugin.c
87676 @@ -0,0 +1,167 @@
87677 +/*
87678 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87679 + * Licensed under the GPL v2
87680 + *
87681 + * Note: the choice of the license means that the compilation process is
87682 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87683 + * but for the kernel it doesn't matter since it doesn't link against
87684 + * any of the gcc libraries
87685 + *
87686 + * gcc plugin to find the distribution of k*alloc sizes
87687 + *
87688 + * TODO:
87689 + *
87690 + * BUGS:
87691 + * - none known
87692 + */
87693 +#include "gcc-plugin.h"
87694 +#include "config.h"
87695 +#include "system.h"
87696 +#include "coretypes.h"
87697 +#include "tree.h"
87698 +#include "tree-pass.h"
87699 +#include "flags.h"
87700 +#include "intl.h"
87701 +#include "toplev.h"
87702 +#include "plugin.h"
87703 +//#include "expr.h" where are you...
87704 +#include "diagnostic.h"
87705 +#include "plugin-version.h"
87706 +#include "tm.h"
87707 +#include "function.h"
87708 +#include "basic-block.h"
87709 +#include "gimple.h"
87710 +#include "rtl.h"
87711 +#include "emit-rtl.h"
87712 +
87713 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87714 +
87715 +int plugin_is_GPL_compatible;
87716 +
87717 +static const char * const kalloc_functions[] = {
87718 + "__kmalloc",
87719 + "kmalloc",
87720 + "kmalloc_large",
87721 + "kmalloc_node",
87722 + "kmalloc_order",
87723 + "kmalloc_order_trace",
87724 + "kmalloc_slab",
87725 + "kzalloc",
87726 + "kzalloc_node",
87727 +};
87728 +
87729 +static struct plugin_info kallocstat_plugin_info = {
87730 + .version = "201111150100",
87731 +};
87732 +
87733 +static unsigned int execute_kallocstat(void);
87734 +
87735 +static struct gimple_opt_pass kallocstat_pass = {
87736 + .pass = {
87737 + .type = GIMPLE_PASS,
87738 + .name = "kallocstat",
87739 + .gate = NULL,
87740 + .execute = execute_kallocstat,
87741 + .sub = NULL,
87742 + .next = NULL,
87743 + .static_pass_number = 0,
87744 + .tv_id = TV_NONE,
87745 + .properties_required = 0,
87746 + .properties_provided = 0,
87747 + .properties_destroyed = 0,
87748 + .todo_flags_start = 0,
87749 + .todo_flags_finish = 0
87750 + }
87751 +};
87752 +
87753 +static bool is_kalloc(const char *fnname)
87754 +{
87755 + size_t i;
87756 +
87757 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
87758 + if (!strcmp(fnname, kalloc_functions[i]))
87759 + return true;
87760 + return false;
87761 +}
87762 +
87763 +static unsigned int execute_kallocstat(void)
87764 +{
87765 + basic_block bb;
87766 +
87767 + // 1. loop through BBs and GIMPLE statements
87768 + FOR_EACH_BB(bb) {
87769 + gimple_stmt_iterator gsi;
87770 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87771 + // gimple match:
87772 + tree fndecl, size;
87773 + gimple call_stmt;
87774 + const char *fnname;
87775 +
87776 + // is it a call
87777 + call_stmt = gsi_stmt(gsi);
87778 + if (!is_gimple_call(call_stmt))
87779 + continue;
87780 + fndecl = gimple_call_fndecl(call_stmt);
87781 + if (fndecl == NULL_TREE)
87782 + continue;
87783 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
87784 + continue;
87785 +
87786 + // is it a call to k*alloc
87787 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
87788 + if (!is_kalloc(fnname))
87789 + continue;
87790 +
87791 + // is the size arg the result of a simple const assignment
87792 + size = gimple_call_arg(call_stmt, 0);
87793 + while (true) {
87794 + gimple def_stmt;
87795 + expanded_location xloc;
87796 + size_t size_val;
87797 +
87798 + if (TREE_CODE(size) != SSA_NAME)
87799 + break;
87800 + def_stmt = SSA_NAME_DEF_STMT(size);
87801 + if (!def_stmt || !is_gimple_assign(def_stmt))
87802 + break;
87803 + if (gimple_num_ops(def_stmt) != 2)
87804 + break;
87805 + size = gimple_assign_rhs1(def_stmt);
87806 + if (!TREE_CONSTANT(size))
87807 + continue;
87808 + xloc = expand_location(gimple_location(def_stmt));
87809 + if (!xloc.file)
87810 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
87811 + size_val = TREE_INT_CST_LOW(size);
87812 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
87813 + break;
87814 + }
87815 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87816 +//debug_tree(gimple_call_fn(call_stmt));
87817 +//print_node(stderr, "pax", fndecl, 4);
87818 + }
87819 + }
87820 +
87821 + return 0;
87822 +}
87823 +
87824 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87825 +{
87826 + const char * const plugin_name = plugin_info->base_name;
87827 + struct register_pass_info kallocstat_pass_info = {
87828 + .pass = &kallocstat_pass.pass,
87829 + .reference_pass_name = "ssa",
87830 + .ref_pass_instance_number = 0,
87831 + .pos_op = PASS_POS_INSERT_AFTER
87832 + };
87833 +
87834 + if (!plugin_default_version_check(version, &gcc_version)) {
87835 + error(G_("incompatible gcc/plugin versions"));
87836 + return 1;
87837 + }
87838 +
87839 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
87840 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
87841 +
87842 + return 0;
87843 +}
87844 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
87845 new file mode 100644
87846 index 0000000..008f159
87847 --- /dev/null
87848 +++ b/tools/gcc/kernexec_plugin.c
87849 @@ -0,0 +1,427 @@
87850 +/*
87851 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87852 + * Licensed under the GPL v2
87853 + *
87854 + * Note: the choice of the license means that the compilation process is
87855 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87856 + * but for the kernel it doesn't matter since it doesn't link against
87857 + * any of the gcc libraries
87858 + *
87859 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
87860 + *
87861 + * TODO:
87862 + *
87863 + * BUGS:
87864 + * - none known
87865 + */
87866 +#include "gcc-plugin.h"
87867 +#include "config.h"
87868 +#include "system.h"
87869 +#include "coretypes.h"
87870 +#include "tree.h"
87871 +#include "tree-pass.h"
87872 +#include "flags.h"
87873 +#include "intl.h"
87874 +#include "toplev.h"
87875 +#include "plugin.h"
87876 +//#include "expr.h" where are you...
87877 +#include "diagnostic.h"
87878 +#include "plugin-version.h"
87879 +#include "tm.h"
87880 +#include "function.h"
87881 +#include "basic-block.h"
87882 +#include "gimple.h"
87883 +#include "rtl.h"
87884 +#include "emit-rtl.h"
87885 +#include "tree-flow.h"
87886 +
87887 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87888 +extern rtx emit_move_insn(rtx x, rtx y);
87889 +
87890 +int plugin_is_GPL_compatible;
87891 +
87892 +static struct plugin_info kernexec_plugin_info = {
87893 + .version = "201111291120",
87894 + .help = "method=[bts|or]\tinstrumentation method\n"
87895 +};
87896 +
87897 +static unsigned int execute_kernexec_reload(void);
87898 +static unsigned int execute_kernexec_fptr(void);
87899 +static unsigned int execute_kernexec_retaddr(void);
87900 +static bool kernexec_cmodel_check(void);
87901 +
87902 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
87903 +static void (*kernexec_instrument_retaddr)(rtx);
87904 +
87905 +static struct gimple_opt_pass kernexec_reload_pass = {
87906 + .pass = {
87907 + .type = GIMPLE_PASS,
87908 + .name = "kernexec_reload",
87909 + .gate = kernexec_cmodel_check,
87910 + .execute = execute_kernexec_reload,
87911 + .sub = NULL,
87912 + .next = NULL,
87913 + .static_pass_number = 0,
87914 + .tv_id = TV_NONE,
87915 + .properties_required = 0,
87916 + .properties_provided = 0,
87917 + .properties_destroyed = 0,
87918 + .todo_flags_start = 0,
87919 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87920 + }
87921 +};
87922 +
87923 +static struct gimple_opt_pass kernexec_fptr_pass = {
87924 + .pass = {
87925 + .type = GIMPLE_PASS,
87926 + .name = "kernexec_fptr",
87927 + .gate = kernexec_cmodel_check,
87928 + .execute = execute_kernexec_fptr,
87929 + .sub = NULL,
87930 + .next = NULL,
87931 + .static_pass_number = 0,
87932 + .tv_id = TV_NONE,
87933 + .properties_required = 0,
87934 + .properties_provided = 0,
87935 + .properties_destroyed = 0,
87936 + .todo_flags_start = 0,
87937 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87938 + }
87939 +};
87940 +
87941 +static struct rtl_opt_pass kernexec_retaddr_pass = {
87942 + .pass = {
87943 + .type = RTL_PASS,
87944 + .name = "kernexec_retaddr",
87945 + .gate = kernexec_cmodel_check,
87946 + .execute = execute_kernexec_retaddr,
87947 + .sub = NULL,
87948 + .next = NULL,
87949 + .static_pass_number = 0,
87950 + .tv_id = TV_NONE,
87951 + .properties_required = 0,
87952 + .properties_provided = 0,
87953 + .properties_destroyed = 0,
87954 + .todo_flags_start = 0,
87955 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
87956 + }
87957 +};
87958 +
87959 +static bool kernexec_cmodel_check(void)
87960 +{
87961 + tree section;
87962 +
87963 + if (ix86_cmodel != CM_KERNEL)
87964 + return false;
87965 +
87966 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
87967 + if (!section || !TREE_VALUE(section))
87968 + return true;
87969 +
87970 + section = TREE_VALUE(TREE_VALUE(section));
87971 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
87972 + return true;
87973 +
87974 + return false;
87975 +}
87976 +
87977 +/*
87978 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
87979 + */
87980 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
87981 +{
87982 + gimple asm_movabs_stmt;
87983 +
87984 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
87985 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
87986 + gimple_asm_set_volatile(asm_movabs_stmt, true);
87987 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
87988 + update_stmt(asm_movabs_stmt);
87989 +}
87990 +
87991 +/*
87992 + * find all asm() stmts that clobber r10 and add a reload of r10
87993 + */
87994 +static unsigned int execute_kernexec_reload(void)
87995 +{
87996 + basic_block bb;
87997 +
87998 + // 1. loop through BBs and GIMPLE statements
87999 + FOR_EACH_BB(bb) {
88000 + gimple_stmt_iterator gsi;
88001 +
88002 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88003 + // gimple match: __asm__ ("" : : : "r10");
88004 + gimple asm_stmt;
88005 + size_t nclobbers;
88006 +
88007 + // is it an asm ...
88008 + asm_stmt = gsi_stmt(gsi);
88009 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
88010 + continue;
88011 +
88012 + // ... clobbering r10
88013 + nclobbers = gimple_asm_nclobbers(asm_stmt);
88014 + while (nclobbers--) {
88015 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
88016 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
88017 + continue;
88018 + kernexec_reload_fptr_mask(&gsi);
88019 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
88020 + break;
88021 + }
88022 + }
88023 + }
88024 +
88025 + return 0;
88026 +}
88027 +
88028 +/*
88029 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
88030 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
88031 + */
88032 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
88033 +{
88034 + gimple assign_intptr, assign_new_fptr, call_stmt;
88035 + tree intptr, old_fptr, new_fptr, kernexec_mask;
88036 +
88037 + call_stmt = gsi_stmt(*gsi);
88038 + old_fptr = gimple_call_fn(call_stmt);
88039 +
88040 + // create temporary unsigned long variable used for bitops and cast fptr to it
88041 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
88042 + add_referenced_var(intptr);
88043 + mark_sym_for_renaming(intptr);
88044 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
88045 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88046 + update_stmt(assign_intptr);
88047 +
88048 + // apply logical or to temporary unsigned long and bitmask
88049 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
88050 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
88051 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
88052 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88053 + update_stmt(assign_intptr);
88054 +
88055 + // cast temporary unsigned long back to a temporary fptr variable
88056 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
88057 + add_referenced_var(new_fptr);
88058 + mark_sym_for_renaming(new_fptr);
88059 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
88060 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
88061 + update_stmt(assign_new_fptr);
88062 +
88063 + // replace call stmt fn with the new fptr
88064 + gimple_call_set_fn(call_stmt, new_fptr);
88065 + update_stmt(call_stmt);
88066 +}
88067 +
88068 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
88069 +{
88070 + gimple asm_or_stmt, call_stmt;
88071 + tree old_fptr, new_fptr, input, output;
88072 + VEC(tree, gc) *inputs = NULL;
88073 + VEC(tree, gc) *outputs = NULL;
88074 +
88075 + call_stmt = gsi_stmt(*gsi);
88076 + old_fptr = gimple_call_fn(call_stmt);
88077 +
88078 + // create temporary fptr variable
88079 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
88080 + add_referenced_var(new_fptr);
88081 + mark_sym_for_renaming(new_fptr);
88082 +
88083 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
88084 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
88085 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
88086 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
88087 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
88088 + VEC_safe_push(tree, gc, inputs, input);
88089 + VEC_safe_push(tree, gc, outputs, output);
88090 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
88091 + gimple_asm_set_volatile(asm_or_stmt, true);
88092 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
88093 + update_stmt(asm_or_stmt);
88094 +
88095 + // replace call stmt fn with the new fptr
88096 + gimple_call_set_fn(call_stmt, new_fptr);
88097 + update_stmt(call_stmt);
88098 +}
88099 +
88100 +/*
88101 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
88102 + */
88103 +static unsigned int execute_kernexec_fptr(void)
88104 +{
88105 + basic_block bb;
88106 +
88107 + // 1. loop through BBs and GIMPLE statements
88108 + FOR_EACH_BB(bb) {
88109 + gimple_stmt_iterator gsi;
88110 +
88111 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88112 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
88113 + tree fn;
88114 + gimple call_stmt;
88115 +
88116 + // is it a call ...
88117 + call_stmt = gsi_stmt(gsi);
88118 + if (!is_gimple_call(call_stmt))
88119 + continue;
88120 + fn = gimple_call_fn(call_stmt);
88121 + if (TREE_CODE(fn) == ADDR_EXPR)
88122 + continue;
88123 + if (TREE_CODE(fn) != SSA_NAME)
88124 + gcc_unreachable();
88125 +
88126 + // ... through a function pointer
88127 + fn = SSA_NAME_VAR(fn);
88128 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
88129 + continue;
88130 + fn = TREE_TYPE(fn);
88131 + if (TREE_CODE(fn) != POINTER_TYPE)
88132 + continue;
88133 + fn = TREE_TYPE(fn);
88134 + if (TREE_CODE(fn) != FUNCTION_TYPE)
88135 + continue;
88136 +
88137 + kernexec_instrument_fptr(&gsi);
88138 +
88139 +//debug_tree(gimple_call_fn(call_stmt));
88140 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
88141 + }
88142 + }
88143 +
88144 + return 0;
88145 +}
88146 +
88147 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
88148 +static void kernexec_instrument_retaddr_bts(rtx insn)
88149 +{
88150 + rtx btsq;
88151 + rtvec argvec, constraintvec, labelvec;
88152 + int line;
88153 +
88154 + // create asm volatile("btsq $63,(%%rsp)":::)
88155 + argvec = rtvec_alloc(0);
88156 + constraintvec = rtvec_alloc(0);
88157 + labelvec = rtvec_alloc(0);
88158 + line = expand_location(RTL_LOCATION(insn)).line;
88159 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88160 + MEM_VOLATILE_P(btsq) = 1;
88161 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
88162 + emit_insn_before(btsq, insn);
88163 +}
88164 +
88165 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
88166 +static void kernexec_instrument_retaddr_or(rtx insn)
88167 +{
88168 + rtx orq;
88169 + rtvec argvec, constraintvec, labelvec;
88170 + int line;
88171 +
88172 + // create asm volatile("orq %%r10,(%%rsp)":::)
88173 + argvec = rtvec_alloc(0);
88174 + constraintvec = rtvec_alloc(0);
88175 + labelvec = rtvec_alloc(0);
88176 + line = expand_location(RTL_LOCATION(insn)).line;
88177 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88178 + MEM_VOLATILE_P(orq) = 1;
88179 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
88180 + emit_insn_before(orq, insn);
88181 +}
88182 +
88183 +/*
88184 + * find all asm level function returns and forcibly set the highest bit of the return address
88185 + */
88186 +static unsigned int execute_kernexec_retaddr(void)
88187 +{
88188 + rtx insn;
88189 +
88190 + // 1. find function returns
88191 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88192 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
88193 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
88194 + rtx body;
88195 +
88196 + // is it a retn
88197 + if (!JUMP_P(insn))
88198 + continue;
88199 + body = PATTERN(insn);
88200 + if (GET_CODE(body) == PARALLEL)
88201 + body = XVECEXP(body, 0, 0);
88202 + if (GET_CODE(body) != RETURN)
88203 + continue;
88204 + kernexec_instrument_retaddr(insn);
88205 + }
88206 +
88207 +// print_simple_rtl(stderr, get_insns());
88208 +// print_rtl(stderr, get_insns());
88209 +
88210 + return 0;
88211 +}
88212 +
88213 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88214 +{
88215 + const char * const plugin_name = plugin_info->base_name;
88216 + const int argc = plugin_info->argc;
88217 + const struct plugin_argument * const argv = plugin_info->argv;
88218 + int i;
88219 + struct register_pass_info kernexec_reload_pass_info = {
88220 + .pass = &kernexec_reload_pass.pass,
88221 + .reference_pass_name = "ssa",
88222 + .ref_pass_instance_number = 0,
88223 + .pos_op = PASS_POS_INSERT_AFTER
88224 + };
88225 + struct register_pass_info kernexec_fptr_pass_info = {
88226 + .pass = &kernexec_fptr_pass.pass,
88227 + .reference_pass_name = "ssa",
88228 + .ref_pass_instance_number = 0,
88229 + .pos_op = PASS_POS_INSERT_AFTER
88230 + };
88231 + struct register_pass_info kernexec_retaddr_pass_info = {
88232 + .pass = &kernexec_retaddr_pass.pass,
88233 + .reference_pass_name = "pro_and_epilogue",
88234 + .ref_pass_instance_number = 0,
88235 + .pos_op = PASS_POS_INSERT_AFTER
88236 + };
88237 +
88238 + if (!plugin_default_version_check(version, &gcc_version)) {
88239 + error(G_("incompatible gcc/plugin versions"));
88240 + return 1;
88241 + }
88242 +
88243 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
88244 +
88245 + if (TARGET_64BIT == 0)
88246 + return 0;
88247 +
88248 + for (i = 0; i < argc; ++i) {
88249 + if (!strcmp(argv[i].key, "method")) {
88250 + if (!argv[i].value) {
88251 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88252 + continue;
88253 + }
88254 + if (!strcmp(argv[i].value, "bts")) {
88255 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
88256 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
88257 + } else if (!strcmp(argv[i].value, "or")) {
88258 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
88259 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
88260 + fix_register("r10", 1, 1);
88261 + } else
88262 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88263 + continue;
88264 + }
88265 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88266 + }
88267 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
88268 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
88269 +
88270 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
88271 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
88272 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
88273 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
88274 +
88275 + return 0;
88276 +}
88277 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
88278 new file mode 100644
88279 index 0000000..4a9b187
88280 --- /dev/null
88281 +++ b/tools/gcc/stackleak_plugin.c
88282 @@ -0,0 +1,326 @@
88283 +/*
88284 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88285 + * Licensed under the GPL v2
88286 + *
88287 + * Note: the choice of the license means that the compilation process is
88288 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88289 + * but for the kernel it doesn't matter since it doesn't link against
88290 + * any of the gcc libraries
88291 + *
88292 + * gcc plugin to help implement various PaX features
88293 + *
88294 + * - track lowest stack pointer
88295 + *
88296 + * TODO:
88297 + * - initialize all local variables
88298 + *
88299 + * BUGS:
88300 + * - none known
88301 + */
88302 +#include "gcc-plugin.h"
88303 +#include "config.h"
88304 +#include "system.h"
88305 +#include "coretypes.h"
88306 +#include "tree.h"
88307 +#include "tree-pass.h"
88308 +#include "flags.h"
88309 +#include "intl.h"
88310 +#include "toplev.h"
88311 +#include "plugin.h"
88312 +//#include "expr.h" where are you...
88313 +#include "diagnostic.h"
88314 +#include "plugin-version.h"
88315 +#include "tm.h"
88316 +#include "function.h"
88317 +#include "basic-block.h"
88318 +#include "gimple.h"
88319 +#include "rtl.h"
88320 +#include "emit-rtl.h"
88321 +
88322 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88323 +
88324 +int plugin_is_GPL_compatible;
88325 +
88326 +static int track_frame_size = -1;
88327 +static const char track_function[] = "pax_track_stack";
88328 +static const char check_function[] = "pax_check_alloca";
88329 +static tree pax_check_alloca_decl;
88330 +static tree pax_track_stack_decl;
88331 +static bool init_locals;
88332 +
88333 +static struct plugin_info stackleak_plugin_info = {
88334 + .version = "201203021600",
88335 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
88336 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
88337 +};
88338 +
88339 +static bool gate_stackleak_track_stack(void);
88340 +static unsigned int execute_stackleak_tree_instrument(void);
88341 +static unsigned int execute_stackleak_final(void);
88342 +
88343 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
88344 + .pass = {
88345 + .type = GIMPLE_PASS,
88346 + .name = "stackleak_tree_instrument",
88347 + .gate = gate_stackleak_track_stack,
88348 + .execute = execute_stackleak_tree_instrument,
88349 + .sub = NULL,
88350 + .next = NULL,
88351 + .static_pass_number = 0,
88352 + .tv_id = TV_NONE,
88353 + .properties_required = PROP_gimple_leh | PROP_cfg,
88354 + .properties_provided = 0,
88355 + .properties_destroyed = 0,
88356 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
88357 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
88358 + }
88359 +};
88360 +
88361 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
88362 + .pass = {
88363 + .type = RTL_PASS,
88364 + .name = "stackleak_final",
88365 + .gate = gate_stackleak_track_stack,
88366 + .execute = execute_stackleak_final,
88367 + .sub = NULL,
88368 + .next = NULL,
88369 + .static_pass_number = 0,
88370 + .tv_id = TV_NONE,
88371 + .properties_required = 0,
88372 + .properties_provided = 0,
88373 + .properties_destroyed = 0,
88374 + .todo_flags_start = 0,
88375 + .todo_flags_finish = TODO_dump_func
88376 + }
88377 +};
88378 +
88379 +static bool gate_stackleak_track_stack(void)
88380 +{
88381 + return track_frame_size >= 0;
88382 +}
88383 +
88384 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
88385 +{
88386 + gimple check_alloca;
88387 + tree alloca_size;
88388 +
88389 + // insert call to void pax_check_alloca(unsigned long size)
88390 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
88391 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
88392 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
88393 +}
88394 +
88395 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
88396 +{
88397 + gimple track_stack;
88398 +
88399 + // insert call to void pax_track_stack(void)
88400 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
88401 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
88402 +}
88403 +
88404 +#if BUILDING_GCC_VERSION == 4005
88405 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
88406 +{
88407 + tree fndecl;
88408 +
88409 + if (!is_gimple_call(stmt))
88410 + return false;
88411 + fndecl = gimple_call_fndecl(stmt);
88412 + if (!fndecl)
88413 + return false;
88414 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
88415 + return false;
88416 +// print_node(stderr, "pax", fndecl, 4);
88417 + return DECL_FUNCTION_CODE(fndecl) == code;
88418 +}
88419 +#endif
88420 +
88421 +static bool is_alloca(gimple stmt)
88422 +{
88423 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
88424 + return true;
88425 +
88426 +#if BUILDING_GCC_VERSION >= 4007
88427 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
88428 + return true;
88429 +#endif
88430 +
88431 + return false;
88432 +}
88433 +
88434 +static unsigned int execute_stackleak_tree_instrument(void)
88435 +{
88436 + basic_block bb, entry_bb;
88437 + bool prologue_instrumented = false, is_leaf = true;
88438 +
88439 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
88440 +
88441 + // 1. loop through BBs and GIMPLE statements
88442 + FOR_EACH_BB(bb) {
88443 + gimple_stmt_iterator gsi;
88444 +
88445 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88446 + gimple stmt;
88447 +
88448 + stmt = gsi_stmt(gsi);
88449 +
88450 + if (is_gimple_call(stmt))
88451 + is_leaf = false;
88452 +
88453 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
88454 + if (!is_alloca(stmt))
88455 + continue;
88456 +
88457 + // 2. insert stack overflow check before each __builtin_alloca call
88458 + stackleak_check_alloca(&gsi);
88459 +
88460 + // 3. insert track call after each __builtin_alloca call
88461 + stackleak_add_instrumentation(&gsi);
88462 + if (bb == entry_bb)
88463 + prologue_instrumented = true;
88464 + }
88465 + }
88466 +
88467 + // special case for some bad linux code: taking the address of static inline functions will materialize them
88468 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
88469 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
88470 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
88471 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
88472 + return 0;
88473 +
88474 + // 4. insert track call at the beginning
88475 + if (!prologue_instrumented) {
88476 + gimple_stmt_iterator gsi;
88477 +
88478 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88479 + if (dom_info_available_p(CDI_DOMINATORS))
88480 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
88481 + gsi = gsi_start_bb(bb);
88482 + stackleak_add_instrumentation(&gsi);
88483 + }
88484 +
88485 + return 0;
88486 +}
88487 +
88488 +static unsigned int execute_stackleak_final(void)
88489 +{
88490 + rtx insn;
88491 +
88492 + if (cfun->calls_alloca)
88493 + return 0;
88494 +
88495 + // keep calls only if function frame is big enough
88496 + if (get_frame_size() >= track_frame_size)
88497 + return 0;
88498 +
88499 + // 1. find pax_track_stack calls
88500 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88501 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
88502 + rtx body;
88503 +
88504 + if (!CALL_P(insn))
88505 + continue;
88506 + body = PATTERN(insn);
88507 + if (GET_CODE(body) != CALL)
88508 + continue;
88509 + body = XEXP(body, 0);
88510 + if (GET_CODE(body) != MEM)
88511 + continue;
88512 + body = XEXP(body, 0);
88513 + if (GET_CODE(body) != SYMBOL_REF)
88514 + continue;
88515 + if (strcmp(XSTR(body, 0), track_function))
88516 + continue;
88517 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88518 + // 2. delete call
88519 + insn = delete_insn_and_edges(insn);
88520 +#if BUILDING_GCC_VERSION >= 4007
88521 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
88522 + insn = delete_insn_and_edges(insn);
88523 +#endif
88524 + }
88525 +
88526 +// print_simple_rtl(stderr, get_insns());
88527 +// print_rtl(stderr, get_insns());
88528 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88529 +
88530 + return 0;
88531 +}
88532 +
88533 +static void stackleak_start_unit(void *gcc_data, void *user_dat)
88534 +{
88535 + tree fntype;
88536 +
88537 + // declare void pax_check_alloca(unsigned long size)
88538 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
88539 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
88540 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
88541 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
88542 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
88543 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
88544 +
88545 + // declare void pax_track_stack(void)
88546 + fntype = build_function_type_list(void_type_node, NULL_TREE);
88547 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
88548 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
88549 + TREE_PUBLIC(pax_track_stack_decl) = 1;
88550 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
88551 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
88552 +}
88553 +
88554 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88555 +{
88556 + const char * const plugin_name = plugin_info->base_name;
88557 + const int argc = plugin_info->argc;
88558 + const struct plugin_argument * const argv = plugin_info->argv;
88559 + int i;
88560 + struct register_pass_info stackleak_tree_instrument_pass_info = {
88561 + .pass = &stackleak_tree_instrument_pass.pass,
88562 +// .reference_pass_name = "tree_profile",
88563 + .reference_pass_name = "optimized",
88564 + .ref_pass_instance_number = 0,
88565 + .pos_op = PASS_POS_INSERT_BEFORE
88566 + };
88567 + struct register_pass_info stackleak_final_pass_info = {
88568 + .pass = &stackleak_final_rtl_opt_pass.pass,
88569 + .reference_pass_name = "final",
88570 + .ref_pass_instance_number = 0,
88571 + .pos_op = PASS_POS_INSERT_BEFORE
88572 + };
88573 +
88574 + if (!plugin_default_version_check(version, &gcc_version)) {
88575 + error(G_("incompatible gcc/plugin versions"));
88576 + return 1;
88577 + }
88578 +
88579 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
88580 +
88581 + for (i = 0; i < argc; ++i) {
88582 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
88583 + if (!argv[i].value) {
88584 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88585 + continue;
88586 + }
88587 + track_frame_size = atoi(argv[i].value);
88588 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
88589 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88590 + continue;
88591 + }
88592 + if (!strcmp(argv[i].key, "initialize-locals")) {
88593 + if (argv[i].value) {
88594 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88595 + continue;
88596 + }
88597 + init_locals = true;
88598 + continue;
88599 + }
88600 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88601 + }
88602 +
88603 + register_callback("start_unit", PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
88604 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
88605 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
88606 +
88607 + return 0;
88608 +}
88609 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
88610 index 83b3dde..835bee7 100644
88611 --- a/usr/gen_init_cpio.c
88612 +++ b/usr/gen_init_cpio.c
88613 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
88614 int retval;
88615 int rc = -1;
88616 int namesize;
88617 - int i;
88618 + unsigned int i;
88619
88620 mode |= S_IFREG;
88621
88622 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
88623 *env_var = *expanded = '\0';
88624 strncat(env_var, start + 2, end - start - 2);
88625 strncat(expanded, new_location, start - new_location);
88626 - strncat(expanded, getenv(env_var), PATH_MAX);
88627 - strncat(expanded, end + 1, PATH_MAX);
88628 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
88629 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
88630 strncpy(new_location, expanded, PATH_MAX);
88631 + new_location[PATH_MAX] = 0;
88632 } else
88633 break;
88634 }
88635 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
88636 index 4f3434f..fc63040 100644
88637 --- a/virt/kvm/kvm_main.c
88638 +++ b/virt/kvm/kvm_main.c
88639 @@ -43,6 +43,8 @@
88640 #include <linux/swap.h>
88641 #include <linux/bitops.h>
88642 #include <linux/spinlock.h>
88643 +#include <linux/namei.h>
88644 +#include <linux/fs.h>
88645
88646 #include <asm/processor.h>
88647 #include <asm/io.h>
88648 @@ -575,12 +577,73 @@ out:
88649 return r;
88650 }
88651
88652 +/*
88653 + * We want to test whether the caller has been granted permissions to
88654 + * use this device. To be able to configure and control the device,
88655 + * the user needs access to PCI configuration space and BAR resources.
88656 + * These are accessed through PCI sysfs. PCI config space is often
88657 + * passed to the process calling this ioctl via file descriptor, so we
88658 + * can't rely on access to that file. We can check for permissions
88659 + * on each of the BAR resource files, which is a pretty clear
88660 + * indicator that the user has been granted access to the device.
88661 + */
88662 +static int probe_sysfs_permissions(struct pci_dev *dev)
88663 +{
88664 +#ifdef CONFIG_SYSFS
88665 + int i;
88666 + bool bar_found = false;
88667 +
88668 + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
88669 + char *kpath, *syspath;
88670 + struct path path;
88671 + struct inode *inode;
88672 + int r;
88673 +
88674 + if (!pci_resource_len(dev, i))
88675 + continue;
88676 +
88677 + kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
88678 + if (!kpath)
88679 + return -ENOMEM;
88680 +
88681 + /* Per sysfs-rules, sysfs is always at /sys */
88682 + syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
88683 + kfree(kpath);
88684 + if (!syspath)
88685 + return -ENOMEM;
88686 +
88687 + r = kern_path(syspath, LOOKUP_FOLLOW, &path);
88688 + kfree(syspath);
88689 + if (r)
88690 + return r;
88691 +
88692 + inode = path.dentry->d_inode;
88693 +
88694 + r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
88695 + path_put(&path);
88696 + if (r)
88697 + return r;
88698 +
88699 + bar_found = true;
88700 + }
88701 +
88702 + /* If no resources, probably something special */
88703 + if (!bar_found)
88704 + return -EPERM;
88705 +
88706 + return 0;
88707 +#else
88708 + return -EINVAL; /* No way to control the device without sysfs */
88709 +#endif
88710 +}
88711 +
88712 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
88713 struct kvm_assigned_pci_dev *assigned_dev)
88714 {
88715 int r = 0;
88716 struct kvm_assigned_dev_kernel *match;
88717 struct pci_dev *dev;
88718 + u8 header_type;
88719
88720 down_read(&kvm->slots_lock);
88721 mutex_lock(&kvm->lock);
88722 @@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
88723 r = -EINVAL;
88724 goto out_free;
88725 }
88726 +
88727 + /* Don't allow bridges to be assigned */
88728 + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
88729 + if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
88730 + r = -EPERM;
88731 + goto out_put;
88732 + }
88733 +
88734 + r = probe_sysfs_permissions(dev);
88735 + if (r)
88736 + goto out_put;
88737 +
88738 if (pci_enable_device(dev)) {
88739 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
88740 r = -EBUSY;
88741 @@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
88742 if (kvm_rebooting)
88743 /* spin while reset goes on */
88744 while (true)
88745 - ;
88746 + cpu_relax();
88747 /* Fault while not rebooting. We want the trace. */
88748 BUG();
88749 }
88750 @@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
88751 kvm_arch_vcpu_put(vcpu);
88752 }
88753
88754 -int kvm_init(void *opaque, unsigned int vcpu_size,
88755 +int kvm_init(const void *opaque, unsigned int vcpu_size,
88756 struct module *module)
88757 {
88758 int r;
88759 @@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
88760 /* A kmem cache lets us meet the alignment requirements of fx_save. */
88761 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
88762 __alignof__(struct kvm_vcpu),
88763 - 0, NULL);
88764 + SLAB_USERCOPY, NULL);
88765 if (!kvm_vcpu_cache) {
88766 r = -ENOMEM;
88767 goto out_free_5;
88768 }
88769
88770 - kvm_chardev_ops.owner = module;
88771 - kvm_vm_fops.owner = module;
88772 - kvm_vcpu_fops.owner = module;
88773 + pax_open_kernel();
88774 + *(void **)&kvm_chardev_ops.owner = module;
88775 + *(void **)&kvm_vm_fops.owner = module;
88776 + *(void **)&kvm_vcpu_fops.owner = module;
88777 + pax_close_kernel();
88778
88779 r = misc_register(&kvm_dev);
88780 if (r) {