]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.57-201202261953.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.57-201202261953.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index 3377650..095e46d 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243 +endif
244 +ifdef CONFIG_CHECKER_PLUGIN
245 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247 +endif
248 +endif
249 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252 +ifeq ($(KBUILD_EXTMOD),)
253 +gcc-plugins:
254 + $(Q)$(MAKE) $(build)=tools/gcc
255 +else
256 +gcc-plugins: ;
257 +endif
258 +else
259 +gcc-plugins:
260 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
262 +else
263 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264 +endif
265 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266 +endif
267 +endif
268 +
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272 @@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281 @@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290 @@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294 -$(vmlinux-dirs): prepare scripts
295 +$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299 @@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307 @@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316 @@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320 -modules_prepare: prepare scripts
321 +modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325 @@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329 - Module.symvers Module.markers tags TAGS cscope*
330 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334 @@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338 - -o -name '.*.rej' -o -size 0 \
339 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343 @@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347 + @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351 @@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360 @@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364 -tags TAGS cscope: FORCE
365 +tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369 @@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373 -%.s: %.c prepare scripts FORCE
374 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376 +%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.o: %.c prepare scripts FORCE
381 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 -%.s: %.S prepare scripts FORCE
388 +%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 -%.o: %.S prepare scripts FORCE
391 +%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 @@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399 -%/: prepare scripts FORCE
400 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 +%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406 -%.ko: prepare scripts FORCE
407 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414 index 610dff4..f396854 100644
415 --- a/arch/alpha/include/asm/atomic.h
416 +++ b/arch/alpha/include/asm/atomic.h
417 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421 +#define atomic64_read_unchecked(v) atomic64_read(v)
422 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
427 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
429 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430 +
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
435 index 5c75c1b..c82f878 100644
436 --- a/arch/alpha/include/asm/elf.h
437 +++ b/arch/alpha/include/asm/elf.h
438 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
439
440 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
441
442 +#ifdef CONFIG_PAX_ASLR
443 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
444 +
445 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
446 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
447 +#endif
448 +
449 /* $0 is set by ld.so to a pointer to a function which might be
450 registered using atexit. This provides a mean for the dynamic
451 linker to call DT_FINI functions for shared libraries that have
452 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
453 index 3f0c59f..cf1e100 100644
454 --- a/arch/alpha/include/asm/pgtable.h
455 +++ b/arch/alpha/include/asm/pgtable.h
456 @@ -101,6 +101,17 @@ struct vm_area_struct;
457 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
458 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
459 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
460 +
461 +#ifdef CONFIG_PAX_PAGEEXEC
462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
465 +#else
466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
467 +# define PAGE_COPY_NOEXEC PAGE_COPY
468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
469 +#endif
470 +
471 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
472
473 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
474 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
475 index ebc3c89..20cfa63 100644
476 --- a/arch/alpha/kernel/module.c
477 +++ b/arch/alpha/kernel/module.c
478 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
479
480 /* The small sections were sorted to the end of the segment.
481 The following should definitely cover them. */
482 - gp = (u64)me->module_core + me->core_size - 0x8000;
483 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
484 got = sechdrs[me->arch.gotsecindex].sh_addr;
485
486 for (i = 0; i < n; i++) {
487 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
488 index a94e49c..d71dd44 100644
489 --- a/arch/alpha/kernel/osf_sys.c
490 +++ b/arch/alpha/kernel/osf_sys.c
491 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
492 /* At this point: (!vma || addr < vma->vm_end). */
493 if (limit - len < addr)
494 return -ENOMEM;
495 - if (!vma || addr + len <= vma->vm_start)
496 + if (check_heap_stack_gap(vma, addr, len))
497 return addr;
498 addr = vma->vm_end;
499 vma = vma->vm_next;
500 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
501 merely specific addresses, but regions of memory -- perhaps
502 this feature should be incorporated into all ports? */
503
504 +#ifdef CONFIG_PAX_RANDMMAP
505 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
506 +#endif
507 +
508 if (addr) {
509 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
510 if (addr != (unsigned long) -ENOMEM)
511 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
512 }
513
514 /* Next, try allocating at TASK_UNMAPPED_BASE. */
515 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
516 - len, limit);
517 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
518 +
519 if (addr != (unsigned long) -ENOMEM)
520 return addr;
521
522 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
523 index 00a31de..2ded0f2 100644
524 --- a/arch/alpha/mm/fault.c
525 +++ b/arch/alpha/mm/fault.c
526 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
527 __reload_thread(pcb);
528 }
529
530 +#ifdef CONFIG_PAX_PAGEEXEC
531 +/*
532 + * PaX: decide what to do with offenders (regs->pc = fault address)
533 + *
534 + * returns 1 when task should be killed
535 + * 2 when patched PLT trampoline was detected
536 + * 3 when unpatched PLT trampoline was detected
537 + */
538 +static int pax_handle_fetch_fault(struct pt_regs *regs)
539 +{
540 +
541 +#ifdef CONFIG_PAX_EMUPLT
542 + int err;
543 +
544 + do { /* PaX: patched PLT emulation #1 */
545 + unsigned int ldah, ldq, jmp;
546 +
547 + err = get_user(ldah, (unsigned int *)regs->pc);
548 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
549 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
550 +
551 + if (err)
552 + break;
553 +
554 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
555 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
556 + jmp == 0x6BFB0000U)
557 + {
558 + unsigned long r27, addr;
559 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
560 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
561 +
562 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
563 + err = get_user(r27, (unsigned long *)addr);
564 + if (err)
565 + break;
566 +
567 + regs->r27 = r27;
568 + regs->pc = r27;
569 + return 2;
570 + }
571 + } while (0);
572 +
573 + do { /* PaX: patched PLT emulation #2 */
574 + unsigned int ldah, lda, br;
575 +
576 + err = get_user(ldah, (unsigned int *)regs->pc);
577 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
578 + err |= get_user(br, (unsigned int *)(regs->pc+8));
579 +
580 + if (err)
581 + break;
582 +
583 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
584 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
585 + (br & 0xFFE00000U) == 0xC3E00000U)
586 + {
587 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
588 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
589 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
590 +
591 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
592 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
593 + return 2;
594 + }
595 + } while (0);
596 +
597 + do { /* PaX: unpatched PLT emulation */
598 + unsigned int br;
599 +
600 + err = get_user(br, (unsigned int *)regs->pc);
601 +
602 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
603 + unsigned int br2, ldq, nop, jmp;
604 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
605 +
606 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
607 + err = get_user(br2, (unsigned int *)addr);
608 + err |= get_user(ldq, (unsigned int *)(addr+4));
609 + err |= get_user(nop, (unsigned int *)(addr+8));
610 + err |= get_user(jmp, (unsigned int *)(addr+12));
611 + err |= get_user(resolver, (unsigned long *)(addr+16));
612 +
613 + if (err)
614 + break;
615 +
616 + if (br2 == 0xC3600000U &&
617 + ldq == 0xA77B000CU &&
618 + nop == 0x47FF041FU &&
619 + jmp == 0x6B7B0000U)
620 + {
621 + regs->r28 = regs->pc+4;
622 + regs->r27 = addr+16;
623 + regs->pc = resolver;
624 + return 3;
625 + }
626 + }
627 + } while (0);
628 +#endif
629 +
630 + return 1;
631 +}
632 +
633 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
634 +{
635 + unsigned long i;
636 +
637 + printk(KERN_ERR "PAX: bytes at PC: ");
638 + for (i = 0; i < 5; i++) {
639 + unsigned int c;
640 + if (get_user(c, (unsigned int *)pc+i))
641 + printk(KERN_CONT "???????? ");
642 + else
643 + printk(KERN_CONT "%08x ", c);
644 + }
645 + printk("\n");
646 +}
647 +#endif
648
649 /*
650 * This routine handles page faults. It determines the address,
651 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
652 good_area:
653 si_code = SEGV_ACCERR;
654 if (cause < 0) {
655 - if (!(vma->vm_flags & VM_EXEC))
656 + if (!(vma->vm_flags & VM_EXEC)) {
657 +
658 +#ifdef CONFIG_PAX_PAGEEXEC
659 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
660 + goto bad_area;
661 +
662 + up_read(&mm->mmap_sem);
663 + switch (pax_handle_fetch_fault(regs)) {
664 +
665 +#ifdef CONFIG_PAX_EMUPLT
666 + case 2:
667 + case 3:
668 + return;
669 +#endif
670 +
671 + }
672 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
673 + do_group_exit(SIGKILL);
674 +#else
675 goto bad_area;
676 +#endif
677 +
678 + }
679 } else if (!cause) {
680 /* Allow reads even for write-only mappings */
681 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
682 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
683 index b68faef..6dd1496 100644
684 --- a/arch/arm/Kconfig
685 +++ b/arch/arm/Kconfig
686 @@ -14,6 +14,7 @@ config ARM
687 select SYS_SUPPORTS_APM_EMULATION
688 select HAVE_OPROFILE
689 select HAVE_ARCH_KGDB
690 + select GENERIC_ATOMIC64
691 select HAVE_KPROBES if (!XIP_KERNEL)
692 select HAVE_KRETPROBES if (HAVE_KPROBES)
693 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
694 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
695 index d0daeab..ff286a8 100644
696 --- a/arch/arm/include/asm/atomic.h
697 +++ b/arch/arm/include/asm/atomic.h
698 @@ -15,6 +15,10 @@
699 #include <linux/types.h>
700 #include <asm/system.h>
701
702 +#ifdef CONFIG_GENERIC_ATOMIC64
703 +#include <asm-generic/atomic64.h>
704 +#endif
705 +
706 #define ATOMIC_INIT(i) { (i) }
707
708 #ifdef __KERNEL__
709 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
710 index 6aac3f5..265536b 100644
711 --- a/arch/arm/include/asm/elf.h
712 +++ b/arch/arm/include/asm/elf.h
713 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719 +
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
724 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
725 +#endif
726
727 /* When the program starts, a1 contains a pointer to a function to be
728 registered with atexit, as per the SVR4 ABI. A value of 0 means we
729 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
730 index c019949..388fdd1 100644
731 --- a/arch/arm/include/asm/kmap_types.h
732 +++ b/arch/arm/include/asm/kmap_types.h
733 @@ -19,6 +19,7 @@ enum km_type {
734 KM_SOFTIRQ0,
735 KM_SOFTIRQ1,
736 KM_L2_CACHE,
737 + KM_CLEARPAGE,
738 KM_TYPE_NR
739 };
740
741 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
742 index 1d6bd40..fba0cb9 100644
743 --- a/arch/arm/include/asm/uaccess.h
744 +++ b/arch/arm/include/asm/uaccess.h
745 @@ -22,6 +22,8 @@
746 #define VERIFY_READ 0
747 #define VERIFY_WRITE 1
748
749 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
750 +
751 /*
752 * The exception table consists of pairs of addresses: the first is the
753 * address of an instruction that is allowed to fault, and the second is
754 @@ -387,8 +389,23 @@ do { \
755
756
757 #ifdef CONFIG_MMU
758 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
759 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
760 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
761 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
762 +
763 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
764 +{
765 + if (!__builtin_constant_p(n))
766 + check_object_size(to, n, false);
767 + return ___copy_from_user(to, from, n);
768 +}
769 +
770 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
771 +{
772 + if (!__builtin_constant_p(n))
773 + check_object_size(from, n, true);
774 + return ___copy_to_user(to, from, n);
775 +}
776 +
777 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
778 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
779 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
780 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
781
782 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
783 {
784 + if ((long)n < 0)
785 + return n;
786 +
787 if (access_ok(VERIFY_READ, from, n))
788 n = __copy_from_user(to, from, n);
789 else /* security hole - plug it */
790 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
791
792 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
793 {
794 + if ((long)n < 0)
795 + return n;
796 +
797 if (access_ok(VERIFY_WRITE, to, n))
798 n = __copy_to_user(to, from, n);
799 return n;
800 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
801 index 0e62770..e2c2cd6 100644
802 --- a/arch/arm/kernel/armksyms.c
803 +++ b/arch/arm/kernel/armksyms.c
804 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
805 #ifdef CONFIG_MMU
806 EXPORT_SYMBOL(copy_page);
807
808 -EXPORT_SYMBOL(__copy_from_user);
809 -EXPORT_SYMBOL(__copy_to_user);
810 +EXPORT_SYMBOL(___copy_from_user);
811 +EXPORT_SYMBOL(___copy_to_user);
812 EXPORT_SYMBOL(__clear_user);
813
814 EXPORT_SYMBOL(__get_user_1);
815 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
816 index ba8ccfe..2dc34dc 100644
817 --- a/arch/arm/kernel/kgdb.c
818 +++ b/arch/arm/kernel/kgdb.c
819 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
820 * and we handle the normal undef case within the do_undefinstr
821 * handler.
822 */
823 -struct kgdb_arch arch_kgdb_ops = {
824 +const struct kgdb_arch arch_kgdb_ops = {
825 #ifndef __ARMEB__
826 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
827 #else /* ! __ARMEB__ */
828 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
829 index 3f361a7..6e806e1 100644
830 --- a/arch/arm/kernel/traps.c
831 +++ b/arch/arm/kernel/traps.c
832 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
833
834 DEFINE_SPINLOCK(die_lock);
835
836 +extern void gr_handle_kernel_exploit(void);
837 +
838 /*
839 * This function is protected against re-entrancy.
840 */
841 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
842 if (panic_on_oops)
843 panic("Fatal exception");
844
845 + gr_handle_kernel_exploit();
846 +
847 do_exit(SIGSEGV);
848 }
849
850 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851 index e4fe124..0fc246b 100644
852 --- a/arch/arm/lib/copy_from_user.S
853 +++ b/arch/arm/lib/copy_from_user.S
854 @@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858 - * size_t __copy_from_user(void *to, const void *from, size_t n)
859 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863 @@ -84,11 +84,11 @@
864
865 .text
866
867 -ENTRY(__copy_from_user)
868 +ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872 -ENDPROC(__copy_from_user)
873 +ENDPROC(___copy_from_user)
874
875 .section .fixup,"ax"
876 .align 0
877 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878 index 1a71e15..ac7b258 100644
879 --- a/arch/arm/lib/copy_to_user.S
880 +++ b/arch/arm/lib/copy_to_user.S
881 @@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885 - * size_t __copy_to_user(void *to, const void *from, size_t n)
886 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890 @@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894 -WEAK(__copy_to_user)
895 +WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899 -ENDPROC(__copy_to_user)
900 +ENDPROC(___copy_to_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905 index ffdd274..91017b6 100644
906 --- a/arch/arm/lib/uaccess.S
907 +++ b/arch/arm/lib/uaccess.S
908 @@ -19,7 +19,7 @@
909
910 #define PAGE_SHIFT 12
911
912 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921 -ENTRY(__copy_to_user)
922 +ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( strgtbt r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930 -ENDPROC(__copy_to_user)
931 +ENDPROC(___copy_to_user)
932
933 .section .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .previous
937
938 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947 -ENTRY(__copy_from_user)
948 +ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
953 USER( ldrgtbt r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956 -ENDPROC(__copy_from_user)
957 +ENDPROC(___copy_from_user)
958
959 .section .fixup,"ax"
960 .align 0
961 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962 index 6b967ff..67d5b2b 100644
963 --- a/arch/arm/lib/uaccess_with_memcpy.c
964 +++ b/arch/arm/lib/uaccess_with_memcpy.c
965 @@ -97,7 +97,7 @@ out:
966 }
967
968 unsigned long
969 -__copy_to_user(void __user *to, const void *from, unsigned long n)
970 +___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
975 index 4028724..beec230 100644
976 --- a/arch/arm/mach-at91/pm.c
977 +++ b/arch/arm/mach-at91/pm.c
978 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
979 }
980
981
982 -static struct platform_suspend_ops at91_pm_ops ={
983 +static const struct platform_suspend_ops at91_pm_ops ={
984 .valid = at91_pm_valid_state,
985 .begin = at91_pm_begin,
986 .enter = at91_pm_enter,
987 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
988 index 5218943..0a34552 100644
989 --- a/arch/arm/mach-omap1/pm.c
990 +++ b/arch/arm/mach-omap1/pm.c
991 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
992
993
994
995 -static struct platform_suspend_ops omap_pm_ops ={
996 +static const struct platform_suspend_ops omap_pm_ops ={
997 .prepare = omap_pm_prepare,
998 .enter = omap_pm_enter,
999 .finish = omap_pm_finish,
1000 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1001 index bff5c4e..d4c649b 100644
1002 --- a/arch/arm/mach-omap2/pm24xx.c
1003 +++ b/arch/arm/mach-omap2/pm24xx.c
1004 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1005 enable_hlt();
1006 }
1007
1008 -static struct platform_suspend_ops omap_pm_ops = {
1009 +static const struct platform_suspend_ops omap_pm_ops = {
1010 .prepare = omap2_pm_prepare,
1011 .enter = omap2_pm_enter,
1012 .finish = omap2_pm_finish,
1013 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1014 index 8946319..7d3e661 100644
1015 --- a/arch/arm/mach-omap2/pm34xx.c
1016 +++ b/arch/arm/mach-omap2/pm34xx.c
1017 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1018 return;
1019 }
1020
1021 -static struct platform_suspend_ops omap_pm_ops = {
1022 +static const struct platform_suspend_ops omap_pm_ops = {
1023 .begin = omap3_pm_begin,
1024 .end = omap3_pm_end,
1025 .prepare = omap3_pm_prepare,
1026 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1027 index b3d8d53..6e68ebc 100644
1028 --- a/arch/arm/mach-pnx4008/pm.c
1029 +++ b/arch/arm/mach-pnx4008/pm.c
1030 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1031 (state == PM_SUSPEND_MEM);
1032 }
1033
1034 -static struct platform_suspend_ops pnx4008_pm_ops = {
1035 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1036 .enter = pnx4008_pm_enter,
1037 .valid = pnx4008_pm_valid,
1038 };
1039 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1040 index 7693355..9beb00a 100644
1041 --- a/arch/arm/mach-pxa/pm.c
1042 +++ b/arch/arm/mach-pxa/pm.c
1043 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1044 pxa_cpu_pm_fns->finish();
1045 }
1046
1047 -static struct platform_suspend_ops pxa_pm_ops = {
1048 +static const struct platform_suspend_ops pxa_pm_ops = {
1049 .valid = pxa_pm_valid,
1050 .enter = pxa_pm_enter,
1051 .prepare = pxa_pm_prepare,
1052 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1053 index 629e05d..06be589 100644
1054 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1055 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1056 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1057 }
1058
1059 #ifdef CONFIG_PM
1060 -static struct platform_suspend_ops sharpsl_pm_ops = {
1061 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1062 .prepare = pxa_pm_prepare,
1063 .finish = pxa_pm_finish,
1064 .enter = corgi_pxa_pm_enter,
1065 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1066 index c83fdc8..ab9fc44 100644
1067 --- a/arch/arm/mach-sa1100/pm.c
1068 +++ b/arch/arm/mach-sa1100/pm.c
1069 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1070 return virt_to_phys(sp);
1071 }
1072
1073 -static struct platform_suspend_ops sa11x0_pm_ops = {
1074 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1075 .enter = sa11x0_pm_enter,
1076 .valid = suspend_valid_only_mem,
1077 };
1078 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1079 index 3191cd6..c0739db 100644
1080 --- a/arch/arm/mm/fault.c
1081 +++ b/arch/arm/mm/fault.c
1082 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1083 }
1084 #endif
1085
1086 +#ifdef CONFIG_PAX_PAGEEXEC
1087 + if (fsr & FSR_LNX_PF) {
1088 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1089 + do_group_exit(SIGKILL);
1090 + }
1091 +#endif
1092 +
1093 tsk->thread.address = addr;
1094 tsk->thread.error_code = fsr;
1095 tsk->thread.trap_no = 14;
1096 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1097 }
1098 #endif /* CONFIG_MMU */
1099
1100 +#ifdef CONFIG_PAX_PAGEEXEC
1101 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1102 +{
1103 + long i;
1104 +
1105 + printk(KERN_ERR "PAX: bytes at PC: ");
1106 + for (i = 0; i < 20; i++) {
1107 + unsigned char c;
1108 + if (get_user(c, (__force unsigned char __user *)pc+i))
1109 + printk(KERN_CONT "?? ");
1110 + else
1111 + printk(KERN_CONT "%02x ", c);
1112 + }
1113 + printk("\n");
1114 +
1115 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1116 + for (i = -1; i < 20; i++) {
1117 + unsigned long c;
1118 + if (get_user(c, (__force unsigned long __user *)sp+i))
1119 + printk(KERN_CONT "???????? ");
1120 + else
1121 + printk(KERN_CONT "%08lx ", c);
1122 + }
1123 + printk("\n");
1124 +}
1125 +#endif
1126 +
1127 /*
1128 * First Level Translation Fault Handler
1129 *
1130 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1131 index f5abc51..7ec524c 100644
1132 --- a/arch/arm/mm/mmap.c
1133 +++ b/arch/arm/mm/mmap.c
1134 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1135 if (len > TASK_SIZE)
1136 return -ENOMEM;
1137
1138 +#ifdef CONFIG_PAX_RANDMMAP
1139 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1140 +#endif
1141 +
1142 if (addr) {
1143 if (do_align)
1144 addr = COLOUR_ALIGN(addr, pgoff);
1145 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1146 addr = PAGE_ALIGN(addr);
1147
1148 vma = find_vma(mm, addr);
1149 - if (TASK_SIZE - len >= addr &&
1150 - (!vma || addr + len <= vma->vm_start))
1151 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1152 return addr;
1153 }
1154 if (len > mm->cached_hole_size) {
1155 - start_addr = addr = mm->free_area_cache;
1156 + start_addr = addr = mm->free_area_cache;
1157 } else {
1158 - start_addr = addr = TASK_UNMAPPED_BASE;
1159 - mm->cached_hole_size = 0;
1160 + start_addr = addr = mm->mmap_base;
1161 + mm->cached_hole_size = 0;
1162 }
1163
1164 full_search:
1165 @@ -94,14 +97,14 @@ full_search:
1166 * Start a new search - just in case we missed
1167 * some holes.
1168 */
1169 - if (start_addr != TASK_UNMAPPED_BASE) {
1170 - start_addr = addr = TASK_UNMAPPED_BASE;
1171 + if (start_addr != mm->mmap_base) {
1172 + start_addr = addr = mm->mmap_base;
1173 mm->cached_hole_size = 0;
1174 goto full_search;
1175 }
1176 return -ENOMEM;
1177 }
1178 - if (!vma || addr + len <= vma->vm_start) {
1179 + if (check_heap_stack_gap(vma, addr, len)) {
1180 /*
1181 * Remember the place where we stopped the search:
1182 */
1183 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1184 index 8d97db2..b66cfa5 100644
1185 --- a/arch/arm/plat-s3c/pm.c
1186 +++ b/arch/arm/plat-s3c/pm.c
1187 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1188 s3c_pm_check_cleanup();
1189 }
1190
1191 -static struct platform_suspend_ops s3c_pm_ops = {
1192 +static const struct platform_suspend_ops s3c_pm_ops = {
1193 .enter = s3c_pm_enter,
1194 .prepare = s3c_pm_prepare,
1195 .finish = s3c_pm_finish,
1196 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1197 index d5d1d41..856e2ed 100644
1198 --- a/arch/avr32/include/asm/elf.h
1199 +++ b/arch/avr32/include/asm/elf.h
1200 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1201 the loader. We need to make sure that it is out of the way of the program
1202 that it will "exec", and that there is sufficient room for the brk. */
1203
1204 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1205 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1206
1207 +#ifdef CONFIG_PAX_ASLR
1208 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1209 +
1210 +#define PAX_DELTA_MMAP_LEN 15
1211 +#define PAX_DELTA_STACK_LEN 15
1212 +#endif
1213
1214 /* This yields a mask that user programs can use to figure out what
1215 instruction set this CPU supports. This could be done in user space,
1216 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1217 index b7f5c68..556135c 100644
1218 --- a/arch/avr32/include/asm/kmap_types.h
1219 +++ b/arch/avr32/include/asm/kmap_types.h
1220 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1221 D(11) KM_IRQ1,
1222 D(12) KM_SOFTIRQ0,
1223 D(13) KM_SOFTIRQ1,
1224 -D(14) KM_TYPE_NR
1225 +D(14) KM_CLEARPAGE,
1226 +D(15) KM_TYPE_NR
1227 };
1228
1229 #undef D
1230 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1231 index f021edf..32d680e 100644
1232 --- a/arch/avr32/mach-at32ap/pm.c
1233 +++ b/arch/avr32/mach-at32ap/pm.c
1234 @@ -176,7 +176,7 @@ out:
1235 return 0;
1236 }
1237
1238 -static struct platform_suspend_ops avr32_pm_ops = {
1239 +static const struct platform_suspend_ops avr32_pm_ops = {
1240 .valid = avr32_pm_valid_state,
1241 .enter = avr32_pm_enter,
1242 };
1243 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1244 index b61d86d..e292c7f 100644
1245 --- a/arch/avr32/mm/fault.c
1246 +++ b/arch/avr32/mm/fault.c
1247 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1248
1249 int exception_trace = 1;
1250
1251 +#ifdef CONFIG_PAX_PAGEEXEC
1252 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1253 +{
1254 + unsigned long i;
1255 +
1256 + printk(KERN_ERR "PAX: bytes at PC: ");
1257 + for (i = 0; i < 20; i++) {
1258 + unsigned char c;
1259 + if (get_user(c, (unsigned char *)pc+i))
1260 + printk(KERN_CONT "???????? ");
1261 + else
1262 + printk(KERN_CONT "%02x ", c);
1263 + }
1264 + printk("\n");
1265 +}
1266 +#endif
1267 +
1268 /*
1269 * This routine handles page faults. It determines the address and the
1270 * problem, and then passes it off to one of the appropriate routines.
1271 @@ -157,6 +174,16 @@ bad_area:
1272 up_read(&mm->mmap_sem);
1273
1274 if (user_mode(regs)) {
1275 +
1276 +#ifdef CONFIG_PAX_PAGEEXEC
1277 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1278 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1279 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1280 + do_group_exit(SIGKILL);
1281 + }
1282 + }
1283 +#endif
1284 +
1285 if (exception_trace && printk_ratelimit())
1286 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1287 "sp %08lx ecr %lu\n",
1288 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1289 index cce79d0..c406c85 100644
1290 --- a/arch/blackfin/kernel/kgdb.c
1291 +++ b/arch/blackfin/kernel/kgdb.c
1292 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1293 return -1; /* this means that we do not want to exit from the handler */
1294 }
1295
1296 -struct kgdb_arch arch_kgdb_ops = {
1297 +const struct kgdb_arch arch_kgdb_ops = {
1298 .gdb_bpt_instr = {0xa1},
1299 #ifdef CONFIG_SMP
1300 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1301 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1302 index 8837be4..b2fb413 100644
1303 --- a/arch/blackfin/mach-common/pm.c
1304 +++ b/arch/blackfin/mach-common/pm.c
1305 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1306 return 0;
1307 }
1308
1309 -struct platform_suspend_ops bfin_pm_ops = {
1310 +const struct platform_suspend_ops bfin_pm_ops = {
1311 .enter = bfin_pm_enter,
1312 .valid = bfin_pm_valid,
1313 };
1314 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1315 index 00a57af..c3ef0cd 100644
1316 --- a/arch/frv/include/asm/atomic.h
1317 +++ b/arch/frv/include/asm/atomic.h
1318 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1319 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1320 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1321
1322 +#define atomic64_read_unchecked(v) atomic64_read(v)
1323 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1324 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1325 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1326 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1327 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1328 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1329 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1330 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1331 +
1332 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1333 {
1334 int c, old;
1335 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1336 index f8e16b2..c73ff79 100644
1337 --- a/arch/frv/include/asm/kmap_types.h
1338 +++ b/arch/frv/include/asm/kmap_types.h
1339 @@ -23,6 +23,7 @@ enum km_type {
1340 KM_IRQ1,
1341 KM_SOFTIRQ0,
1342 KM_SOFTIRQ1,
1343 + KM_CLEARPAGE,
1344 KM_TYPE_NR
1345 };
1346
1347 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1348 index 385fd30..6c3d97e 100644
1349 --- a/arch/frv/mm/elf-fdpic.c
1350 +++ b/arch/frv/mm/elf-fdpic.c
1351 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1352 if (addr) {
1353 addr = PAGE_ALIGN(addr);
1354 vma = find_vma(current->mm, addr);
1355 - if (TASK_SIZE - len >= addr &&
1356 - (!vma || addr + len <= vma->vm_start))
1357 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 }
1360
1361 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365 - if (addr + len <= vma->vm_start)
1366 + if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1371 for (; vma; vma = vma->vm_next) {
1372 if (addr > limit)
1373 break;
1374 - if (addr + len <= vma->vm_start)
1375 + if (check_heap_stack_gap(vma, addr, len))
1376 goto success;
1377 addr = vma->vm_end;
1378 }
1379 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1380 index e4a80d8..11a7ea1 100644
1381 --- a/arch/ia64/hp/common/hwsw_iommu.c
1382 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1383 @@ -17,7 +17,7 @@
1384 #include <linux/swiotlb.h>
1385 #include <asm/machvec.h>
1386
1387 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1388 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1389
1390 /* swiotlb declarations & definitions: */
1391 extern int swiotlb_late_init_with_default_size (size_t size);
1392 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1393 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1394 }
1395
1396 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1397 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1398 {
1399 if (use_swiotlb(dev))
1400 return &swiotlb_dma_ops;
1401 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1402 index 01ae69b..35752fd 100644
1403 --- a/arch/ia64/hp/common/sba_iommu.c
1404 +++ b/arch/ia64/hp/common/sba_iommu.c
1405 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1406 },
1407 };
1408
1409 -extern struct dma_map_ops swiotlb_dma_ops;
1410 +extern const struct dma_map_ops swiotlb_dma_ops;
1411
1412 static int __init
1413 sba_init(void)
1414 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1415
1416 __setup("sbapagesize=",sba_page_override);
1417
1418 -struct dma_map_ops sba_dma_ops = {
1419 +const struct dma_map_ops sba_dma_ops = {
1420 .alloc_coherent = sba_alloc_coherent,
1421 .free_coherent = sba_free_coherent,
1422 .map_page = sba_map_page,
1423 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1424 index c69552b..c7122f4 100644
1425 --- a/arch/ia64/ia32/binfmt_elf32.c
1426 +++ b/arch/ia64/ia32/binfmt_elf32.c
1427 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1428
1429 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1430
1431 +#ifdef CONFIG_PAX_ASLR
1432 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1433 +
1434 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1435 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1436 +#endif
1437 +
1438 /* Ugly but avoids duplication */
1439 #include "../../../fs/binfmt_elf.c"
1440
1441 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1442 index 0f15349..26b3429 100644
1443 --- a/arch/ia64/ia32/ia32priv.h
1444 +++ b/arch/ia64/ia32/ia32priv.h
1445 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1446 #define ELF_DATA ELFDATA2LSB
1447 #define ELF_ARCH EM_386
1448
1449 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1450 +#ifdef CONFIG_PAX_RANDUSTACK
1451 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1452 +#else
1453 +#define __IA32_DELTA_STACK 0UL
1454 +#endif
1455 +
1456 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1457 +
1458 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1459 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1460
1461 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1462 index 88405cb..de5ca5d 100644
1463 --- a/arch/ia64/include/asm/atomic.h
1464 +++ b/arch/ia64/include/asm/atomic.h
1465 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1466 #define atomic64_inc(v) atomic64_add(1, (v))
1467 #define atomic64_dec(v) atomic64_sub(1, (v))
1468
1469 +#define atomic64_read_unchecked(v) atomic64_read(v)
1470 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1471 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1472 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1473 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1474 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1475 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1476 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1477 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1478 +
1479 /* Atomic operations are already serializing */
1480 #define smp_mb__before_atomic_dec() barrier()
1481 #define smp_mb__after_atomic_dec() barrier()
1482 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1483 index 8d3c79c..71b3af6 100644
1484 --- a/arch/ia64/include/asm/dma-mapping.h
1485 +++ b/arch/ia64/include/asm/dma-mapping.h
1486 @@ -12,7 +12,7 @@
1487
1488 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1489
1490 -extern struct dma_map_ops *dma_ops;
1491 +extern const struct dma_map_ops *dma_ops;
1492 extern struct ia64_machine_vector ia64_mv;
1493 extern void set_iommu_machvec(void);
1494
1495 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1496 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1497 dma_addr_t *daddr, gfp_t gfp)
1498 {
1499 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 void *caddr;
1502
1503 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1504 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1505 static inline void dma_free_coherent(struct device *dev, size_t size,
1506 void *caddr, dma_addr_t daddr)
1507 {
1508 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 debug_dma_free_coherent(dev, size, caddr, daddr);
1511 ops->free_coherent(dev, size, caddr, daddr);
1512 }
1513 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1514
1515 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1516 {
1517 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1518 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1519 return ops->mapping_error(dev, daddr);
1520 }
1521
1522 static inline int dma_supported(struct device *dev, u64 mask)
1523 {
1524 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1525 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1526 return ops->dma_supported(dev, mask);
1527 }
1528
1529 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1530 index 86eddee..b116bb4 100644
1531 --- a/arch/ia64/include/asm/elf.h
1532 +++ b/arch/ia64/include/asm/elf.h
1533 @@ -43,6 +43,13 @@
1534 */
1535 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1536
1537 +#ifdef CONFIG_PAX_ASLR
1538 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1539 +
1540 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1541 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1542 +#endif
1543 +
1544 #define PT_IA_64_UNWIND 0x70000001
1545
1546 /* IA-64 relocations: */
1547 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1548 index 367d299..9ad4279 100644
1549 --- a/arch/ia64/include/asm/machvec.h
1550 +++ b/arch/ia64/include/asm/machvec.h
1551 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1552 /* DMA-mapping interface: */
1553 typedef void ia64_mv_dma_init (void);
1554 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1555 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1556 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1557
1558 /*
1559 * WARNING: The legacy I/O space is _architected_. Platforms are
1560 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1561 # endif /* CONFIG_IA64_GENERIC */
1562
1563 extern void swiotlb_dma_init(void);
1564 -extern struct dma_map_ops *dma_get_ops(struct device *);
1565 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1566
1567 /*
1568 * Define default versions so we can extend machvec for new platforms without having
1569 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1570 index 8840a69..cdb63d9 100644
1571 --- a/arch/ia64/include/asm/pgtable.h
1572 +++ b/arch/ia64/include/asm/pgtable.h
1573 @@ -12,7 +12,7 @@
1574 * David Mosberger-Tang <davidm@hpl.hp.com>
1575 */
1576
1577 -
1578 +#include <linux/const.h>
1579 #include <asm/mman.h>
1580 #include <asm/page.h>
1581 #include <asm/processor.h>
1582 @@ -143,6 +143,17 @@
1583 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1584 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1585 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1586 +
1587 +#ifdef CONFIG_PAX_PAGEEXEC
1588 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1589 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1590 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1591 +#else
1592 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1593 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1594 +# define PAGE_COPY_NOEXEC PAGE_COPY
1595 +#endif
1596 +
1597 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1598 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1599 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1600 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1601 index 239ecdc..f94170e 100644
1602 --- a/arch/ia64/include/asm/spinlock.h
1603 +++ b/arch/ia64/include/asm/spinlock.h
1604 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1605 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1606
1607 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1608 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1609 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1610 }
1611
1612 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1613 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1614 index 449c8c0..432a3d2 100644
1615 --- a/arch/ia64/include/asm/uaccess.h
1616 +++ b/arch/ia64/include/asm/uaccess.h
1617 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 const void *__cu_from = (from); \
1619 long __cu_len = (n); \
1620 \
1621 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1622 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1623 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1627 long __cu_len = (n); \
1628 \
1629 __chk_user_ptr(__cu_from); \
1630 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1631 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1632 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1633 __cu_len; \
1634 })
1635 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1636 index f2c1600..969398a 100644
1637 --- a/arch/ia64/kernel/dma-mapping.c
1638 +++ b/arch/ia64/kernel/dma-mapping.c
1639 @@ -3,7 +3,7 @@
1640 /* Set this to 1 if there is a HW IOMMU in the system */
1641 int iommu_detected __read_mostly;
1642
1643 -struct dma_map_ops *dma_ops;
1644 +const struct dma_map_ops *dma_ops;
1645 EXPORT_SYMBOL(dma_ops);
1646
1647 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1648 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1649 }
1650 fs_initcall(dma_init);
1651
1652 -struct dma_map_ops *dma_get_ops(struct device *dev)
1653 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1654 {
1655 return dma_ops;
1656 }
1657 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1658 index 1481b0a..e7d38ff 100644
1659 --- a/arch/ia64/kernel/module.c
1660 +++ b/arch/ia64/kernel/module.c
1661 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1662 void
1663 module_free (struct module *mod, void *module_region)
1664 {
1665 - if (mod && mod->arch.init_unw_table &&
1666 - module_region == mod->module_init) {
1667 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1668 unw_remove_unwind_table(mod->arch.init_unw_table);
1669 mod->arch.init_unw_table = NULL;
1670 }
1671 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1672 }
1673
1674 static inline int
1675 +in_init_rx (const struct module *mod, uint64_t addr)
1676 +{
1677 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1678 +}
1679 +
1680 +static inline int
1681 +in_init_rw (const struct module *mod, uint64_t addr)
1682 +{
1683 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1684 +}
1685 +
1686 +static inline int
1687 in_init (const struct module *mod, uint64_t addr)
1688 {
1689 - return addr - (uint64_t) mod->module_init < mod->init_size;
1690 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1691 +}
1692 +
1693 +static inline int
1694 +in_core_rx (const struct module *mod, uint64_t addr)
1695 +{
1696 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1697 +}
1698 +
1699 +static inline int
1700 +in_core_rw (const struct module *mod, uint64_t addr)
1701 +{
1702 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1703 }
1704
1705 static inline int
1706 in_core (const struct module *mod, uint64_t addr)
1707 {
1708 - return addr - (uint64_t) mod->module_core < mod->core_size;
1709 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1710 }
1711
1712 static inline int
1713 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1714 break;
1715
1716 case RV_BDREL:
1717 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1718 + if (in_init_rx(mod, val))
1719 + val -= (uint64_t) mod->module_init_rx;
1720 + else if (in_init_rw(mod, val))
1721 + val -= (uint64_t) mod->module_init_rw;
1722 + else if (in_core_rx(mod, val))
1723 + val -= (uint64_t) mod->module_core_rx;
1724 + else if (in_core_rw(mod, val))
1725 + val -= (uint64_t) mod->module_core_rw;
1726 break;
1727
1728 case RV_LTV:
1729 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1730 * addresses have been selected...
1731 */
1732 uint64_t gp;
1733 - if (mod->core_size > MAX_LTOFF)
1734 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1735 /*
1736 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1737 * at the end of the module.
1738 */
1739 - gp = mod->core_size - MAX_LTOFF / 2;
1740 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1741 else
1742 - gp = mod->core_size / 2;
1743 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1744 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1745 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1746 mod->arch.gp = gp;
1747 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1748 }
1749 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1750 index f6b1ff0..de773fb 100644
1751 --- a/arch/ia64/kernel/pci-dma.c
1752 +++ b/arch/ia64/kernel/pci-dma.c
1753 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1754 .dma_mask = &fallback_dev.coherent_dma_mask,
1755 };
1756
1757 -extern struct dma_map_ops intel_dma_ops;
1758 +extern const struct dma_map_ops intel_dma_ops;
1759
1760 static int __init pci_iommu_init(void)
1761 {
1762 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1763 }
1764 EXPORT_SYMBOL(iommu_dma_supported);
1765
1766 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1767 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1768 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1769 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1770 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1771 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1772 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1773 +
1774 +static const struct dma_map_ops intel_iommu_dma_ops = {
1775 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1776 + .alloc_coherent = intel_alloc_coherent,
1777 + .free_coherent = intel_free_coherent,
1778 + .map_sg = intel_map_sg,
1779 + .unmap_sg = intel_unmap_sg,
1780 + .map_page = intel_map_page,
1781 + .unmap_page = intel_unmap_page,
1782 + .mapping_error = intel_mapping_error,
1783 +
1784 + .sync_single_for_cpu = machvec_dma_sync_single,
1785 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1786 + .sync_single_for_device = machvec_dma_sync_single,
1787 + .sync_sg_for_device = machvec_dma_sync_sg,
1788 + .dma_supported = iommu_dma_supported,
1789 +};
1790 +
1791 void __init pci_iommu_alloc(void)
1792 {
1793 - dma_ops = &intel_dma_ops;
1794 -
1795 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1796 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1797 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1798 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1799 - dma_ops->dma_supported = iommu_dma_supported;
1800 + dma_ops = &intel_iommu_dma_ops;
1801
1802 /*
1803 * The order of these functions is important for
1804 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1805 index 285aae8..61dbab6 100644
1806 --- a/arch/ia64/kernel/pci-swiotlb.c
1807 +++ b/arch/ia64/kernel/pci-swiotlb.c
1808 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1809 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1810 }
1811
1812 -struct dma_map_ops swiotlb_dma_ops = {
1813 +const struct dma_map_ops swiotlb_dma_ops = {
1814 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1815 .free_coherent = swiotlb_free_coherent,
1816 .map_page = swiotlb_map_page,
1817 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1818 index 609d500..7dde2a8 100644
1819 --- a/arch/ia64/kernel/sys_ia64.c
1820 +++ b/arch/ia64/kernel/sys_ia64.c
1821 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1822 if (REGION_NUMBER(addr) == RGN_HPAGE)
1823 addr = 0;
1824 #endif
1825 +
1826 +#ifdef CONFIG_PAX_RANDMMAP
1827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1828 + addr = mm->free_area_cache;
1829 + else
1830 +#endif
1831 +
1832 if (!addr)
1833 addr = mm->free_area_cache;
1834
1835 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1836 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1837 /* At this point: (!vma || addr < vma->vm_end). */
1838 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 + if (start_addr != mm->mmap_base) {
1841 /* Start a new search --- just in case we missed some holes. */
1842 - addr = TASK_UNMAPPED_BASE;
1843 + addr = mm->mmap_base;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /* Remember the address where we stopped this search: */
1851 mm->free_area_cache = addr + len;
1852 return addr;
1853 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1854 index 8f06035..b3a5818 100644
1855 --- a/arch/ia64/kernel/topology.c
1856 +++ b/arch/ia64/kernel/topology.c
1857 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1858 return ret;
1859 }
1860
1861 -static struct sysfs_ops cache_sysfs_ops = {
1862 +static const struct sysfs_ops cache_sysfs_ops = {
1863 .show = cache_show
1864 };
1865
1866 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1867 index 0a0c77b..8e55a81 100644
1868 --- a/arch/ia64/kernel/vmlinux.lds.S
1869 +++ b/arch/ia64/kernel/vmlinux.lds.S
1870 @@ -190,7 +190,7 @@ SECTIONS
1871 /* Per-cpu data: */
1872 . = ALIGN(PERCPU_PAGE_SIZE);
1873 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1874 - __phys_per_cpu_start = __per_cpu_load;
1875 + __phys_per_cpu_start = per_cpu_load;
1876 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1877 * into percpu page size
1878 */
1879 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1880 index 19261a9..1611b7a 100644
1881 --- a/arch/ia64/mm/fault.c
1882 +++ b/arch/ia64/mm/fault.c
1883 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1884 return pte_present(pte);
1885 }
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1889 +{
1890 + unsigned long i;
1891 +
1892 + printk(KERN_ERR "PAX: bytes at PC: ");
1893 + for (i = 0; i < 8; i++) {
1894 + unsigned int c;
1895 + if (get_user(c, (unsigned int *)pc+i))
1896 + printk(KERN_CONT "???????? ");
1897 + else
1898 + printk(KERN_CONT "%08x ", c);
1899 + }
1900 + printk("\n");
1901 +}
1902 +#endif
1903 +
1904 void __kprobes
1905 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1906 {
1907 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1908 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1909 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1910
1911 - if ((vma->vm_flags & mask) != mask)
1912 + if ((vma->vm_flags & mask) != mask) {
1913 +
1914 +#ifdef CONFIG_PAX_PAGEEXEC
1915 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1916 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1917 + goto bad_area;
1918 +
1919 + up_read(&mm->mmap_sem);
1920 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1921 + do_group_exit(SIGKILL);
1922 + }
1923 +#endif
1924 +
1925 goto bad_area;
1926
1927 + }
1928 +
1929 survive:
1930 /*
1931 * If for any reason at all we couldn't handle the fault, make
1932 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1933 index b0f6157..a082bbc 100644
1934 --- a/arch/ia64/mm/hugetlbpage.c
1935 +++ b/arch/ia64/mm/hugetlbpage.c
1936 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1937 /* At this point: (!vmm || addr < vmm->vm_end). */
1938 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1939 return -ENOMEM;
1940 - if (!vmm || (addr + len) <= vmm->vm_start)
1941 + if (check_heap_stack_gap(vmm, addr, len))
1942 return addr;
1943 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1944 }
1945 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1946 index 1857766..05cc6a3 100644
1947 --- a/arch/ia64/mm/init.c
1948 +++ b/arch/ia64/mm/init.c
1949 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1950 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1951 vma->vm_end = vma->vm_start + PAGE_SIZE;
1952 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1953 +
1954 +#ifdef CONFIG_PAX_PAGEEXEC
1955 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1956 + vma->vm_flags &= ~VM_EXEC;
1957 +
1958 +#ifdef CONFIG_PAX_MPROTECT
1959 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1960 + vma->vm_flags &= ~VM_MAYEXEC;
1961 +#endif
1962 +
1963 + }
1964 +#endif
1965 +
1966 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1967 down_write(&current->mm->mmap_sem);
1968 if (insert_vm_struct(current->mm, vma)) {
1969 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1970 index 98b6849..8046766 100644
1971 --- a/arch/ia64/sn/pci/pci_dma.c
1972 +++ b/arch/ia64/sn/pci/pci_dma.c
1973 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1974 return ret;
1975 }
1976
1977 -static struct dma_map_ops sn_dma_ops = {
1978 +static const struct dma_map_ops sn_dma_ops = {
1979 .alloc_coherent = sn_dma_alloc_coherent,
1980 .free_coherent = sn_dma_free_coherent,
1981 .map_page = sn_dma_map_page,
1982 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1983 index 82abd15..d95ae5d 100644
1984 --- a/arch/m32r/lib/usercopy.c
1985 +++ b/arch/m32r/lib/usercopy.c
1986 @@ -14,6 +14,9 @@
1987 unsigned long
1988 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1989 {
1990 + if ((long)n < 0)
1991 + return n;
1992 +
1993 prefetch(from);
1994 if (access_ok(VERIFY_WRITE, to, n))
1995 __copy_user(to,from,n);
1996 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1997 unsigned long
1998 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1999 {
2000 + if ((long)n < 0)
2001 + return n;
2002 +
2003 prefetchw(to);
2004 if (access_ok(VERIFY_READ, from, n))
2005 __copy_user_zeroing(to,from,n);
2006 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2007 index fd7620f..63d73a6 100644
2008 --- a/arch/mips/Kconfig
2009 +++ b/arch/mips/Kconfig
2010 @@ -5,6 +5,7 @@ config MIPS
2011 select HAVE_IDE
2012 select HAVE_OPROFILE
2013 select HAVE_ARCH_KGDB
2014 + select GENERIC_ATOMIC64 if !64BIT
2015 # Horrible source of confusion. Die, die, die ...
2016 select EMBEDDED
2017 select RTC_LIB if !LEMOTE_FULOONG2E
2018 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2019 index 77f5021..2b1db8a 100644
2020 --- a/arch/mips/Makefile
2021 +++ b/arch/mips/Makefile
2022 @@ -51,6 +51,8 @@ endif
2023 cflags-y := -ffunction-sections
2024 cflags-y += $(call cc-option, -mno-check-zero-division)
2025
2026 +cflags-y += -Wno-sign-compare -Wno-extra
2027 +
2028 ifdef CONFIG_32BIT
2029 ld-emul = $(32bit-emul)
2030 vmlinux-32 = vmlinux
2031 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2032 index 632f986..fd0378d 100644
2033 --- a/arch/mips/alchemy/devboards/pm.c
2034 +++ b/arch/mips/alchemy/devboards/pm.c
2035 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2036
2037 }
2038
2039 -static struct platform_suspend_ops db1x_pm_ops = {
2040 +static const struct platform_suspend_ops db1x_pm_ops = {
2041 .valid = suspend_valid_only_mem,
2042 .begin = db1x_pm_begin,
2043 .enter = db1x_pm_enter,
2044 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2045 index 09e7128..111035b 100644
2046 --- a/arch/mips/include/asm/atomic.h
2047 +++ b/arch/mips/include/asm/atomic.h
2048 @@ -21,6 +21,10 @@
2049 #include <asm/war.h>
2050 #include <asm/system.h>
2051
2052 +#ifdef CONFIG_GENERIC_ATOMIC64
2053 +#include <asm-generic/atomic64.h>
2054 +#endif
2055 +
2056 #define ATOMIC_INIT(i) { (i) }
2057
2058 /*
2059 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2060 */
2061 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2062
2063 +#define atomic64_read_unchecked(v) atomic64_read(v)
2064 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2065 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2066 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2067 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2068 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2069 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2070 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2071 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2072 +
2073 #endif /* CONFIG_64BIT */
2074
2075 /*
2076 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2077 index 7990694..4e93acf 100644
2078 --- a/arch/mips/include/asm/elf.h
2079 +++ b/arch/mips/include/asm/elf.h
2080 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2081 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2082 #endif
2083
2084 +#ifdef CONFIG_PAX_ASLR
2085 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2086 +
2087 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2088 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2089 +#endif
2090 +
2091 #endif /* _ASM_ELF_H */
2092 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2093 index f266295..627cfff 100644
2094 --- a/arch/mips/include/asm/page.h
2095 +++ b/arch/mips/include/asm/page.h
2096 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2097 #ifdef CONFIG_CPU_MIPS32
2098 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2099 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2100 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2101 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2102 #else
2103 typedef struct { unsigned long long pte; } pte_t;
2104 #define pte_val(x) ((x).pte)
2105 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2106 index e48c0bf..f3acf65 100644
2107 --- a/arch/mips/include/asm/reboot.h
2108 +++ b/arch/mips/include/asm/reboot.h
2109 @@ -9,7 +9,7 @@
2110 #ifndef _ASM_REBOOT_H
2111 #define _ASM_REBOOT_H
2112
2113 -extern void (*_machine_restart)(char *command);
2114 -extern void (*_machine_halt)(void);
2115 +extern void (*__noreturn _machine_restart)(char *command);
2116 +extern void (*__noreturn _machine_halt)(void);
2117
2118 #endif /* _ASM_REBOOT_H */
2119 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2120 index 83b5509..9fa24a23 100644
2121 --- a/arch/mips/include/asm/system.h
2122 +++ b/arch/mips/include/asm/system.h
2123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2124 */
2125 #define __ARCH_WANT_UNLOCKED_CTXSW
2126
2127 -extern unsigned long arch_align_stack(unsigned long sp);
2128 +#define arch_align_stack(x) ((x) & ~0xfUL)
2129
2130 #endif /* _ASM_SYSTEM_H */
2131 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2132 index 9fdd8bc..fcf9d68 100644
2133 --- a/arch/mips/kernel/binfmt_elfn32.c
2134 +++ b/arch/mips/kernel/binfmt_elfn32.c
2135 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2136 #undef ELF_ET_DYN_BASE
2137 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2138
2139 +#ifdef CONFIG_PAX_ASLR
2140 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2141 +
2142 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2143 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2144 +#endif
2145 +
2146 #include <asm/processor.h>
2147 #include <linux/module.h>
2148 #include <linux/elfcore.h>
2149 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2150 index ff44823..cf0b48a 100644
2151 --- a/arch/mips/kernel/binfmt_elfo32.c
2152 +++ b/arch/mips/kernel/binfmt_elfo32.c
2153 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2154 #undef ELF_ET_DYN_BASE
2155 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2156
2157 +#ifdef CONFIG_PAX_ASLR
2158 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2159 +
2160 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2161 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2162 +#endif
2163 +
2164 #include <asm/processor.h>
2165
2166 /*
2167 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2168 index 50c9bb8..efdd5f8 100644
2169 --- a/arch/mips/kernel/kgdb.c
2170 +++ b/arch/mips/kernel/kgdb.c
2171 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2172 return -1;
2173 }
2174
2175 +/* cannot be const */
2176 struct kgdb_arch arch_kgdb_ops;
2177
2178 /*
2179 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2180 index f3d73e1..bb3f57a 100644
2181 --- a/arch/mips/kernel/process.c
2182 +++ b/arch/mips/kernel/process.c
2183 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2184 out:
2185 return pc;
2186 }
2187 -
2188 -/*
2189 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2190 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2191 - */
2192 -unsigned long arch_align_stack(unsigned long sp)
2193 -{
2194 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2195 - sp -= get_random_int() & ~PAGE_MASK;
2196 -
2197 - return sp & ALMASK;
2198 -}
2199 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2200 index 060563a..7fbf310 100644
2201 --- a/arch/mips/kernel/reset.c
2202 +++ b/arch/mips/kernel/reset.c
2203 @@ -19,8 +19,8 @@
2204 * So handle all using function pointers to machine specific
2205 * functions.
2206 */
2207 -void (*_machine_restart)(char *command);
2208 -void (*_machine_halt)(void);
2209 +void (*__noreturn _machine_restart)(char *command);
2210 +void (*__noreturn _machine_halt)(void);
2211 void (*pm_power_off)(void);
2212
2213 EXPORT_SYMBOL(pm_power_off);
2214 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2215 {
2216 if (_machine_restart)
2217 _machine_restart(command);
2218 + BUG();
2219 }
2220
2221 void machine_halt(void)
2222 {
2223 if (_machine_halt)
2224 _machine_halt();
2225 + BUG();
2226 }
2227
2228 void machine_power_off(void)
2229 {
2230 if (pm_power_off)
2231 pm_power_off();
2232 + BUG();
2233 }
2234 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2235 index 3f7f466..3abe0b5 100644
2236 --- a/arch/mips/kernel/syscall.c
2237 +++ b/arch/mips/kernel/syscall.c
2238 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2239 do_color_align = 0;
2240 if (filp || (flags & MAP_SHARED))
2241 do_color_align = 1;
2242 +
2243 +#ifdef CONFIG_PAX_RANDMMAP
2244 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2245 +#endif
2246 +
2247 if (addr) {
2248 if (do_color_align)
2249 addr = COLOUR_ALIGN(addr, pgoff);
2250 else
2251 addr = PAGE_ALIGN(addr);
2252 vmm = find_vma(current->mm, addr);
2253 - if (task_size - len >= addr &&
2254 - (!vmm || addr + len <= vmm->vm_start))
2255 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2256 return addr;
2257 }
2258 - addr = TASK_UNMAPPED_BASE;
2259 + addr = current->mm->mmap_base;
2260 if (do_color_align)
2261 addr = COLOUR_ALIGN(addr, pgoff);
2262 else
2263 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2264 /* At this point: (!vmm || addr < vmm->vm_end). */
2265 if (task_size - len < addr)
2266 return -ENOMEM;
2267 - if (!vmm || addr + len <= vmm->vm_start)
2268 + if (check_heap_stack_gap(vmm, addr, len))
2269 return addr;
2270 addr = vmm->vm_end;
2271 if (do_color_align)
2272 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2273 index e97a7a2..f18f5b0 100644
2274 --- a/arch/mips/mm/fault.c
2275 +++ b/arch/mips/mm/fault.c
2276 @@ -26,6 +26,23 @@
2277 #include <asm/ptrace.h>
2278 #include <asm/highmem.h> /* For VMALLOC_END */
2279
2280 +#ifdef CONFIG_PAX_PAGEEXEC
2281 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2282 +{
2283 + unsigned long i;
2284 +
2285 + printk(KERN_ERR "PAX: bytes at PC: ");
2286 + for (i = 0; i < 5; i++) {
2287 + unsigned int c;
2288 + if (get_user(c, (unsigned int *)pc+i))
2289 + printk(KERN_CONT "???????? ");
2290 + else
2291 + printk(KERN_CONT "%08x ", c);
2292 + }
2293 + printk("\n");
2294 +}
2295 +#endif
2296 +
2297 /*
2298 * This routine handles page faults. It determines the address,
2299 * and the problem, and then passes it off to one of the appropriate
2300 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2301 index 8bc9e96..26554f8 100644
2302 --- a/arch/parisc/include/asm/atomic.h
2303 +++ b/arch/parisc/include/asm/atomic.h
2304 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2305
2306 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2307
2308 +#define atomic64_read_unchecked(v) atomic64_read(v)
2309 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2310 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2311 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2312 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2313 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2314 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2315 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2316 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2317 +
2318 #else /* CONFIG_64BIT */
2319
2320 #include <asm-generic/atomic64.h>
2321 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2322 index 9c802eb..0592e41 100644
2323 --- a/arch/parisc/include/asm/elf.h
2324 +++ b/arch/parisc/include/asm/elf.h
2325 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2326
2327 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2328
2329 +#ifdef CONFIG_PAX_ASLR
2330 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2331 +
2332 +#define PAX_DELTA_MMAP_LEN 16
2333 +#define PAX_DELTA_STACK_LEN 16
2334 +#endif
2335 +
2336 /* This yields a mask that user programs can use to figure out what
2337 instruction set this CPU supports. This could be done in user space,
2338 but it's not easy, and we've already done it here. */
2339 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2340 index a27d2e2..18fd845 100644
2341 --- a/arch/parisc/include/asm/pgtable.h
2342 +++ b/arch/parisc/include/asm/pgtable.h
2343 @@ -207,6 +207,17 @@
2344 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2345 #define PAGE_COPY PAGE_EXECREAD
2346 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2347 +
2348 +#ifdef CONFIG_PAX_PAGEEXEC
2349 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2350 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2351 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2352 +#else
2353 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2354 +# define PAGE_COPY_NOEXEC PAGE_COPY
2355 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2356 +#endif
2357 +
2358 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2359 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2360 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2361 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2362 index 2120746..8d70a5e 100644
2363 --- a/arch/parisc/kernel/module.c
2364 +++ b/arch/parisc/kernel/module.c
2365 @@ -95,16 +95,38 @@
2366
2367 /* three functions to determine where in the module core
2368 * or init pieces the location is */
2369 +static inline int in_init_rx(struct module *me, void *loc)
2370 +{
2371 + return (loc >= me->module_init_rx &&
2372 + loc < (me->module_init_rx + me->init_size_rx));
2373 +}
2374 +
2375 +static inline int in_init_rw(struct module *me, void *loc)
2376 +{
2377 + return (loc >= me->module_init_rw &&
2378 + loc < (me->module_init_rw + me->init_size_rw));
2379 +}
2380 +
2381 static inline int in_init(struct module *me, void *loc)
2382 {
2383 - return (loc >= me->module_init &&
2384 - loc <= (me->module_init + me->init_size));
2385 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2386 +}
2387 +
2388 +static inline int in_core_rx(struct module *me, void *loc)
2389 +{
2390 + return (loc >= me->module_core_rx &&
2391 + loc < (me->module_core_rx + me->core_size_rx));
2392 +}
2393 +
2394 +static inline int in_core_rw(struct module *me, void *loc)
2395 +{
2396 + return (loc >= me->module_core_rw &&
2397 + loc < (me->module_core_rw + me->core_size_rw));
2398 }
2399
2400 static inline int in_core(struct module *me, void *loc)
2401 {
2402 - return (loc >= me->module_core &&
2403 - loc <= (me->module_core + me->core_size));
2404 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2405 }
2406
2407 static inline int in_local(struct module *me, void *loc)
2408 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2409 }
2410
2411 /* align things a bit */
2412 - me->core_size = ALIGN(me->core_size, 16);
2413 - me->arch.got_offset = me->core_size;
2414 - me->core_size += gots * sizeof(struct got_entry);
2415 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2416 + me->arch.got_offset = me->core_size_rw;
2417 + me->core_size_rw += gots * sizeof(struct got_entry);
2418
2419 - me->core_size = ALIGN(me->core_size, 16);
2420 - me->arch.fdesc_offset = me->core_size;
2421 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2422 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2423 + me->arch.fdesc_offset = me->core_size_rw;
2424 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2425
2426 me->arch.got_max = gots;
2427 me->arch.fdesc_max = fdescs;
2428 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429
2430 BUG_ON(value == 0);
2431
2432 - got = me->module_core + me->arch.got_offset;
2433 + got = me->module_core_rw + me->arch.got_offset;
2434 for (i = 0; got[i].addr; i++)
2435 if (got[i].addr == value)
2436 goto out;
2437 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2438 #ifdef CONFIG_64BIT
2439 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2440 {
2441 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2442 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2443
2444 if (!value) {
2445 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2446 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2447
2448 /* Create new one */
2449 fdesc->addr = value;
2450 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452 return (Elf_Addr)fdesc;
2453 }
2454 #endif /* CONFIG_64BIT */
2455 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2456
2457 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2458 end = table + sechdrs[me->arch.unwind_section].sh_size;
2459 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2460 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2461
2462 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2463 me->arch.unwind_section, table, end, gp);
2464 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2465 index 9147391..f3d949a 100644
2466 --- a/arch/parisc/kernel/sys_parisc.c
2467 +++ b/arch/parisc/kernel/sys_parisc.c
2468 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472 - if (!vma || addr + len <= vma->vm_start)
2473 + if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = vma->vm_end;
2476 }
2477 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2478 /* At this point: (!vma || addr < vma->vm_end). */
2479 if (TASK_SIZE - len < addr)
2480 return -ENOMEM;
2481 - if (!vma || addr + len <= vma->vm_start)
2482 + if (check_heap_stack_gap(vma, addr, len))
2483 return addr;
2484 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2485 if (addr < vma->vm_end) /* handle wraparound */
2486 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2487 if (flags & MAP_FIXED)
2488 return addr;
2489 if (!addr)
2490 - addr = TASK_UNMAPPED_BASE;
2491 + addr = current->mm->mmap_base;
2492
2493 if (filp) {
2494 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2495 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2496 index 8b58bf0..7afff03 100644
2497 --- a/arch/parisc/kernel/traps.c
2498 +++ b/arch/parisc/kernel/traps.c
2499 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2500
2501 down_read(&current->mm->mmap_sem);
2502 vma = find_vma(current->mm,regs->iaoq[0]);
2503 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2504 - && (vma->vm_flags & VM_EXEC)) {
2505 -
2506 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2507 fault_address = regs->iaoq[0];
2508 fault_space = regs->iasq[0];
2509
2510 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2511 index c6afbfc..c5839f6 100644
2512 --- a/arch/parisc/mm/fault.c
2513 +++ b/arch/parisc/mm/fault.c
2514 @@ -15,6 +15,7 @@
2515 #include <linux/sched.h>
2516 #include <linux/interrupt.h>
2517 #include <linux/module.h>
2518 +#include <linux/unistd.h>
2519
2520 #include <asm/uaccess.h>
2521 #include <asm/traps.h>
2522 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2523 static unsigned long
2524 parisc_acctyp(unsigned long code, unsigned int inst)
2525 {
2526 - if (code == 6 || code == 16)
2527 + if (code == 6 || code == 7 || code == 16)
2528 return VM_EXEC;
2529
2530 switch (inst & 0xf0000000) {
2531 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2532 }
2533 #endif
2534
2535 +#ifdef CONFIG_PAX_PAGEEXEC
2536 +/*
2537 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2538 + *
2539 + * returns 1 when task should be killed
2540 + * 2 when rt_sigreturn trampoline was detected
2541 + * 3 when unpatched PLT trampoline was detected
2542 + */
2543 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2544 +{
2545 +
2546 +#ifdef CONFIG_PAX_EMUPLT
2547 + int err;
2548 +
2549 + do { /* PaX: unpatched PLT emulation */
2550 + unsigned int bl, depwi;
2551 +
2552 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2553 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2554 +
2555 + if (err)
2556 + break;
2557 +
2558 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2559 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2560 +
2561 + err = get_user(ldw, (unsigned int *)addr);
2562 + err |= get_user(bv, (unsigned int *)(addr+4));
2563 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2564 +
2565 + if (err)
2566 + break;
2567 +
2568 + if (ldw == 0x0E801096U &&
2569 + bv == 0xEAC0C000U &&
2570 + ldw2 == 0x0E881095U)
2571 + {
2572 + unsigned int resolver, map;
2573 +
2574 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2575 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2576 + if (err)
2577 + break;
2578 +
2579 + regs->gr[20] = instruction_pointer(regs)+8;
2580 + regs->gr[21] = map;
2581 + regs->gr[22] = resolver;
2582 + regs->iaoq[0] = resolver | 3UL;
2583 + regs->iaoq[1] = regs->iaoq[0] + 4;
2584 + return 3;
2585 + }
2586 + }
2587 + } while (0);
2588 +#endif
2589 +
2590 +#ifdef CONFIG_PAX_EMUTRAMP
2591 +
2592 +#ifndef CONFIG_PAX_EMUSIGRT
2593 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2594 + return 1;
2595 +#endif
2596 +
2597 + do { /* PaX: rt_sigreturn emulation */
2598 + unsigned int ldi1, ldi2, bel, nop;
2599 +
2600 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2601 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2602 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2603 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2604 +
2605 + if (err)
2606 + break;
2607 +
2608 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2609 + ldi2 == 0x3414015AU &&
2610 + bel == 0xE4008200U &&
2611 + nop == 0x08000240U)
2612 + {
2613 + regs->gr[25] = (ldi1 & 2) >> 1;
2614 + regs->gr[20] = __NR_rt_sigreturn;
2615 + regs->gr[31] = regs->iaoq[1] + 16;
2616 + regs->sr[0] = regs->iasq[1];
2617 + regs->iaoq[0] = 0x100UL;
2618 + regs->iaoq[1] = regs->iaoq[0] + 4;
2619 + regs->iasq[0] = regs->sr[2];
2620 + regs->iasq[1] = regs->sr[2];
2621 + return 2;
2622 + }
2623 + } while (0);
2624 +#endif
2625 +
2626 + return 1;
2627 +}
2628 +
2629 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2630 +{
2631 + unsigned long i;
2632 +
2633 + printk(KERN_ERR "PAX: bytes at PC: ");
2634 + for (i = 0; i < 5; i++) {
2635 + unsigned int c;
2636 + if (get_user(c, (unsigned int *)pc+i))
2637 + printk(KERN_CONT "???????? ");
2638 + else
2639 + printk(KERN_CONT "%08x ", c);
2640 + }
2641 + printk("\n");
2642 +}
2643 +#endif
2644 +
2645 int fixup_exception(struct pt_regs *regs)
2646 {
2647 const struct exception_table_entry *fix;
2648 @@ -192,8 +303,33 @@ good_area:
2649
2650 acc_type = parisc_acctyp(code,regs->iir);
2651
2652 - if ((vma->vm_flags & acc_type) != acc_type)
2653 + if ((vma->vm_flags & acc_type) != acc_type) {
2654 +
2655 +#ifdef CONFIG_PAX_PAGEEXEC
2656 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2657 + (address & ~3UL) == instruction_pointer(regs))
2658 + {
2659 + up_read(&mm->mmap_sem);
2660 + switch (pax_handle_fetch_fault(regs)) {
2661 +
2662 +#ifdef CONFIG_PAX_EMUPLT
2663 + case 3:
2664 + return;
2665 +#endif
2666 +
2667 +#ifdef CONFIG_PAX_EMUTRAMP
2668 + case 2:
2669 + return;
2670 +#endif
2671 +
2672 + }
2673 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2674 + do_group_exit(SIGKILL);
2675 + }
2676 +#endif
2677 +
2678 goto bad_area;
2679 + }
2680
2681 /*
2682 * If for any reason at all we couldn't handle the fault, make
2683 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2684 index c107b74..409dc0f 100644
2685 --- a/arch/powerpc/Makefile
2686 +++ b/arch/powerpc/Makefile
2687 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2688 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2689 CPP = $(CC) -E $(KBUILD_CFLAGS)
2690
2691 +cflags-y += -Wno-sign-compare -Wno-extra
2692 +
2693 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2694
2695 ifeq ($(CONFIG_PPC64),y)
2696 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2697 index 6d94d27..50d4cad 100644
2698 --- a/arch/powerpc/include/asm/device.h
2699 +++ b/arch/powerpc/include/asm/device.h
2700 @@ -14,7 +14,7 @@ struct dev_archdata {
2701 struct device_node *of_node;
2702
2703 /* DMA operations on that device */
2704 - struct dma_map_ops *dma_ops;
2705 + const struct dma_map_ops *dma_ops;
2706
2707 /*
2708 * When an iommu is in use, dma_data is used as a ptr to the base of the
2709 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2710 index e281dae..2b8a784 100644
2711 --- a/arch/powerpc/include/asm/dma-mapping.h
2712 +++ b/arch/powerpc/include/asm/dma-mapping.h
2713 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2714 #ifdef CONFIG_PPC64
2715 extern struct dma_map_ops dma_iommu_ops;
2716 #endif
2717 -extern struct dma_map_ops dma_direct_ops;
2718 +extern const struct dma_map_ops dma_direct_ops;
2719
2720 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2721 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2722 {
2723 /* We don't handle the NULL dev case for ISA for now. We could
2724 * do it via an out of line call but it is not needed for now. The
2725 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2726 return dev->archdata.dma_ops;
2727 }
2728
2729 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2730 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2731 {
2732 dev->archdata.dma_ops = ops;
2733 }
2734 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2735
2736 static inline int dma_supported(struct device *dev, u64 mask)
2737 {
2738 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return 0;
2743 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2744
2745 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2746 {
2747 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749
2750 if (unlikely(dma_ops == NULL))
2751 return -EIO;
2752 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2753 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2754 dma_addr_t *dma_handle, gfp_t flag)
2755 {
2756 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758 void *cpu_addr;
2759
2760 BUG_ON(!dma_ops);
2761 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2762 static inline void dma_free_coherent(struct device *dev, size_t size,
2763 void *cpu_addr, dma_addr_t dma_handle)
2764 {
2765 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 BUG_ON(!dma_ops);
2769
2770 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2771
2772 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2773 {
2774 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2775 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2776
2777 if (dma_ops->mapping_error)
2778 return dma_ops->mapping_error(dev, dma_addr);
2779 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2780 index 5698502..5db093c 100644
2781 --- a/arch/powerpc/include/asm/elf.h
2782 +++ b/arch/powerpc/include/asm/elf.h
2783 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2784 the loader. We need to make sure that it is out of the way of the program
2785 that it will "exec", and that there is sufficient room for the brk. */
2786
2787 -extern unsigned long randomize_et_dyn(unsigned long base);
2788 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2789 +#define ELF_ET_DYN_BASE (0x20000000)
2790 +
2791 +#ifdef CONFIG_PAX_ASLR
2792 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2793 +
2794 +#ifdef __powerpc64__
2795 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2796 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2797 +#else
2798 +#define PAX_DELTA_MMAP_LEN 15
2799 +#define PAX_DELTA_STACK_LEN 15
2800 +#endif
2801 +#endif
2802
2803 /*
2804 * Our registers are always unsigned longs, whether we're a 32 bit
2805 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2806 (0x7ff >> (PAGE_SHIFT - 12)) : \
2807 (0x3ffff >> (PAGE_SHIFT - 12)))
2808
2809 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2810 -#define arch_randomize_brk arch_randomize_brk
2811 -
2812 #endif /* __KERNEL__ */
2813
2814 /*
2815 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2816 index edfc980..1766f59 100644
2817 --- a/arch/powerpc/include/asm/iommu.h
2818 +++ b/arch/powerpc/include/asm/iommu.h
2819 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2820 extern void iommu_init_early_dart(void);
2821 extern void iommu_init_early_pasemi(void);
2822
2823 +/* dma-iommu.c */
2824 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2825 +
2826 #ifdef CONFIG_PCI
2827 extern void pci_iommu_init(void);
2828 extern void pci_direct_iommu_init(void);
2829 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2830 index 9163695..5a00112 100644
2831 --- a/arch/powerpc/include/asm/kmap_types.h
2832 +++ b/arch/powerpc/include/asm/kmap_types.h
2833 @@ -26,6 +26,7 @@ enum km_type {
2834 KM_SOFTIRQ1,
2835 KM_PPC_SYNC_PAGE,
2836 KM_PPC_SYNC_ICACHE,
2837 + KM_CLEARPAGE,
2838 KM_TYPE_NR
2839 };
2840
2841 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2842 index ff24254..fe45b21 100644
2843 --- a/arch/powerpc/include/asm/page.h
2844 +++ b/arch/powerpc/include/asm/page.h
2845 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2846 * and needs to be executable. This means the whole heap ends
2847 * up being executable.
2848 */
2849 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2850 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2851 +#define VM_DATA_DEFAULT_FLAGS32 \
2852 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2853 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2854
2855 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2856 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2857 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2858 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2859 #endif
2860
2861 +#define ktla_ktva(addr) (addr)
2862 +#define ktva_ktla(addr) (addr)
2863 +
2864 #ifndef __ASSEMBLY__
2865
2866 #undef STRICT_MM_TYPECHECKS
2867 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2868 index 3f17b83..1f9e766 100644
2869 --- a/arch/powerpc/include/asm/page_64.h
2870 +++ b/arch/powerpc/include/asm/page_64.h
2871 @@ -180,15 +180,18 @@ do { \
2872 * stack by default, so in the absense of a PT_GNU_STACK program header
2873 * we turn execute permission off.
2874 */
2875 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2876 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2877 +#define VM_STACK_DEFAULT_FLAGS32 \
2878 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2879 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2880
2881 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2882 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2883
2884 +#ifndef CONFIG_PAX_PAGEEXEC
2885 #define VM_STACK_DEFAULT_FLAGS \
2886 (test_thread_flag(TIF_32BIT) ? \
2887 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2888 +#endif
2889
2890 #include <asm-generic/getorder.h>
2891
2892 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2893 index b5ea626..4030822 100644
2894 --- a/arch/powerpc/include/asm/pci.h
2895 +++ b/arch/powerpc/include/asm/pci.h
2896 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2897 }
2898
2899 #ifdef CONFIG_PCI
2900 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2901 -extern struct dma_map_ops *get_pci_dma_ops(void);
2902 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2903 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2904 #else /* CONFIG_PCI */
2905 #define set_pci_dma_ops(d)
2906 #define get_pci_dma_ops() NULL
2907 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2908 index 2a5da06..d65bea2 100644
2909 --- a/arch/powerpc/include/asm/pgtable.h
2910 +++ b/arch/powerpc/include/asm/pgtable.h
2911 @@ -2,6 +2,7 @@
2912 #define _ASM_POWERPC_PGTABLE_H
2913 #ifdef __KERNEL__
2914
2915 +#include <linux/const.h>
2916 #ifndef __ASSEMBLY__
2917 #include <asm/processor.h> /* For TASK_SIZE */
2918 #include <asm/mmu.h>
2919 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2920 index 4aad413..85d86bf 100644
2921 --- a/arch/powerpc/include/asm/pte-hash32.h
2922 +++ b/arch/powerpc/include/asm/pte-hash32.h
2923 @@ -21,6 +21,7 @@
2924 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2925 #define _PAGE_USER 0x004 /* usermode access allowed */
2926 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2927 +#define _PAGE_EXEC _PAGE_GUARDED
2928 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2929 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2930 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2931 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2932 index 8c34149..78f425a 100644
2933 --- a/arch/powerpc/include/asm/ptrace.h
2934 +++ b/arch/powerpc/include/asm/ptrace.h
2935 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2936 } while(0)
2937
2938 struct task_struct;
2939 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2940 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2941 extern int ptrace_put_reg(struct task_struct *task, int regno,
2942 unsigned long data);
2943
2944 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2945 index 32a7c30..be3a8bb 100644
2946 --- a/arch/powerpc/include/asm/reg.h
2947 +++ b/arch/powerpc/include/asm/reg.h
2948 @@ -191,6 +191,7 @@
2949 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2950 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2951 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2952 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2953 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2954 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2955 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2956 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2957 index 8979d4c..d2fd0d3 100644
2958 --- a/arch/powerpc/include/asm/swiotlb.h
2959 +++ b/arch/powerpc/include/asm/swiotlb.h
2960 @@ -13,7 +13,7 @@
2961
2962 #include <linux/swiotlb.h>
2963
2964 -extern struct dma_map_ops swiotlb_dma_ops;
2965 +extern const struct dma_map_ops swiotlb_dma_ops;
2966
2967 static inline void dma_mark_clean(void *addr, size_t size) {}
2968
2969 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2970 index 094a12a..877a60a 100644
2971 --- a/arch/powerpc/include/asm/system.h
2972 +++ b/arch/powerpc/include/asm/system.h
2973 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2974 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2975 #endif
2976
2977 -extern unsigned long arch_align_stack(unsigned long sp);
2978 +#define arch_align_stack(x) ((x) & ~0xfUL)
2979
2980 /* Used in very early kernel initialization. */
2981 extern unsigned long reloc_offset(void);
2982 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2983 index bd0fb84..a42a14b 100644
2984 --- a/arch/powerpc/include/asm/uaccess.h
2985 +++ b/arch/powerpc/include/asm/uaccess.h
2986 @@ -13,6 +13,8 @@
2987 #define VERIFY_READ 0
2988 #define VERIFY_WRITE 1
2989
2990 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2991 +
2992 /*
2993 * The fs value determines whether argument validity checking should be
2994 * performed or not. If get_fs() == USER_DS, checking is performed, with
2995 @@ -327,52 +329,6 @@ do { \
2996 extern unsigned long __copy_tofrom_user(void __user *to,
2997 const void __user *from, unsigned long size);
2998
2999 -#ifndef __powerpc64__
3000 -
3001 -static inline unsigned long copy_from_user(void *to,
3002 - const void __user *from, unsigned long n)
3003 -{
3004 - unsigned long over;
3005 -
3006 - if (access_ok(VERIFY_READ, from, n))
3007 - return __copy_tofrom_user((__force void __user *)to, from, n);
3008 - if ((unsigned long)from < TASK_SIZE) {
3009 - over = (unsigned long)from + n - TASK_SIZE;
3010 - return __copy_tofrom_user((__force void __user *)to, from,
3011 - n - over) + over;
3012 - }
3013 - return n;
3014 -}
3015 -
3016 -static inline unsigned long copy_to_user(void __user *to,
3017 - const void *from, unsigned long n)
3018 -{
3019 - unsigned long over;
3020 -
3021 - if (access_ok(VERIFY_WRITE, to, n))
3022 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3023 - if ((unsigned long)to < TASK_SIZE) {
3024 - over = (unsigned long)to + n - TASK_SIZE;
3025 - return __copy_tofrom_user(to, (__force void __user *)from,
3026 - n - over) + over;
3027 - }
3028 - return n;
3029 -}
3030 -
3031 -#else /* __powerpc64__ */
3032 -
3033 -#define __copy_in_user(to, from, size) \
3034 - __copy_tofrom_user((to), (from), (size))
3035 -
3036 -extern unsigned long copy_from_user(void *to, const void __user *from,
3037 - unsigned long n);
3038 -extern unsigned long copy_to_user(void __user *to, const void *from,
3039 - unsigned long n);
3040 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3041 - unsigned long n);
3042 -
3043 -#endif /* __powerpc64__ */
3044 -
3045 static inline unsigned long __copy_from_user_inatomic(void *to,
3046 const void __user *from, unsigned long n)
3047 {
3048 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3049 if (ret == 0)
3050 return 0;
3051 }
3052 +
3053 + if (!__builtin_constant_p(n))
3054 + check_object_size(to, n, false);
3055 +
3056 return __copy_tofrom_user((__force void __user *)to, from, n);
3057 }
3058
3059 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3060 if (ret == 0)
3061 return 0;
3062 }
3063 +
3064 + if (!__builtin_constant_p(n))
3065 + check_object_size(from, n, true);
3066 +
3067 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3068 }
3069
3070 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3071 return __copy_to_user_inatomic(to, from, size);
3072 }
3073
3074 +#ifndef __powerpc64__
3075 +
3076 +static inline unsigned long __must_check copy_from_user(void *to,
3077 + const void __user *from, unsigned long n)
3078 +{
3079 + unsigned long over;
3080 +
3081 + if ((long)n < 0)
3082 + return n;
3083 +
3084 + if (access_ok(VERIFY_READ, from, n)) {
3085 + if (!__builtin_constant_p(n))
3086 + check_object_size(to, n, false);
3087 + return __copy_tofrom_user((__force void __user *)to, from, n);
3088 + }
3089 + if ((unsigned long)from < TASK_SIZE) {
3090 + over = (unsigned long)from + n - TASK_SIZE;
3091 + if (!__builtin_constant_p(n - over))
3092 + check_object_size(to, n - over, false);
3093 + return __copy_tofrom_user((__force void __user *)to, from,
3094 + n - over) + over;
3095 + }
3096 + return n;
3097 +}
3098 +
3099 +static inline unsigned long __must_check copy_to_user(void __user *to,
3100 + const void *from, unsigned long n)
3101 +{
3102 + unsigned long over;
3103 +
3104 + if ((long)n < 0)
3105 + return n;
3106 +
3107 + if (access_ok(VERIFY_WRITE, to, n)) {
3108 + if (!__builtin_constant_p(n))
3109 + check_object_size(from, n, true);
3110 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3111 + }
3112 + if ((unsigned long)to < TASK_SIZE) {
3113 + over = (unsigned long)to + n - TASK_SIZE;
3114 + if (!__builtin_constant_p(n))
3115 + check_object_size(from, n - over, true);
3116 + return __copy_tofrom_user(to, (__force void __user *)from,
3117 + n - over) + over;
3118 + }
3119 + return n;
3120 +}
3121 +
3122 +#else /* __powerpc64__ */
3123 +
3124 +#define __copy_in_user(to, from, size) \
3125 + __copy_tofrom_user((to), (from), (size))
3126 +
3127 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3128 +{
3129 + if ((long)n < 0 || n > INT_MAX)
3130 + return n;
3131 +
3132 + if (!__builtin_constant_p(n))
3133 + check_object_size(to, n, false);
3134 +
3135 + if (likely(access_ok(VERIFY_READ, from, n)))
3136 + n = __copy_from_user(to, from, n);
3137 + else
3138 + memset(to, 0, n);
3139 + return n;
3140 +}
3141 +
3142 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3143 +{
3144 + if ((long)n < 0 || n > INT_MAX)
3145 + return n;
3146 +
3147 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3148 + if (!__builtin_constant_p(n))
3149 + check_object_size(from, n, true);
3150 + n = __copy_to_user(to, from, n);
3151 + }
3152 + return n;
3153 +}
3154 +
3155 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3156 + unsigned long n);
3157 +
3158 +#endif /* __powerpc64__ */
3159 +
3160 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3161
3162 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3163 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3164 index bb37b1d..01fe9ce 100644
3165 --- a/arch/powerpc/kernel/cacheinfo.c
3166 +++ b/arch/powerpc/kernel/cacheinfo.c
3167 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3168 &cache_assoc_attr,
3169 };
3170
3171 -static struct sysfs_ops cache_index_ops = {
3172 +static const struct sysfs_ops cache_index_ops = {
3173 .show = cache_index_show,
3174 };
3175
3176 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3177 index 37771a5..648530c 100644
3178 --- a/arch/powerpc/kernel/dma-iommu.c
3179 +++ b/arch/powerpc/kernel/dma-iommu.c
3180 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3181 }
3182
3183 /* We support DMA to/from any memory page via the iommu */
3184 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3185 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3186 {
3187 struct iommu_table *tbl = get_iommu_table_base(dev);
3188
3189 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3190 index e96cbbd..bdd6d41 100644
3191 --- a/arch/powerpc/kernel/dma-swiotlb.c
3192 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3193 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3194 * map_page, and unmap_page on highmem, use normal dma_ops
3195 * for everything else.
3196 */
3197 -struct dma_map_ops swiotlb_dma_ops = {
3198 +const struct dma_map_ops swiotlb_dma_ops = {
3199 .alloc_coherent = dma_direct_alloc_coherent,
3200 .free_coherent = dma_direct_free_coherent,
3201 .map_sg = swiotlb_map_sg_attrs,
3202 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3203 index 6215062..ebea59c 100644
3204 --- a/arch/powerpc/kernel/dma.c
3205 +++ b/arch/powerpc/kernel/dma.c
3206 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3207 }
3208 #endif
3209
3210 -struct dma_map_ops dma_direct_ops = {
3211 +const struct dma_map_ops dma_direct_ops = {
3212 .alloc_coherent = dma_direct_alloc_coherent,
3213 .free_coherent = dma_direct_free_coherent,
3214 .map_sg = dma_direct_map_sg,
3215 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3216 index 24dcc0e..a300455 100644
3217 --- a/arch/powerpc/kernel/exceptions-64e.S
3218 +++ b/arch/powerpc/kernel/exceptions-64e.S
3219 @@ -455,6 +455,7 @@ storage_fault_common:
3220 std r14,_DAR(r1)
3221 std r15,_DSISR(r1)
3222 addi r3,r1,STACK_FRAME_OVERHEAD
3223 + bl .save_nvgprs
3224 mr r4,r14
3225 mr r5,r15
3226 ld r14,PACA_EXGEN+EX_R14(r13)
3227 @@ -464,8 +465,7 @@ storage_fault_common:
3228 cmpdi r3,0
3229 bne- 1f
3230 b .ret_from_except_lite
3231 -1: bl .save_nvgprs
3232 - mr r5,r3
3233 +1: mr r5,r3
3234 addi r3,r1,STACK_FRAME_OVERHEAD
3235 ld r4,_DAR(r1)
3236 bl .bad_page_fault
3237 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3238 index 1808876..9fd206a 100644
3239 --- a/arch/powerpc/kernel/exceptions-64s.S
3240 +++ b/arch/powerpc/kernel/exceptions-64s.S
3241 @@ -818,10 +818,10 @@ handle_page_fault:
3242 11: ld r4,_DAR(r1)
3243 ld r5,_DSISR(r1)
3244 addi r3,r1,STACK_FRAME_OVERHEAD
3245 + bl .save_nvgprs
3246 bl .do_page_fault
3247 cmpdi r3,0
3248 beq+ 13f
3249 - bl .save_nvgprs
3250 mr r5,r3
3251 addi r3,r1,STACK_FRAME_OVERHEAD
3252 lwz r4,_DAR(r1)
3253 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3254 index a4c8b38..1b09ad9 100644
3255 --- a/arch/powerpc/kernel/ibmebus.c
3256 +++ b/arch/powerpc/kernel/ibmebus.c
3257 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3258 return 1;
3259 }
3260
3261 -static struct dma_map_ops ibmebus_dma_ops = {
3262 +static const struct dma_map_ops ibmebus_dma_ops = {
3263 .alloc_coherent = ibmebus_alloc_coherent,
3264 .free_coherent = ibmebus_free_coherent,
3265 .map_sg = ibmebus_map_sg,
3266 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3267 index 641c74b..8339ad7 100644
3268 --- a/arch/powerpc/kernel/kgdb.c
3269 +++ b/arch/powerpc/kernel/kgdb.c
3270 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3271 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3272 return 0;
3273
3274 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3275 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3276 regs->nip += 4;
3277
3278 return 1;
3279 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3280 /*
3281 * Global data
3282 */
3283 -struct kgdb_arch arch_kgdb_ops = {
3284 +const struct kgdb_arch arch_kgdb_ops = {
3285 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3286 };
3287
3288 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3289 index 477c663..4f50234 100644
3290 --- a/arch/powerpc/kernel/module.c
3291 +++ b/arch/powerpc/kernel/module.c
3292 @@ -31,11 +31,24 @@
3293
3294 LIST_HEAD(module_bug_list);
3295
3296 +#ifdef CONFIG_PAX_KERNEXEC
3297 void *module_alloc(unsigned long size)
3298 {
3299 if (size == 0)
3300 return NULL;
3301
3302 + return vmalloc(size);
3303 +}
3304 +
3305 +void *module_alloc_exec(unsigned long size)
3306 +#else
3307 +void *module_alloc(unsigned long size)
3308 +#endif
3309 +
3310 +{
3311 + if (size == 0)
3312 + return NULL;
3313 +
3314 return vmalloc_exec(size);
3315 }
3316
3317 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3318 vfree(module_region);
3319 }
3320
3321 +#ifdef CONFIG_PAX_KERNEXEC
3322 +void module_free_exec(struct module *mod, void *module_region)
3323 +{
3324 + module_free(mod, module_region);
3325 +}
3326 +#endif
3327 +
3328 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3329 const Elf_Shdr *sechdrs,
3330 const char *name)
3331 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3332 index f832773..0507238 100644
3333 --- a/arch/powerpc/kernel/module_32.c
3334 +++ b/arch/powerpc/kernel/module_32.c
3335 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3336 me->arch.core_plt_section = i;
3337 }
3338 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3339 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3340 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3341 return -ENOEXEC;
3342 }
3343
3344 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3345
3346 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3347 /* Init, or core PLT? */
3348 - if (location >= mod->module_core
3349 - && location < mod->module_core + mod->core_size)
3350 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3351 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3352 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3353 - else
3354 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3355 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3356 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3357 + else {
3358 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3359 + return ~0UL;
3360 + }
3361
3362 /* Find this entry, or if that fails, the next avail. entry */
3363 while (entry->jump[0]) {
3364 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3365 index cadbed6..b9bbb00 100644
3366 --- a/arch/powerpc/kernel/pci-common.c
3367 +++ b/arch/powerpc/kernel/pci-common.c
3368 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3369 unsigned int ppc_pci_flags = 0;
3370
3371
3372 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3373 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3374
3375 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3376 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3377 {
3378 pci_dma_ops = dma_ops;
3379 }
3380
3381 -struct dma_map_ops *get_pci_dma_ops(void)
3382 +const struct dma_map_ops *get_pci_dma_ops(void)
3383 {
3384 return pci_dma_ops;
3385 }
3386 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3387 index 7b816da..8d5c277 100644
3388 --- a/arch/powerpc/kernel/process.c
3389 +++ b/arch/powerpc/kernel/process.c
3390 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3391 * Lookup NIP late so we have the best change of getting the
3392 * above info out without failing
3393 */
3394 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3395 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3396 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3397 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3398 #endif
3399 show_stack(current, (unsigned long *) regs->gpr[1]);
3400 if (!user_mode(regs))
3401 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3402 newsp = stack[0];
3403 ip = stack[STACK_FRAME_LR_SAVE];
3404 if (!firstframe || ip != lr) {
3405 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3406 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3408 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3409 - printk(" (%pS)",
3410 + printk(" (%pA)",
3411 (void *)current->ret_stack[curr_frame].ret);
3412 curr_frame--;
3413 }
3414 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3415 struct pt_regs *regs = (struct pt_regs *)
3416 (sp + STACK_FRAME_OVERHEAD);
3417 lr = regs->link;
3418 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3419 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3420 regs->trap, (void *)regs->nip, (void *)lr);
3421 firstframe = 1;
3422 }
3423 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3424 }
3425
3426 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3427 -
3428 -unsigned long arch_align_stack(unsigned long sp)
3429 -{
3430 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3431 - sp -= get_random_int() & ~PAGE_MASK;
3432 - return sp & ~0xf;
3433 -}
3434 -
3435 -static inline unsigned long brk_rnd(void)
3436 -{
3437 - unsigned long rnd = 0;
3438 -
3439 - /* 8MB for 32bit, 1GB for 64bit */
3440 - if (is_32bit_task())
3441 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3442 - else
3443 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3444 -
3445 - return rnd << PAGE_SHIFT;
3446 -}
3447 -
3448 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3449 -{
3450 - unsigned long base = mm->brk;
3451 - unsigned long ret;
3452 -
3453 -#ifdef CONFIG_PPC_STD_MMU_64
3454 - /*
3455 - * If we are using 1TB segments and we are allowed to randomise
3456 - * the heap, we can put it above 1TB so it is backed by a 1TB
3457 - * segment. Otherwise the heap will be in the bottom 1TB
3458 - * which always uses 256MB segments and this may result in a
3459 - * performance penalty.
3460 - */
3461 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3462 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3463 -#endif
3464 -
3465 - ret = PAGE_ALIGN(base + brk_rnd());
3466 -
3467 - if (ret < mm->brk)
3468 - return mm->brk;
3469 -
3470 - return ret;
3471 -}
3472 -
3473 -unsigned long randomize_et_dyn(unsigned long base)
3474 -{
3475 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3476 -
3477 - if (ret < base)
3478 - return base;
3479 -
3480 - return ret;
3481 -}
3482 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3483 index ef14988..856c4bc 100644
3484 --- a/arch/powerpc/kernel/ptrace.c
3485 +++ b/arch/powerpc/kernel/ptrace.c
3486 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3487 /*
3488 * Get contents of register REGNO in task TASK.
3489 */
3490 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3491 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3492 {
3493 if (task->thread.regs == NULL)
3494 return -EIO;
3495 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3496
3497 CHECK_FULL_REGS(child->thread.regs);
3498 if (index < PT_FPR0) {
3499 - tmp = ptrace_get_reg(child, (int) index);
3500 + tmp = ptrace_get_reg(child, index);
3501 } else {
3502 flush_fp_to_thread(child);
3503 tmp = ((unsigned long *)child->thread.fpr)
3504 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3505 index d670429..2bc59b2 100644
3506 --- a/arch/powerpc/kernel/signal_32.c
3507 +++ b/arch/powerpc/kernel/signal_32.c
3508 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3509 /* Save user registers on the stack */
3510 frame = &rt_sf->uc.uc_mcontext;
3511 addr = frame;
3512 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3513 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3514 if (save_user_regs(regs, frame, 0, 1))
3515 goto badframe;
3516 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3517 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3518 index 2fe6fc6..ada0d96 100644
3519 --- a/arch/powerpc/kernel/signal_64.c
3520 +++ b/arch/powerpc/kernel/signal_64.c
3521 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3522 current->thread.fpscr.val = 0;
3523
3524 /* Set up to return from userspace. */
3525 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3526 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3527 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3528 } else {
3529 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3530 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3531 index b97c2d6..dd01a6a 100644
3532 --- a/arch/powerpc/kernel/sys_ppc32.c
3533 +++ b/arch/powerpc/kernel/sys_ppc32.c
3534 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3535 if (oldlenp) {
3536 if (!error) {
3537 if (get_user(oldlen, oldlenp) ||
3538 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3539 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3540 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3541 error = -EFAULT;
3542 }
3543 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3544 }
3545 return error;
3546 }
3547 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3548 index 6f0ae1a..e4b6a56 100644
3549 --- a/arch/powerpc/kernel/traps.c
3550 +++ b/arch/powerpc/kernel/traps.c
3551 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3552 static inline void pmac_backlight_unblank(void) { }
3553 #endif
3554
3555 +extern void gr_handle_kernel_exploit(void);
3556 +
3557 int die(const char *str, struct pt_regs *regs, long err)
3558 {
3559 static struct {
3560 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3561 if (panic_on_oops)
3562 panic("Fatal exception");
3563
3564 + gr_handle_kernel_exploit();
3565 +
3566 oops_exit();
3567 do_exit(err);
3568
3569 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3570 index 137dc22..fe57a79 100644
3571 --- a/arch/powerpc/kernel/vdso.c
3572 +++ b/arch/powerpc/kernel/vdso.c
3573 @@ -36,6 +36,7 @@
3574 #include <asm/firmware.h>
3575 #include <asm/vdso.h>
3576 #include <asm/vdso_datapage.h>
3577 +#include <asm/mman.h>
3578
3579 #include "setup.h"
3580
3581 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = VDSO32_MBASE;
3583 #endif
3584
3585 - current->mm->context.vdso_base = 0;
3586 + current->mm->context.vdso_base = ~0UL;
3587
3588 /* vDSO has a problem and was disabled, just don't "enable" it for the
3589 * process
3590 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3591 vdso_base = get_unmapped_area(NULL, vdso_base,
3592 (vdso_pages << PAGE_SHIFT) +
3593 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3594 - 0, 0);
3595 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3596 if (IS_ERR_VALUE(vdso_base)) {
3597 rc = vdso_base;
3598 goto fail_mmapsem;
3599 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3600 index 77f6421..829564a 100644
3601 --- a/arch/powerpc/kernel/vio.c
3602 +++ b/arch/powerpc/kernel/vio.c
3603 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3604 vio_cmo_dealloc(viodev, alloc_size);
3605 }
3606
3607 -struct dma_map_ops vio_dma_mapping_ops = {
3608 +static const struct dma_map_ops vio_dma_mapping_ops = {
3609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3610 .free_coherent = vio_dma_iommu_free_coherent,
3611 .map_sg = vio_dma_iommu_map_sg,
3612 .unmap_sg = vio_dma_iommu_unmap_sg,
3613 + .dma_supported = dma_iommu_dma_supported,
3614 .map_page = vio_dma_iommu_map_page,
3615 .unmap_page = vio_dma_iommu_unmap_page,
3616
3617 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3618
3619 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3620 {
3621 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3622 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3623 }
3624
3625 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3626 index 5eea6f3..5d10396 100644
3627 --- a/arch/powerpc/lib/usercopy_64.c
3628 +++ b/arch/powerpc/lib/usercopy_64.c
3629 @@ -9,22 +9,6 @@
3630 #include <linux/module.h>
3631 #include <asm/uaccess.h>
3632
3633 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3634 -{
3635 - if (likely(access_ok(VERIFY_READ, from, n)))
3636 - n = __copy_from_user(to, from, n);
3637 - else
3638 - memset(to, 0, n);
3639 - return n;
3640 -}
3641 -
3642 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3643 -{
3644 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3645 - n = __copy_to_user(to, from, n);
3646 - return n;
3647 -}
3648 -
3649 unsigned long copy_in_user(void __user *to, const void __user *from,
3650 unsigned long n)
3651 {
3652 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3653 return n;
3654 }
3655
3656 -EXPORT_SYMBOL(copy_from_user);
3657 -EXPORT_SYMBOL(copy_to_user);
3658 EXPORT_SYMBOL(copy_in_user);
3659
3660 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3661 index e7dae82..877ce0d 100644
3662 --- a/arch/powerpc/mm/fault.c
3663 +++ b/arch/powerpc/mm/fault.c
3664 @@ -30,6 +30,10 @@
3665 #include <linux/kprobes.h>
3666 #include <linux/kdebug.h>
3667 #include <linux/perf_event.h>
3668 +#include <linux/slab.h>
3669 +#include <linux/pagemap.h>
3670 +#include <linux/compiler.h>
3671 +#include <linux/unistd.h>
3672
3673 #include <asm/firmware.h>
3674 #include <asm/page.h>
3675 @@ -40,6 +44,7 @@
3676 #include <asm/uaccess.h>
3677 #include <asm/tlbflush.h>
3678 #include <asm/siginfo.h>
3679 +#include <asm/ptrace.h>
3680
3681
3682 #ifdef CONFIG_KPROBES
3683 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3684 }
3685 #endif
3686
3687 +#ifdef CONFIG_PAX_PAGEEXEC
3688 +/*
3689 + * PaX: decide what to do with offenders (regs->nip = fault address)
3690 + *
3691 + * returns 1 when task should be killed
3692 + */
3693 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3694 +{
3695 + return 1;
3696 +}
3697 +
3698 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3699 +{
3700 + unsigned long i;
3701 +
3702 + printk(KERN_ERR "PAX: bytes at PC: ");
3703 + for (i = 0; i < 5; i++) {
3704 + unsigned int c;
3705 + if (get_user(c, (unsigned int __user *)pc+i))
3706 + printk(KERN_CONT "???????? ");
3707 + else
3708 + printk(KERN_CONT "%08x ", c);
3709 + }
3710 + printk("\n");
3711 +}
3712 +#endif
3713 +
3714 /*
3715 * Check whether the instruction at regs->nip is a store using
3716 * an update addressing form which will update r1.
3717 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3718 * indicate errors in DSISR but can validly be set in SRR1.
3719 */
3720 if (trap == 0x400)
3721 - error_code &= 0x48200000;
3722 + error_code &= 0x58200000;
3723 else
3724 is_write = error_code & DSISR_ISSTORE;
3725 #else
3726 @@ -250,7 +282,7 @@ good_area:
3727 * "undefined". Of those that can be set, this is the only
3728 * one which seems bad.
3729 */
3730 - if (error_code & 0x10000000)
3731 + if (error_code & DSISR_GUARDED)
3732 /* Guarded storage error. */
3733 goto bad_area;
3734 #endif /* CONFIG_8xx */
3735 @@ -265,7 +297,7 @@ good_area:
3736 * processors use the same I/D cache coherency mechanism
3737 * as embedded.
3738 */
3739 - if (error_code & DSISR_PROTFAULT)
3740 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3741 goto bad_area;
3742 #endif /* CONFIG_PPC_STD_MMU */
3743
3744 @@ -335,6 +367,23 @@ bad_area:
3745 bad_area_nosemaphore:
3746 /* User mode accesses cause a SIGSEGV */
3747 if (user_mode(regs)) {
3748 +
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3751 +#ifdef CONFIG_PPC_STD_MMU
3752 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3753 +#else
3754 + if (is_exec && regs->nip == address) {
3755 +#endif
3756 + switch (pax_handle_fetch_fault(regs)) {
3757 + }
3758 +
3759 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3760 + do_group_exit(SIGKILL);
3761 + }
3762 + }
3763 +#endif
3764 +
3765 _exception(SIGSEGV, regs, code, address);
3766 return 0;
3767 }
3768 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3769 index 5973631..ad617af 100644
3770 --- a/arch/powerpc/mm/mem.c
3771 +++ b/arch/powerpc/mm/mem.c
3772 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3773 {
3774 unsigned long lmb_next_region_start_pfn,
3775 lmb_region_max_pfn;
3776 - int i;
3777 + unsigned int i;
3778
3779 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3780 lmb_region_max_pfn =
3781 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3782 index 0d957a4..26d968f 100644
3783 --- a/arch/powerpc/mm/mmap_64.c
3784 +++ b/arch/powerpc/mm/mmap_64.c
3785 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3786 */
3787 if (mmap_is_legacy()) {
3788 mm->mmap_base = TASK_UNMAPPED_BASE;
3789 +
3790 +#ifdef CONFIG_PAX_RANDMMAP
3791 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3792 + mm->mmap_base += mm->delta_mmap;
3793 +#endif
3794 +
3795 mm->get_unmapped_area = arch_get_unmapped_area;
3796 mm->unmap_area = arch_unmap_area;
3797 } else {
3798 mm->mmap_base = mmap_base();
3799 +
3800 +#ifdef CONFIG_PAX_RANDMMAP
3801 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3802 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3803 +#endif
3804 +
3805 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3806 mm->unmap_area = arch_unmap_area_topdown;
3807 }
3808 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3809 index ba51948..23009d9 100644
3810 --- a/arch/powerpc/mm/slice.c
3811 +++ b/arch/powerpc/mm/slice.c
3812 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3813 if ((mm->task_size - len) < addr)
3814 return 0;
3815 vma = find_vma(mm, addr);
3816 - return (!vma || (addr + len) <= vma->vm_start);
3817 + return check_heap_stack_gap(vma, addr, len);
3818 }
3819
3820 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3821 @@ -256,7 +256,7 @@ full_search:
3822 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3823 continue;
3824 }
3825 - if (!vma || addr + len <= vma->vm_start) {
3826 + if (check_heap_stack_gap(vma, addr, len)) {
3827 /*
3828 * Remember the place where we stopped the search:
3829 */
3830 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3831 }
3832 }
3833
3834 - addr = mm->mmap_base;
3835 - while (addr > len) {
3836 + if (mm->mmap_base < len)
3837 + addr = -ENOMEM;
3838 + else
3839 + addr = mm->mmap_base - len;
3840 +
3841 + while (!IS_ERR_VALUE(addr)) {
3842 /* Go down by chunk size */
3843 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3844 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3845
3846 /* Check for hit with different page size */
3847 mask = slice_range_to_mask(addr, len);
3848 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 * return with success:
3850 */
3851 vma = find_vma(mm, addr);
3852 - if (!vma || (addr + len) <= vma->vm_start) {
3853 + if (check_heap_stack_gap(vma, addr, len)) {
3854 /* remember the address as a hint for next time */
3855 if (use_cache)
3856 mm->free_area_cache = addr;
3857 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3858 mm->cached_hole_size = vma->vm_start - addr;
3859
3860 /* try just below the current vma->vm_start */
3861 - addr = vma->vm_start;
3862 + addr = skip_heap_stack_gap(vma, len);
3863 }
3864
3865 /*
3866 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3867 if (fixed && addr > (mm->task_size - len))
3868 return -EINVAL;
3869
3870 +#ifdef CONFIG_PAX_RANDMMAP
3871 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3872 + addr = 0;
3873 +#endif
3874 +
3875 /* If hint, make sure it matches our alignment restrictions */
3876 if (!fixed && addr) {
3877 addr = _ALIGN_UP(addr, 1ul << pshift);
3878 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3879 index b5c753d..8f01abe 100644
3880 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3881 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3882 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3883 lite5200_pm_target_state = PM_SUSPEND_ON;
3884 }
3885
3886 -static struct platform_suspend_ops lite5200_pm_ops = {
3887 +static const struct platform_suspend_ops lite5200_pm_ops = {
3888 .valid = lite5200_pm_valid,
3889 .begin = lite5200_pm_begin,
3890 .prepare = lite5200_pm_prepare,
3891 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3892 index a55b0b6..478c18e 100644
3893 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3894 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3895 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3896 iounmap(mbar);
3897 }
3898
3899 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3900 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3901 .valid = mpc52xx_pm_valid,
3902 .prepare = mpc52xx_pm_prepare,
3903 .enter = mpc52xx_pm_enter,
3904 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3905 index 08e65fc..643d3ac 100644
3906 --- a/arch/powerpc/platforms/83xx/suspend.c
3907 +++ b/arch/powerpc/platforms/83xx/suspend.c
3908 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3909 return ret;
3910 }
3911
3912 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3913 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3914 .valid = mpc83xx_suspend_valid,
3915 .begin = mpc83xx_suspend_begin,
3916 .enter = mpc83xx_suspend_enter,
3917 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3918 index ca5bfdf..1602e09 100644
3919 --- a/arch/powerpc/platforms/cell/iommu.c
3920 +++ b/arch/powerpc/platforms/cell/iommu.c
3921 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3922
3923 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3924
3925 -struct dma_map_ops dma_iommu_fixed_ops = {
3926 +const struct dma_map_ops dma_iommu_fixed_ops = {
3927 .alloc_coherent = dma_fixed_alloc_coherent,
3928 .free_coherent = dma_fixed_free_coherent,
3929 .map_sg = dma_fixed_map_sg,
3930 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3931 index e34b305..20e48ec 100644
3932 --- a/arch/powerpc/platforms/ps3/system-bus.c
3933 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3934 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3935 return mask >= DMA_BIT_MASK(32);
3936 }
3937
3938 -static struct dma_map_ops ps3_sb_dma_ops = {
3939 +static const struct dma_map_ops ps3_sb_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_sb_map_sg,
3943 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3944 .unmap_page = ps3_unmap_page,
3945 };
3946
3947 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3948 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3949 .alloc_coherent = ps3_alloc_coherent,
3950 .free_coherent = ps3_free_coherent,
3951 .map_sg = ps3_ioc0_map_sg,
3952 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3953 index f0e6f28..60d53ed 100644
3954 --- a/arch/powerpc/platforms/pseries/Kconfig
3955 +++ b/arch/powerpc/platforms/pseries/Kconfig
3956 @@ -2,6 +2,8 @@ config PPC_PSERIES
3957 depends on PPC64 && PPC_BOOK3S
3958 bool "IBM pSeries & new (POWER5-based) iSeries"
3959 select MPIC
3960 + select PCI_MSI
3961 + select XICS
3962 select PPC_I8259
3963 select PPC_RTAS
3964 select RTAS_ERROR_LOGGING
3965 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3966 index 43c0aca..42c045b 100644
3967 --- a/arch/s390/Kconfig
3968 +++ b/arch/s390/Kconfig
3969 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3970
3971 config S390_SWITCH_AMODE
3972 bool "Switch kernel/user addressing modes"
3973 + default y
3974 help
3975 This option allows to switch the addressing modes of kernel and user
3976 - space. The kernel parameter switch_amode=on will enable this feature,
3977 - default is disabled. Enabling this (via kernel parameter) on machines
3978 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3979 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3980 + will reduce system performance.
3981
3982 Note that this option will also be selected by selecting the execute
3983 - protection option below. Enabling the execute protection via the
3984 - noexec kernel parameter will also switch the addressing modes,
3985 - independent of the switch_amode kernel parameter.
3986 + protection option below. Enabling the execute protection will also
3987 + switch the addressing modes, independent of this option.
3988
3989
3990 config S390_EXEC_PROTECT
3991 bool "Data execute protection"
3992 + default y
3993 select S390_SWITCH_AMODE
3994 help
3995 This option allows to enable a buffer overflow protection for user
3996 space programs and it also selects the addressing mode option above.
3997 - The kernel parameter noexec=on will enable this feature and also
3998 - switch the addressing modes, default is disabled. Enabling this (via
3999 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4000 - will reduce system performance.
4001 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4002 + reduce system performance.
4003
4004 comment "Code generation options"
4005
4006 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4007 index ae7c8f9..3f01a0c 100644
4008 --- a/arch/s390/include/asm/atomic.h
4009 +++ b/arch/s390/include/asm/atomic.h
4010 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4011 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4013
4014 +#define atomic64_read_unchecked(v) atomic64_read(v)
4015 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4016 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4017 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4018 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4019 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4020 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4021 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4022 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4023 +
4024 #define smp_mb__before_atomic_dec() smp_mb()
4025 #define smp_mb__after_atomic_dec() smp_mb()
4026 #define smp_mb__before_atomic_inc() smp_mb()
4027 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4028 index e885442..e3a2817 100644
4029 --- a/arch/s390/include/asm/elf.h
4030 +++ b/arch/s390/include/asm/elf.h
4031 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4032 that it will "exec", and that there is sufficient room for the brk. */
4033 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4034
4035 +#ifdef CONFIG_PAX_ASLR
4036 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4037 +
4038 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4039 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4040 +#endif
4041 +
4042 /* This yields a mask that user programs can use to figure out what
4043 instruction set this CPU supports. */
4044
4045 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4046 index e37478e..9ce0e9f 100644
4047 --- a/arch/s390/include/asm/setup.h
4048 +++ b/arch/s390/include/asm/setup.h
4049 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4050 void detect_memory_layout(struct mem_chunk chunk[]);
4051
4052 #ifdef CONFIG_S390_SWITCH_AMODE
4053 -extern unsigned int switch_amode;
4054 +#define switch_amode (1)
4055 #else
4056 #define switch_amode (0)
4057 #endif
4058
4059 #ifdef CONFIG_S390_EXEC_PROTECT
4060 -extern unsigned int s390_noexec;
4061 +#define s390_noexec (1)
4062 #else
4063 #define s390_noexec (0)
4064 #endif
4065 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4066 index 8377e91..e28e6f1 100644
4067 --- a/arch/s390/include/asm/uaccess.h
4068 +++ b/arch/s390/include/asm/uaccess.h
4069 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4070 copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 might_fault();
4073 +
4074 + if ((long)n < 0)
4075 + return n;
4076 +
4077 if (access_ok(VERIFY_WRITE, to, n))
4078 n = __copy_to_user(to, from, n);
4079 return n;
4080 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4081 static inline unsigned long __must_check
4082 __copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 if (__builtin_constant_p(n) && (n <= 256))
4088 return uaccess.copy_from_user_small(n, from, to);
4089 else
4090 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4091 copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 might_fault();
4094 +
4095 + if ((long)n < 0)
4096 + return n;
4097 +
4098 if (access_ok(VERIFY_READ, from, n))
4099 n = __copy_from_user(to, from, n);
4100 else
4101 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4102 index 639380a..72e3c02 100644
4103 --- a/arch/s390/kernel/module.c
4104 +++ b/arch/s390/kernel/module.c
4105 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4106
4107 /* Increase core size by size of got & plt and set start
4108 offsets for got and plt. */
4109 - me->core_size = ALIGN(me->core_size, 4);
4110 - me->arch.got_offset = me->core_size;
4111 - me->core_size += me->arch.got_size;
4112 - me->arch.plt_offset = me->core_size;
4113 - me->core_size += me->arch.plt_size;
4114 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4115 + me->arch.got_offset = me->core_size_rw;
4116 + me->core_size_rw += me->arch.got_size;
4117 + me->arch.plt_offset = me->core_size_rx;
4118 + me->core_size_rx += me->arch.plt_size;
4119 return 0;
4120 }
4121
4122 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 if (info->got_initialized == 0) {
4124 Elf_Addr *gotent;
4125
4126 - gotent = me->module_core + me->arch.got_offset +
4127 + gotent = me->module_core_rw + me->arch.got_offset +
4128 info->got_offset;
4129 *gotent = val;
4130 info->got_initialized = 1;
4131 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 else if (r_type == R_390_GOTENT ||
4133 r_type == R_390_GOTPLTENT)
4134 *(unsigned int *) loc =
4135 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4136 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4137 else if (r_type == R_390_GOT64 ||
4138 r_type == R_390_GOTPLT64)
4139 *(unsigned long *) loc = val;
4140 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4142 if (info->plt_initialized == 0) {
4143 unsigned int *ip;
4144 - ip = me->module_core + me->arch.plt_offset +
4145 + ip = me->module_core_rx + me->arch.plt_offset +
4146 info->plt_offset;
4147 #ifndef CONFIG_64BIT
4148 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4149 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 val - loc + 0xffffUL < 0x1ffffeUL) ||
4151 (r_type == R_390_PLT32DBL &&
4152 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4153 - val = (Elf_Addr) me->module_core +
4154 + val = (Elf_Addr) me->module_core_rx +
4155 me->arch.plt_offset +
4156 info->plt_offset;
4157 val += rela->r_addend - loc;
4158 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4160 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4161 val = val + rela->r_addend -
4162 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4163 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4164 if (r_type == R_390_GOTOFF16)
4165 *(unsigned short *) loc = val;
4166 else if (r_type == R_390_GOTOFF32)
4167 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4168 break;
4169 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4170 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4171 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4172 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4173 rela->r_addend - loc;
4174 if (r_type == R_390_GOTPC)
4175 *(unsigned int *) loc = val;
4176 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4177 index 061479f..dbfb08c 100644
4178 --- a/arch/s390/kernel/setup.c
4179 +++ b/arch/s390/kernel/setup.c
4180 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4181 early_param("mem", early_parse_mem);
4182
4183 #ifdef CONFIG_S390_SWITCH_AMODE
4184 -unsigned int switch_amode = 0;
4185 -EXPORT_SYMBOL_GPL(switch_amode);
4186 -
4187 static int set_amode_and_uaccess(unsigned long user_amode,
4188 unsigned long user32_amode)
4189 {
4190 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4191 return 0;
4192 }
4193 }
4194 -
4195 -/*
4196 - * Switch kernel/user addressing modes?
4197 - */
4198 -static int __init early_parse_switch_amode(char *p)
4199 -{
4200 - switch_amode = 1;
4201 - return 0;
4202 -}
4203 -early_param("switch_amode", early_parse_switch_amode);
4204 -
4205 #else /* CONFIG_S390_SWITCH_AMODE */
4206 static inline int set_amode_and_uaccess(unsigned long user_amode,
4207 unsigned long user32_amode)
4208 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4209 }
4210 #endif /* CONFIG_S390_SWITCH_AMODE */
4211
4212 -#ifdef CONFIG_S390_EXEC_PROTECT
4213 -unsigned int s390_noexec = 0;
4214 -EXPORT_SYMBOL_GPL(s390_noexec);
4215 -
4216 -/*
4217 - * Enable execute protection?
4218 - */
4219 -static int __init early_parse_noexec(char *p)
4220 -{
4221 - if (!strncmp(p, "off", 3))
4222 - return 0;
4223 - switch_amode = 1;
4224 - s390_noexec = 1;
4225 - return 0;
4226 -}
4227 -early_param("noexec", early_parse_noexec);
4228 -#endif /* CONFIG_S390_EXEC_PROTECT */
4229 -
4230 static void setup_addressing_mode(void)
4231 {
4232 if (s390_noexec) {
4233 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4234 index f4558cc..e461f37 100644
4235 --- a/arch/s390/mm/mmap.c
4236 +++ b/arch/s390/mm/mmap.c
4237 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4238 */
4239 if (mmap_is_legacy()) {
4240 mm->mmap_base = TASK_UNMAPPED_BASE;
4241 +
4242 +#ifdef CONFIG_PAX_RANDMMAP
4243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4244 + mm->mmap_base += mm->delta_mmap;
4245 +#endif
4246 +
4247 mm->get_unmapped_area = arch_get_unmapped_area;
4248 mm->unmap_area = arch_unmap_area;
4249 } else {
4250 mm->mmap_base = mmap_base();
4251 +
4252 +#ifdef CONFIG_PAX_RANDMMAP
4253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4254 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4255 +#endif
4256 +
4257 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4258 mm->unmap_area = arch_unmap_area_topdown;
4259 }
4260 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 */
4262 if (mmap_is_legacy()) {
4263 mm->mmap_base = TASK_UNMAPPED_BASE;
4264 +
4265 +#ifdef CONFIG_PAX_RANDMMAP
4266 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4267 + mm->mmap_base += mm->delta_mmap;
4268 +#endif
4269 +
4270 mm->get_unmapped_area = s390_get_unmapped_area;
4271 mm->unmap_area = arch_unmap_area;
4272 } else {
4273 mm->mmap_base = mmap_base();
4274 +
4275 +#ifdef CONFIG_PAX_RANDMMAP
4276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4277 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4278 +#endif
4279 +
4280 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4281 mm->unmap_area = arch_unmap_area_topdown;
4282 }
4283 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4284 index 589d5c7..669e274 100644
4285 --- a/arch/score/include/asm/system.h
4286 +++ b/arch/score/include/asm/system.h
4287 @@ -17,7 +17,7 @@ do { \
4288 #define finish_arch_switch(prev) do {} while (0)
4289
4290 typedef void (*vi_handler_t)(void);
4291 -extern unsigned long arch_align_stack(unsigned long sp);
4292 +#define arch_align_stack(x) (x)
4293
4294 #define mb() barrier()
4295 #define rmb() barrier()
4296 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4297 index 25d0803..d6c8e36 100644
4298 --- a/arch/score/kernel/process.c
4299 +++ b/arch/score/kernel/process.c
4300 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4301
4302 return task_pt_regs(task)->cp0_epc;
4303 }
4304 -
4305 -unsigned long arch_align_stack(unsigned long sp)
4306 -{
4307 - return sp;
4308 -}
4309 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4310 index d936c1a..304a252 100644
4311 --- a/arch/sh/boards/mach-hp6xx/pm.c
4312 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4313 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4314 return 0;
4315 }
4316
4317 -static struct platform_suspend_ops hp6x0_pm_ops = {
4318 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4319 .enter = hp6x0_pm_enter,
4320 .valid = suspend_valid_only_mem,
4321 };
4322 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4323 index 8a8a993..7b3079b 100644
4324 --- a/arch/sh/kernel/cpu/sh4/sq.c
4325 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4326 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4327 NULL,
4328 };
4329
4330 -static struct sysfs_ops sq_sysfs_ops = {
4331 +static const struct sysfs_ops sq_sysfs_ops = {
4332 .show = sq_sysfs_show,
4333 .store = sq_sysfs_store,
4334 };
4335 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4336 index ee3c2aa..c49cee6 100644
4337 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4338 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4339 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4340 return 0;
4341 }
4342
4343 -static struct platform_suspend_ops sh_pm_ops = {
4344 +static const struct platform_suspend_ops sh_pm_ops = {
4345 .enter = sh_pm_enter,
4346 .valid = suspend_valid_only_mem,
4347 };
4348 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4349 index 3e532d0..9faa306 100644
4350 --- a/arch/sh/kernel/kgdb.c
4351 +++ b/arch/sh/kernel/kgdb.c
4352 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4353 {
4354 }
4355
4356 -struct kgdb_arch arch_kgdb_ops = {
4357 +const struct kgdb_arch arch_kgdb_ops = {
4358 /* Breakpoint instruction: trapa #0x3c */
4359 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4360 .gdb_bpt_instr = { 0x3c, 0xc3 },
4361 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4362 index afeb710..d1d1289 100644
4363 --- a/arch/sh/mm/mmap.c
4364 +++ b/arch/sh/mm/mmap.c
4365 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4366 addr = PAGE_ALIGN(addr);
4367
4368 vma = find_vma(mm, addr);
4369 - if (TASK_SIZE - len >= addr &&
4370 - (!vma || addr + len <= vma->vm_start))
4371 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4372 return addr;
4373 }
4374
4375 @@ -106,7 +105,7 @@ full_search:
4376 }
4377 return -ENOMEM;
4378 }
4379 - if (likely(!vma || addr + len <= vma->vm_start)) {
4380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4381 /*
4382 * Remember the place where we stopped the search:
4383 */
4384 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4385 addr = PAGE_ALIGN(addr);
4386
4387 vma = find_vma(mm, addr);
4388 - if (TASK_SIZE - len >= addr &&
4389 - (!vma || addr + len <= vma->vm_start))
4390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4391 return addr;
4392 }
4393
4394 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 /* make sure it can fit in the remaining address space */
4396 if (likely(addr > len)) {
4397 vma = find_vma(mm, addr-len);
4398 - if (!vma || addr <= vma->vm_start) {
4399 + if (check_heap_stack_gap(vma, addr - len, len)) {
4400 /* remember the address as a hint for next time */
4401 return (mm->free_area_cache = addr-len);
4402 }
4403 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4404 if (unlikely(mm->mmap_base < len))
4405 goto bottomup;
4406
4407 - addr = mm->mmap_base-len;
4408 - if (do_colour_align)
4409 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4410 + addr = mm->mmap_base - len;
4411
4412 do {
4413 + if (do_colour_align)
4414 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4415 /*
4416 * Lookup failure means no vma is above this address,
4417 * else if new region fits below vma->vm_start,
4418 * return with success:
4419 */
4420 vma = find_vma(mm, addr);
4421 - if (likely(!vma || addr+len <= vma->vm_start)) {
4422 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4423 /* remember the address as a hint for next time */
4424 return (mm->free_area_cache = addr);
4425 }
4426 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4427 mm->cached_hole_size = vma->vm_start - addr;
4428
4429 /* try just below the current vma->vm_start */
4430 - addr = vma->vm_start-len;
4431 - if (do_colour_align)
4432 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 - } while (likely(len < vma->vm_start));
4434 + addr = skip_heap_stack_gap(vma, len);
4435 + } while (!IS_ERR_VALUE(addr));
4436
4437 bottomup:
4438 /*
4439 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4440 index 05ef538..dc9c857 100644
4441 --- a/arch/sparc/Kconfig
4442 +++ b/arch/sparc/Kconfig
4443 @@ -32,6 +32,7 @@ config SPARC
4444
4445 config SPARC32
4446 def_bool !64BIT
4447 + select GENERIC_ATOMIC64
4448
4449 config SPARC64
4450 def_bool 64BIT
4451 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4452 index 113225b..7fd04e7 100644
4453 --- a/arch/sparc/Makefile
4454 +++ b/arch/sparc/Makefile
4455 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4456 # Export what is needed by arch/sparc/boot/Makefile
4457 export VMLINUX_INIT VMLINUX_MAIN
4458 VMLINUX_INIT := $(head-y) $(init-y)
4459 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4460 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4461 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4462 VMLINUX_MAIN += $(drivers-y) $(net-y)
4463
4464 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4465 index 8ff83d8..4a459c2 100644
4466 --- a/arch/sparc/include/asm/atomic.h
4467 +++ b/arch/sparc/include/asm/atomic.h
4468 @@ -4,5 +4,6 @@
4469 #include <asm/atomic_64.h>
4470 #else
4471 #include <asm/atomic_32.h>
4472 +#include <asm-generic/atomic64.h>
4473 #endif
4474 #endif
4475 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4476 index f5cc06f..f858d47 100644
4477 --- a/arch/sparc/include/asm/atomic_64.h
4478 +++ b/arch/sparc/include/asm/atomic_64.h
4479 @@ -14,18 +14,40 @@
4480 #define ATOMIC64_INIT(i) { (i) }
4481
4482 #define atomic_read(v) ((v)->counter)
4483 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4484 +{
4485 + return v->counter;
4486 +}
4487 #define atomic64_read(v) ((v)->counter)
4488 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4489 +{
4490 + return v->counter;
4491 +}
4492
4493 #define atomic_set(v, i) (((v)->counter) = i)
4494 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4495 +{
4496 + v->counter = i;
4497 +}
4498 #define atomic64_set(v, i) (((v)->counter) = i)
4499 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4500 +{
4501 + v->counter = i;
4502 +}
4503
4504 extern void atomic_add(int, atomic_t *);
4505 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4506 extern void atomic64_add(long, atomic64_t *);
4507 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4508 extern void atomic_sub(int, atomic_t *);
4509 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4510 extern void atomic64_sub(long, atomic64_t *);
4511 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4512
4513 extern int atomic_add_ret(int, atomic_t *);
4514 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4515 extern long atomic64_add_ret(long, atomic64_t *);
4516 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4517 extern int atomic_sub_ret(int, atomic_t *);
4518 extern long atomic64_sub_ret(long, atomic64_t *);
4519
4520 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4521 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4522
4523 #define atomic_inc_return(v) atomic_add_ret(1, v)
4524 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4525 +{
4526 + return atomic_add_ret_unchecked(1, v);
4527 +}
4528 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4529 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4530 +{
4531 + return atomic64_add_ret_unchecked(1, v);
4532 +}
4533
4534 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4535 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4536
4537 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4538 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4539 +{
4540 + return atomic_add_ret_unchecked(i, v);
4541 +}
4542 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4543 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4544 +{
4545 + return atomic64_add_ret_unchecked(i, v);
4546 +}
4547
4548 /*
4549 * atomic_inc_and_test - increment and test
4550 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4551 * other cases.
4552 */
4553 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4554 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4555 +{
4556 + return atomic_inc_return_unchecked(v) == 0;
4557 +}
4558 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4559
4560 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4561 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4562 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4563
4564 #define atomic_inc(v) atomic_add(1, v)
4565 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4566 +{
4567 + atomic_add_unchecked(1, v);
4568 +}
4569 #define atomic64_inc(v) atomic64_add(1, v)
4570 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4571 +{
4572 + atomic64_add_unchecked(1, v);
4573 +}
4574
4575 #define atomic_dec(v) atomic_sub(1, v)
4576 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4577 +{
4578 + atomic_sub_unchecked(1, v);
4579 +}
4580 #define atomic64_dec(v) atomic64_sub(1, v)
4581 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4582 +{
4583 + atomic64_sub_unchecked(1, v);
4584 +}
4585
4586 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4587 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4588
4589 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4590 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4591 +{
4592 + return cmpxchg(&v->counter, old, new);
4593 +}
4594 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4595 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4596 +{
4597 + return xchg(&v->counter, new);
4598 +}
4599
4600 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4601 {
4602 - int c, old;
4603 + int c, old, new;
4604 c = atomic_read(v);
4605 for (;;) {
4606 - if (unlikely(c == (u)))
4607 + if (unlikely(c == u))
4608 break;
4609 - old = atomic_cmpxchg((v), c, c + (a));
4610 +
4611 + asm volatile("addcc %2, %0, %0\n"
4612 +
4613 +#ifdef CONFIG_PAX_REFCOUNT
4614 + "tvs %%icc, 6\n"
4615 +#endif
4616 +
4617 + : "=r" (new)
4618 + : "0" (c), "ir" (a)
4619 + : "cc");
4620 +
4621 + old = atomic_cmpxchg(v, c, new);
4622 if (likely(old == c))
4623 break;
4624 c = old;
4625 }
4626 - return c != (u);
4627 + return c != u;
4628 }
4629
4630 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4631 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4632 #define atomic64_cmpxchg(v, o, n) \
4633 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4634 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4635 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4636 +{
4637 + return xchg(&v->counter, new);
4638 +}
4639
4640 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4641 {
4642 - long c, old;
4643 + long c, old, new;
4644 c = atomic64_read(v);
4645 for (;;) {
4646 - if (unlikely(c == (u)))
4647 + if (unlikely(c == u))
4648 break;
4649 - old = atomic64_cmpxchg((v), c, c + (a));
4650 +
4651 + asm volatile("addcc %2, %0, %0\n"
4652 +
4653 +#ifdef CONFIG_PAX_REFCOUNT
4654 + "tvs %%xcc, 6\n"
4655 +#endif
4656 +
4657 + : "=r" (new)
4658 + : "0" (c), "ir" (a)
4659 + : "cc");
4660 +
4661 + old = atomic64_cmpxchg(v, c, new);
4662 if (likely(old == c))
4663 break;
4664 c = old;
4665 }
4666 - return c != (u);
4667 + return c != u;
4668 }
4669
4670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4671 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4672 index 41f85ae..fb54d5e 100644
4673 --- a/arch/sparc/include/asm/cache.h
4674 +++ b/arch/sparc/include/asm/cache.h
4675 @@ -8,7 +8,7 @@
4676 #define _SPARC_CACHE_H
4677
4678 #define L1_CACHE_SHIFT 5
4679 -#define L1_CACHE_BYTES 32
4680 +#define L1_CACHE_BYTES 32UL
4681 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4682
4683 #ifdef CONFIG_SPARC32
4684 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4685 index 5a8c308..38def92 100644
4686 --- a/arch/sparc/include/asm/dma-mapping.h
4687 +++ b/arch/sparc/include/asm/dma-mapping.h
4688 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4689 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4690 #define dma_is_consistent(d, h) (1)
4691
4692 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4693 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4694 extern struct bus_type pci_bus_type;
4695
4696 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4697 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4698 {
4699 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4700 if (dev->bus == &pci_bus_type)
4701 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4702 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4703 dma_addr_t *dma_handle, gfp_t flag)
4704 {
4705 - struct dma_map_ops *ops = get_dma_ops(dev);
4706 + const struct dma_map_ops *ops = get_dma_ops(dev);
4707 void *cpu_addr;
4708
4709 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4710 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4711 static inline void dma_free_coherent(struct device *dev, size_t size,
4712 void *cpu_addr, dma_addr_t dma_handle)
4713 {
4714 - struct dma_map_ops *ops = get_dma_ops(dev);
4715 + const struct dma_map_ops *ops = get_dma_ops(dev);
4716
4717 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4718 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4719 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4720 index 381a1b5..b97e3ff 100644
4721 --- a/arch/sparc/include/asm/elf_32.h
4722 +++ b/arch/sparc/include/asm/elf_32.h
4723 @@ -116,6 +116,13 @@ typedef struct {
4724
4725 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4726
4727 +#ifdef CONFIG_PAX_ASLR
4728 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4729 +
4730 +#define PAX_DELTA_MMAP_LEN 16
4731 +#define PAX_DELTA_STACK_LEN 16
4732 +#endif
4733 +
4734 /* This yields a mask that user programs can use to figure out what
4735 instruction set this cpu supports. This can NOT be done in userspace
4736 on Sparc. */
4737 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4738 index 9968085..c2106ef 100644
4739 --- a/arch/sparc/include/asm/elf_64.h
4740 +++ b/arch/sparc/include/asm/elf_64.h
4741 @@ -163,6 +163,12 @@ typedef struct {
4742 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4743 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4744
4745 +#ifdef CONFIG_PAX_ASLR
4746 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4747 +
4748 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4749 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4750 +#endif
4751
4752 /* This yields a mask that user programs can use to figure out what
4753 instruction set this cpu supports. */
4754 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4755 index 156707b..aefa786 100644
4756 --- a/arch/sparc/include/asm/page_32.h
4757 +++ b/arch/sparc/include/asm/page_32.h
4758 @@ -8,6 +8,8 @@
4759 #ifndef _SPARC_PAGE_H
4760 #define _SPARC_PAGE_H
4761
4762 +#include <linux/const.h>
4763 +
4764 #define PAGE_SHIFT 12
4765
4766 #ifndef __ASSEMBLY__
4767 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4768 index e0cabe7..efd60f1 100644
4769 --- a/arch/sparc/include/asm/pgtable_32.h
4770 +++ b/arch/sparc/include/asm/pgtable_32.h
4771 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4772 BTFIXUPDEF_INT(page_none)
4773 BTFIXUPDEF_INT(page_copy)
4774 BTFIXUPDEF_INT(page_readonly)
4775 +
4776 +#ifdef CONFIG_PAX_PAGEEXEC
4777 +BTFIXUPDEF_INT(page_shared_noexec)
4778 +BTFIXUPDEF_INT(page_copy_noexec)
4779 +BTFIXUPDEF_INT(page_readonly_noexec)
4780 +#endif
4781 +
4782 BTFIXUPDEF_INT(page_kernel)
4783
4784 #define PMD_SHIFT SUN4C_PMD_SHIFT
4785 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4786 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4787 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4788
4789 +#ifdef CONFIG_PAX_PAGEEXEC
4790 +extern pgprot_t PAGE_SHARED_NOEXEC;
4791 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4792 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4793 +#else
4794 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4795 +# define PAGE_COPY_NOEXEC PAGE_COPY
4796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4797 +#endif
4798 +
4799 extern unsigned long page_kernel;
4800
4801 #ifdef MODULE
4802 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4803 index 1407c07..7e10231 100644
4804 --- a/arch/sparc/include/asm/pgtsrmmu.h
4805 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4806 @@ -115,6 +115,13 @@
4807 SRMMU_EXEC | SRMMU_REF)
4808 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4809 SRMMU_EXEC | SRMMU_REF)
4810 +
4811 +#ifdef CONFIG_PAX_PAGEEXEC
4812 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4813 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4814 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4815 +#endif
4816 +
4817 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4818 SRMMU_DIRTY | SRMMU_REF)
4819
4820 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4821 index 43e5147..47622a1 100644
4822 --- a/arch/sparc/include/asm/spinlock_64.h
4823 +++ b/arch/sparc/include/asm/spinlock_64.h
4824 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4825
4826 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4827
4828 -static void inline arch_read_lock(raw_rwlock_t *lock)
4829 +static inline void arch_read_lock(raw_rwlock_t *lock)
4830 {
4831 unsigned long tmp1, tmp2;
4832
4833 __asm__ __volatile__ (
4834 "1: ldsw [%2], %0\n"
4835 " brlz,pn %0, 2f\n"
4836 -"4: add %0, 1, %1\n"
4837 +"4: addcc %0, 1, %1\n"
4838 +
4839 +#ifdef CONFIG_PAX_REFCOUNT
4840 +" tvs %%icc, 6\n"
4841 +#endif
4842 +
4843 " cas [%2], %0, %1\n"
4844 " cmp %0, %1\n"
4845 " bne,pn %%icc, 1b\n"
4846 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4847 " .previous"
4848 : "=&r" (tmp1), "=&r" (tmp2)
4849 : "r" (lock)
4850 - : "memory");
4851 + : "memory", "cc");
4852 }
4853
4854 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4855 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4856 {
4857 int tmp1, tmp2;
4858
4859 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4860 "1: ldsw [%2], %0\n"
4861 " brlz,a,pn %0, 2f\n"
4862 " mov 0, %0\n"
4863 -" add %0, 1, %1\n"
4864 +" addcc %0, 1, %1\n"
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 +" tvs %%icc, 6\n"
4868 +#endif
4869 +
4870 " cas [%2], %0, %1\n"
4871 " cmp %0, %1\n"
4872 " bne,pn %%icc, 1b\n"
4873 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4874 return tmp1;
4875 }
4876
4877 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4878 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4879 {
4880 unsigned long tmp1, tmp2;
4881
4882 __asm__ __volatile__(
4883 "1: lduw [%2], %0\n"
4884 -" sub %0, 1, %1\n"
4885 +" subcc %0, 1, %1\n"
4886 +
4887 +#ifdef CONFIG_PAX_REFCOUNT
4888 +" tvs %%icc, 6\n"
4889 +#endif
4890 +
4891 " cas [%2], %0, %1\n"
4892 " cmp %0, %1\n"
4893 " bne,pn %%xcc, 1b\n"
4894 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898 -static void inline arch_write_lock(raw_rwlock_t *lock)
4899 +static inline void arch_write_lock(raw_rwlock_t *lock)
4900 {
4901 unsigned long mask, tmp1, tmp2;
4902
4903 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4908 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4909 {
4910 __asm__ __volatile__(
4911 " stw %%g0, [%0]"
4912 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4913 : "memory");
4914 }
4915
4916 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4917 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4918 {
4919 unsigned long mask, tmp1, tmp2, result;
4920
4921 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4922 index 844d73a..f787fb9 100644
4923 --- a/arch/sparc/include/asm/thread_info_32.h
4924 +++ b/arch/sparc/include/asm/thread_info_32.h
4925 @@ -50,6 +50,8 @@ struct thread_info {
4926 unsigned long w_saved;
4927
4928 struct restart_block restart_block;
4929 +
4930 + unsigned long lowest_stack;
4931 };
4932
4933 /*
4934 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4935 index f78ad9a..9f55fc7 100644
4936 --- a/arch/sparc/include/asm/thread_info_64.h
4937 +++ b/arch/sparc/include/asm/thread_info_64.h
4938 @@ -68,6 +68,8 @@ struct thread_info {
4939 struct pt_regs *kern_una_regs;
4940 unsigned int kern_una_insn;
4941
4942 + unsigned long lowest_stack;
4943 +
4944 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4945 };
4946
4947 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4948 index e88fbe5..96b0ce5 100644
4949 --- a/arch/sparc/include/asm/uaccess.h
4950 +++ b/arch/sparc/include/asm/uaccess.h
4951 @@ -1,5 +1,13 @@
4952 #ifndef ___ASM_SPARC_UACCESS_H
4953 #define ___ASM_SPARC_UACCESS_H
4954 +
4955 +#ifdef __KERNEL__
4956 +#ifndef __ASSEMBLY__
4957 +#include <linux/types.h>
4958 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4959 +#endif
4960 +#endif
4961 +
4962 #if defined(__sparc__) && defined(__arch64__)
4963 #include <asm/uaccess_64.h>
4964 #else
4965 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4966 index 8303ac4..07f333d 100644
4967 --- a/arch/sparc/include/asm/uaccess_32.h
4968 +++ b/arch/sparc/include/asm/uaccess_32.h
4969 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4970
4971 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4972 {
4973 - if (n && __access_ok((unsigned long) to, n))
4974 + if ((long)n < 0)
4975 + return n;
4976 +
4977 + if (n && __access_ok((unsigned long) to, n)) {
4978 + if (!__builtin_constant_p(n))
4979 + check_object_size(from, n, true);
4980 return __copy_user(to, (__force void __user *) from, n);
4981 - else
4982 + } else
4983 return n;
4984 }
4985
4986 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4987 {
4988 + if ((long)n < 0)
4989 + return n;
4990 +
4991 + if (!__builtin_constant_p(n))
4992 + check_object_size(from, n, true);
4993 +
4994 return __copy_user(to, (__force void __user *) from, n);
4995 }
4996
4997 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4998 {
4999 - if (n && __access_ok((unsigned long) from, n))
5000 + if ((long)n < 0)
5001 + return n;
5002 +
5003 + if (n && __access_ok((unsigned long) from, n)) {
5004 + if (!__builtin_constant_p(n))
5005 + check_object_size(to, n, false);
5006 return __copy_user((__force void __user *) to, from, n);
5007 - else
5008 + } else
5009 return n;
5010 }
5011
5012 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5013 {
5014 + if ((long)n < 0)
5015 + return n;
5016 +
5017 return __copy_user((__force void __user *) to, from, n);
5018 }
5019
5020 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5021 index 9ea271e..7b8a271 100644
5022 --- a/arch/sparc/include/asm/uaccess_64.h
5023 +++ b/arch/sparc/include/asm/uaccess_64.h
5024 @@ -9,6 +9,7 @@
5025 #include <linux/compiler.h>
5026 #include <linux/string.h>
5027 #include <linux/thread_info.h>
5028 +#include <linux/kernel.h>
5029 #include <asm/asi.h>
5030 #include <asm/system.h>
5031 #include <asm/spitfire.h>
5032 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5033 static inline unsigned long __must_check
5034 copy_from_user(void *to, const void __user *from, unsigned long size)
5035 {
5036 - unsigned long ret = ___copy_from_user(to, from, size);
5037 + unsigned long ret;
5038
5039 + if ((long)size < 0 || size > INT_MAX)
5040 + return size;
5041 +
5042 + if (!__builtin_constant_p(size))
5043 + check_object_size(to, size, false);
5044 +
5045 + ret = ___copy_from_user(to, from, size);
5046 if (unlikely(ret))
5047 ret = copy_from_user_fixup(to, from, size);
5048 return ret;
5049 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5050 static inline unsigned long __must_check
5051 copy_to_user(void __user *to, const void *from, unsigned long size)
5052 {
5053 - unsigned long ret = ___copy_to_user(to, from, size);
5054 + unsigned long ret;
5055
5056 + if ((long)size < 0 || size > INT_MAX)
5057 + return size;
5058 +
5059 + if (!__builtin_constant_p(size))
5060 + check_object_size(from, size, true);
5061 +
5062 + ret = ___copy_to_user(to, from, size);
5063 if (unlikely(ret))
5064 ret = copy_to_user_fixup(to, from, size);
5065 return ret;
5066 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5067 index 2782681..77ded84 100644
5068 --- a/arch/sparc/kernel/Makefile
5069 +++ b/arch/sparc/kernel/Makefile
5070 @@ -3,7 +3,7 @@
5071 #
5072
5073 asflags-y := -ansi
5074 -ccflags-y := -Werror
5075 +#ccflags-y := -Werror
5076
5077 extra-y := head_$(BITS).o
5078 extra-y += init_task.o
5079 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5080 index 7690cc2..ece64c9 100644
5081 --- a/arch/sparc/kernel/iommu.c
5082 +++ b/arch/sparc/kernel/iommu.c
5083 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5084 spin_unlock_irqrestore(&iommu->lock, flags);
5085 }
5086
5087 -static struct dma_map_ops sun4u_dma_ops = {
5088 +static const struct dma_map_ops sun4u_dma_ops = {
5089 .alloc_coherent = dma_4u_alloc_coherent,
5090 .free_coherent = dma_4u_free_coherent,
5091 .map_page = dma_4u_map_page,
5092 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5093 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5094 };
5095
5096 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5097 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5098 EXPORT_SYMBOL(dma_ops);
5099
5100 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5101 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5102 index 9f61fd8..bd048db 100644
5103 --- a/arch/sparc/kernel/ioport.c
5104 +++ b/arch/sparc/kernel/ioport.c
5105 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5106 BUG();
5107 }
5108
5109 -struct dma_map_ops sbus_dma_ops = {
5110 +const struct dma_map_ops sbus_dma_ops = {
5111 .alloc_coherent = sbus_alloc_coherent,
5112 .free_coherent = sbus_free_coherent,
5113 .map_page = sbus_map_page,
5114 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5115 .sync_sg_for_device = sbus_sync_sg_for_device,
5116 };
5117
5118 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5119 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5120 EXPORT_SYMBOL(dma_ops);
5121
5122 static int __init sparc_register_ioport(void)
5123 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5124 }
5125 }
5126
5127 -struct dma_map_ops pci32_dma_ops = {
5128 +const struct dma_map_ops pci32_dma_ops = {
5129 .alloc_coherent = pci32_alloc_coherent,
5130 .free_coherent = pci32_free_coherent,
5131 .map_page = pci32_map_page,
5132 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5133 index 04df4ed..55c4b6e 100644
5134 --- a/arch/sparc/kernel/kgdb_32.c
5135 +++ b/arch/sparc/kernel/kgdb_32.c
5136 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5137 {
5138 }
5139
5140 -struct kgdb_arch arch_kgdb_ops = {
5141 +const struct kgdb_arch arch_kgdb_ops = {
5142 /* Breakpoint instruction: ta 0x7d */
5143 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5144 };
5145 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5146 index f5a0fd4..d886f71 100644
5147 --- a/arch/sparc/kernel/kgdb_64.c
5148 +++ b/arch/sparc/kernel/kgdb_64.c
5149 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5150 {
5151 }
5152
5153 -struct kgdb_arch arch_kgdb_ops = {
5154 +const struct kgdb_arch arch_kgdb_ops = {
5155 /* Breakpoint instruction: ta 0x72 */
5156 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5157 };
5158 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5159 index 23c33ff..d137fbd 100644
5160 --- a/arch/sparc/kernel/pci_sun4v.c
5161 +++ b/arch/sparc/kernel/pci_sun4v.c
5162 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5163 spin_unlock_irqrestore(&iommu->lock, flags);
5164 }
5165
5166 -static struct dma_map_ops sun4v_dma_ops = {
5167 +static const struct dma_map_ops sun4v_dma_ops = {
5168 .alloc_coherent = dma_4v_alloc_coherent,
5169 .free_coherent = dma_4v_free_coherent,
5170 .map_page = dma_4v_map_page,
5171 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5172 index c49865b..b41a81b 100644
5173 --- a/arch/sparc/kernel/process_32.c
5174 +++ b/arch/sparc/kernel/process_32.c
5175 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5176 rw->ins[4], rw->ins[5],
5177 rw->ins[6],
5178 rw->ins[7]);
5179 - printk("%pS\n", (void *) rw->ins[7]);
5180 + printk("%pA\n", (void *) rw->ins[7]);
5181 rw = (struct reg_window32 *) rw->ins[6];
5182 }
5183 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5184 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5185
5186 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5187 r->psr, r->pc, r->npc, r->y, print_tainted());
5188 - printk("PC: <%pS>\n", (void *) r->pc);
5189 + printk("PC: <%pA>\n", (void *) r->pc);
5190 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5192 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5193 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5194 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5195 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5196 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5197 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5198
5199 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5200 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5201 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5202 rw = (struct reg_window32 *) fp;
5203 pc = rw->ins[7];
5204 printk("[%08lx : ", pc);
5205 - printk("%pS ] ", (void *) pc);
5206 + printk("%pA ] ", (void *) pc);
5207 fp = rw->ins[6];
5208 } while (++count < 16);
5209 printk("\n");
5210 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5211 index cb70476..3d0c191 100644
5212 --- a/arch/sparc/kernel/process_64.c
5213 +++ b/arch/sparc/kernel/process_64.c
5214 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5215 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5216 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5217 if (regs->tstate & TSTATE_PRIV)
5218 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5219 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5220 }
5221
5222 void show_regs(struct pt_regs *regs)
5223 {
5224 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5225 regs->tpc, regs->tnpc, regs->y, print_tainted());
5226 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5227 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5228 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5229 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5230 regs->u_regs[3]);
5231 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5232 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5233 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5234 regs->u_regs[15]);
5235 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5236 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5237 show_regwindow(regs);
5238 }
5239
5240 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5241 ((tp && tp->task) ? tp->task->pid : -1));
5242
5243 if (gp->tstate & TSTATE_PRIV) {
5244 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5245 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5246 (void *) gp->tpc,
5247 (void *) gp->o7,
5248 (void *) gp->i7,
5249 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5250 index 6edc4e5..06a69b4 100644
5251 --- a/arch/sparc/kernel/sigutil_64.c
5252 +++ b/arch/sparc/kernel/sigutil_64.c
5253 @@ -2,6 +2,7 @@
5254 #include <linux/types.h>
5255 #include <linux/thread_info.h>
5256 #include <linux/uaccess.h>
5257 +#include <linux/errno.h>
5258
5259 #include <asm/sigcontext.h>
5260 #include <asm/fpumacro.h>
5261 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5262 index 3a82e65..ce0a53a 100644
5263 --- a/arch/sparc/kernel/sys_sparc_32.c
5264 +++ b/arch/sparc/kernel/sys_sparc_32.c
5265 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 if (ARCH_SUN4C && len > 0x20000000)
5267 return -ENOMEM;
5268 if (!addr)
5269 - addr = TASK_UNMAPPED_BASE;
5270 + addr = current->mm->mmap_base;
5271
5272 if (flags & MAP_SHARED)
5273 addr = COLOUR_ALIGN(addr);
5274 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5275 }
5276 if (TASK_SIZE - PAGE_SIZE - len < addr)
5277 return -ENOMEM;
5278 - if (!vmm || addr + len <= vmm->vm_start)
5279 + if (check_heap_stack_gap(vmm, addr, len))
5280 return addr;
5281 addr = vmm->vm_end;
5282 if (flags & MAP_SHARED)
5283 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5284 index cfa0e19..98972ac 100644
5285 --- a/arch/sparc/kernel/sys_sparc_64.c
5286 +++ b/arch/sparc/kernel/sys_sparc_64.c
5287 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 /* We do not accept a shared mapping if it would violate
5289 * cache aliasing constraints.
5290 */
5291 - if ((flags & MAP_SHARED) &&
5292 + if ((filp || (flags & MAP_SHARED)) &&
5293 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5294 return -EINVAL;
5295 return addr;
5296 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5297 if (filp || (flags & MAP_SHARED))
5298 do_color_align = 1;
5299
5300 +#ifdef CONFIG_PAX_RANDMMAP
5301 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5302 +#endif
5303 +
5304 if (addr) {
5305 if (do_color_align)
5306 addr = COLOUR_ALIGN(addr, pgoff);
5307 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 addr = PAGE_ALIGN(addr);
5309
5310 vma = find_vma(mm, addr);
5311 - if (task_size - len >= addr &&
5312 - (!vma || addr + len <= vma->vm_start))
5313 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5314 return addr;
5315 }
5316
5317 if (len > mm->cached_hole_size) {
5318 - start_addr = addr = mm->free_area_cache;
5319 + start_addr = addr = mm->free_area_cache;
5320 } else {
5321 - start_addr = addr = TASK_UNMAPPED_BASE;
5322 + start_addr = addr = mm->mmap_base;
5323 mm->cached_hole_size = 0;
5324 }
5325
5326 @@ -175,14 +178,14 @@ full_search:
5327 vma = find_vma(mm, VA_EXCLUDE_END);
5328 }
5329 if (unlikely(task_size < addr)) {
5330 - if (start_addr != TASK_UNMAPPED_BASE) {
5331 - start_addr = addr = TASK_UNMAPPED_BASE;
5332 + if (start_addr != mm->mmap_base) {
5333 + start_addr = addr = mm->mmap_base;
5334 mm->cached_hole_size = 0;
5335 goto full_search;
5336 }
5337 return -ENOMEM;
5338 }
5339 - if (likely(!vma || addr + len <= vma->vm_start)) {
5340 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5341 /*
5342 * Remember the place where we stopped the search:
5343 */
5344 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 /* We do not accept a shared mapping if it would violate
5346 * cache aliasing constraints.
5347 */
5348 - if ((flags & MAP_SHARED) &&
5349 + if ((filp || (flags & MAP_SHARED)) &&
5350 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5351 return -EINVAL;
5352 return addr;
5353 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357 - if (task_size - len >= addr &&
5358 - (!vma || addr + len <= vma->vm_start))
5359 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5360 return addr;
5361 }
5362
5363 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 /* make sure it can fit in the remaining address space */
5365 if (likely(addr > len)) {
5366 vma = find_vma(mm, addr-len);
5367 - if (!vma || addr <= vma->vm_start) {
5368 + if (check_heap_stack_gap(vma, addr - len, len)) {
5369 /* remember the address as a hint for next time */
5370 return (mm->free_area_cache = addr-len);
5371 }
5372 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5373 if (unlikely(mm->mmap_base < len))
5374 goto bottomup;
5375
5376 - addr = mm->mmap_base-len;
5377 - if (do_color_align)
5378 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5379 + addr = mm->mmap_base - len;
5380
5381 do {
5382 + if (do_color_align)
5383 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5384 /*
5385 * Lookup failure means no vma is above this address,
5386 * else if new region fits below vma->vm_start,
5387 * return with success:
5388 */
5389 vma = find_vma(mm, addr);
5390 - if (likely(!vma || addr+len <= vma->vm_start)) {
5391 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5392 /* remember the address as a hint for next time */
5393 return (mm->free_area_cache = addr);
5394 }
5395 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5396 mm->cached_hole_size = vma->vm_start - addr;
5397
5398 /* try just below the current vma->vm_start */
5399 - addr = vma->vm_start-len;
5400 - if (do_color_align)
5401 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5402 - } while (likely(len < vma->vm_start));
5403 + addr = skip_heap_stack_gap(vma, len);
5404 + } while (!IS_ERR_VALUE(addr));
5405
5406 bottomup:
5407 /*
5408 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5409 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5410 sysctl_legacy_va_layout) {
5411 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5412 +
5413 +#ifdef CONFIG_PAX_RANDMMAP
5414 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5415 + mm->mmap_base += mm->delta_mmap;
5416 +#endif
5417 +
5418 mm->get_unmapped_area = arch_get_unmapped_area;
5419 mm->unmap_area = arch_unmap_area;
5420 } else {
5421 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5422 gap = (task_size / 6 * 5);
5423
5424 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5425 +
5426 +#ifdef CONFIG_PAX_RANDMMAP
5427 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5428 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5429 +#endif
5430 +
5431 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5432 mm->unmap_area = arch_unmap_area_topdown;
5433 }
5434 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5435 index c0490c7..84959d1 100644
5436 --- a/arch/sparc/kernel/traps_32.c
5437 +++ b/arch/sparc/kernel/traps_32.c
5438 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5439 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5440 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5441
5442 +extern void gr_handle_kernel_exploit(void);
5443 +
5444 void die_if_kernel(char *str, struct pt_regs *regs)
5445 {
5446 static int die_counter;
5447 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5448 count++ < 30 &&
5449 (((unsigned long) rw) >= PAGE_OFFSET) &&
5450 !(((unsigned long) rw) & 0x7)) {
5451 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5452 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5453 (void *) rw->ins[7]);
5454 rw = (struct reg_window32 *)rw->ins[6];
5455 }
5456 }
5457 printk("Instruction DUMP:");
5458 instruction_dump ((unsigned long *) regs->pc);
5459 - if(regs->psr & PSR_PS)
5460 + if(regs->psr & PSR_PS) {
5461 + gr_handle_kernel_exploit();
5462 do_exit(SIGKILL);
5463 + }
5464 do_exit(SIGSEGV);
5465 }
5466
5467 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5468 index 10f7bb9..cdb6793 100644
5469 --- a/arch/sparc/kernel/traps_64.c
5470 +++ b/arch/sparc/kernel/traps_64.c
5471 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5472 i + 1,
5473 p->trapstack[i].tstate, p->trapstack[i].tpc,
5474 p->trapstack[i].tnpc, p->trapstack[i].tt);
5475 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5476 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5477 }
5478 }
5479
5480 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5481
5482 lvl -= 0x100;
5483 if (regs->tstate & TSTATE_PRIV) {
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 + if (lvl == 6)
5487 + pax_report_refcount_overflow(regs);
5488 +#endif
5489 +
5490 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5491 die_if_kernel(buffer, regs);
5492 }
5493 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5494 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5495 {
5496 char buffer[32];
5497 -
5498 +
5499 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5500 0, lvl, SIGTRAP) == NOTIFY_STOP)
5501 return;
5502
5503 +#ifdef CONFIG_PAX_REFCOUNT
5504 + if (lvl == 6)
5505 + pax_report_refcount_overflow(regs);
5506 +#endif
5507 +
5508 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5509
5510 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5511 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5512 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5513 printk("%s" "ERROR(%d): ",
5514 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5515 - printk("TPC<%pS>\n", (void *) regs->tpc);
5516 + printk("TPC<%pA>\n", (void *) regs->tpc);
5517 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5518 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5519 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5520 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5525 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5526 panic("Irrecoverable Cheetah+ parity error.");
5527 }
5528
5529 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5530 smp_processor_id(),
5531 (type & 0x1) ? 'I' : 'D',
5532 regs->tpc);
5533 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5534 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5535 }
5536
5537 struct sun4v_error_entry {
5538 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5539
5540 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5541 regs->tpc, tl);
5542 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5543 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5544 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5545 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5546 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5547 (void *) regs->u_regs[UREG_I7]);
5548 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5549 "pte[%lx] error[%lx]\n",
5550 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5551
5552 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5553 regs->tpc, tl);
5554 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5555 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5556 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5557 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5558 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5559 (void *) regs->u_regs[UREG_I7]);
5560 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5561 "pte[%lx] error[%lx]\n",
5562 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5563 fp = (unsigned long)sf->fp + STACK_BIAS;
5564 }
5565
5566 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5567 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5568 } while (++count < 16);
5569 }
5570
5571 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5572 return (struct reg_window *) (fp + STACK_BIAS);
5573 }
5574
5575 +extern void gr_handle_kernel_exploit(void);
5576 +
5577 void die_if_kernel(char *str, struct pt_regs *regs)
5578 {
5579 static int die_counter;
5580 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 while (rw &&
5582 count++ < 30&&
5583 is_kernel_stack(current, rw)) {
5584 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5585 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5586 (void *) rw->ins[7]);
5587
5588 rw = kernel_stack_up(rw);
5589 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5590 }
5591 user_instruction_dump ((unsigned int __user *) regs->tpc);
5592 }
5593 - if (regs->tstate & TSTATE_PRIV)
5594 + if (regs->tstate & TSTATE_PRIV) {
5595 + gr_handle_kernel_exploit();
5596 do_exit(SIGKILL);
5597 + }
5598 +
5599 do_exit(SIGSEGV);
5600 }
5601 EXPORT_SYMBOL(die_if_kernel);
5602 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5603 index be183fe..1c8d332 100644
5604 --- a/arch/sparc/kernel/una_asm_64.S
5605 +++ b/arch/sparc/kernel/una_asm_64.S
5606 @@ -127,7 +127,7 @@ do_int_load:
5607 wr %o5, 0x0, %asi
5608 retl
5609 mov 0, %o0
5610 - .size __do_int_load, .-__do_int_load
5611 + .size do_int_load, .-do_int_load
5612
5613 .section __ex_table,"a"
5614 .word 4b, __retl_efault
5615 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5616 index 3792099..2af17d8 100644
5617 --- a/arch/sparc/kernel/unaligned_64.c
5618 +++ b/arch/sparc/kernel/unaligned_64.c
5619 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5620 if (count < 5) {
5621 last_time = jiffies;
5622 count++;
5623 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5624 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5625 regs->tpc, (void *) regs->tpc);
5626 }
5627 }
5628 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5629 index e75faf0..24f12f9 100644
5630 --- a/arch/sparc/lib/Makefile
5631 +++ b/arch/sparc/lib/Makefile
5632 @@ -2,7 +2,7 @@
5633 #
5634
5635 asflags-y := -ansi -DST_DIV0=0x02
5636 -ccflags-y := -Werror
5637 +#ccflags-y := -Werror
5638
5639 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5640 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5641 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5642 index 0268210..f0291ca 100644
5643 --- a/arch/sparc/lib/atomic_64.S
5644 +++ b/arch/sparc/lib/atomic_64.S
5645 @@ -18,7 +18,12 @@
5646 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5647 BACKOFF_SETUP(%o2)
5648 1: lduw [%o1], %g1
5649 - add %g1, %o0, %g7
5650 + addcc %g1, %o0, %g7
5651 +
5652 +#ifdef CONFIG_PAX_REFCOUNT
5653 + tvs %icc, 6
5654 +#endif
5655 +
5656 cas [%o1], %g1, %g7
5657 cmp %g1, %g7
5658 bne,pn %icc, 2f
5659 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5660 2: BACKOFF_SPIN(%o2, %o3, 1b)
5661 .size atomic_add, .-atomic_add
5662
5663 + .globl atomic_add_unchecked
5664 + .type atomic_add_unchecked,#function
5665 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5666 + BACKOFF_SETUP(%o2)
5667 +1: lduw [%o1], %g1
5668 + add %g1, %o0, %g7
5669 + cas [%o1], %g1, %g7
5670 + cmp %g1, %g7
5671 + bne,pn %icc, 2f
5672 + nop
5673 + retl
5674 + nop
5675 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5676 + .size atomic_add_unchecked, .-atomic_add_unchecked
5677 +
5678 .globl atomic_sub
5679 .type atomic_sub,#function
5680 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - sub %g1, %o0, %g7
5684 + subcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, 2f
5693 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_sub, .-atomic_sub
5696
5697 + .globl atomic_sub_unchecked
5698 + .type atomic_sub_unchecked,#function
5699 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + sub %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5711 +
5712 .globl atomic_add_ret
5713 .type atomic_add_ret,#function
5714 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - add %g1, %o0, %g7
5718 + addcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, 2f
5727 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_add_ret, .-atomic_add_ret
5730
5731 + .globl atomic_add_ret_unchecked
5732 + .type atomic_add_ret_unchecked,#function
5733 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + addcc %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + add %g7, %o0, %g7
5741 + sra %g7, 0, %o0
5742 + retl
5743 + nop
5744 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5745 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5746 +
5747 .globl atomic_sub_ret
5748 .type atomic_sub_ret,#function
5749 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5750 BACKOFF_SETUP(%o2)
5751 1: lduw [%o1], %g1
5752 - sub %g1, %o0, %g7
5753 + subcc %g1, %o0, %g7
5754 +
5755 +#ifdef CONFIG_PAX_REFCOUNT
5756 + tvs %icc, 6
5757 +#endif
5758 +
5759 cas [%o1], %g1, %g7
5760 cmp %g1, %g7
5761 bne,pn %icc, 2f
5762 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5763 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5764 BACKOFF_SETUP(%o2)
5765 1: ldx [%o1], %g1
5766 - add %g1, %o0, %g7
5767 + addcc %g1, %o0, %g7
5768 +
5769 +#ifdef CONFIG_PAX_REFCOUNT
5770 + tvs %xcc, 6
5771 +#endif
5772 +
5773 casx [%o1], %g1, %g7
5774 cmp %g1, %g7
5775 bne,pn %xcc, 2f
5776 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5777 2: BACKOFF_SPIN(%o2, %o3, 1b)
5778 .size atomic64_add, .-atomic64_add
5779
5780 + .globl atomic64_add_unchecked
5781 + .type atomic64_add_unchecked,#function
5782 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5783 + BACKOFF_SETUP(%o2)
5784 +1: ldx [%o1], %g1
5785 + addcc %g1, %o0, %g7
5786 + casx [%o1], %g1, %g7
5787 + cmp %g1, %g7
5788 + bne,pn %xcc, 2f
5789 + nop
5790 + retl
5791 + nop
5792 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5793 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5794 +
5795 .globl atomic64_sub
5796 .type atomic64_sub,#function
5797 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - sub %g1, %o0, %g7
5801 + subcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, 2f
5810 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_sub, .-atomic64_sub
5813
5814 + .globl atomic64_sub_unchecked
5815 + .type atomic64_sub_unchecked,#function
5816 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + subcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5828 +
5829 .globl atomic64_add_ret
5830 .type atomic64_add_ret,#function
5831 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - add %g1, %o0, %g7
5835 + addcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, 2f
5844 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_add_ret, .-atomic64_add_ret
5847
5848 + .globl atomic64_add_ret_unchecked
5849 + .type atomic64_add_ret_unchecked,#function
5850 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + addcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + add %g7, %o0, %g7
5858 + mov %g7, %o0
5859 + retl
5860 + nop
5861 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5862 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5863 +
5864 .globl atomic64_sub_ret
5865 .type atomic64_sub_ret,#function
5866 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5867 BACKOFF_SETUP(%o2)
5868 1: ldx [%o1], %g1
5869 - sub %g1, %o0, %g7
5870 + subcc %g1, %o0, %g7
5871 +
5872 +#ifdef CONFIG_PAX_REFCOUNT
5873 + tvs %xcc, 6
5874 +#endif
5875 +
5876 casx [%o1], %g1, %g7
5877 cmp %g1, %g7
5878 bne,pn %xcc, 2f
5879 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5880 index 704b126..2e79d76 100644
5881 --- a/arch/sparc/lib/ksyms.c
5882 +++ b/arch/sparc/lib/ksyms.c
5883 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5884
5885 /* Atomic counter implementation. */
5886 EXPORT_SYMBOL(atomic_add);
5887 +EXPORT_SYMBOL(atomic_add_unchecked);
5888 EXPORT_SYMBOL(atomic_add_ret);
5889 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5890 EXPORT_SYMBOL(atomic_sub);
5891 +EXPORT_SYMBOL(atomic_sub_unchecked);
5892 EXPORT_SYMBOL(atomic_sub_ret);
5893 EXPORT_SYMBOL(atomic64_add);
5894 +EXPORT_SYMBOL(atomic64_add_unchecked);
5895 EXPORT_SYMBOL(atomic64_add_ret);
5896 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5897 EXPORT_SYMBOL(atomic64_sub);
5898 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5899 EXPORT_SYMBOL(atomic64_sub_ret);
5900
5901 /* Atomic bit operations. */
5902 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5903 index 91a7d29..ce75c29 100644
5904 --- a/arch/sparc/lib/rwsem_64.S
5905 +++ b/arch/sparc/lib/rwsem_64.S
5906 @@ -11,7 +11,12 @@
5907 .globl __down_read
5908 __down_read:
5909 1: lduw [%o0], %g1
5910 - add %g1, 1, %g7
5911 + addcc %g1, 1, %g7
5912 +
5913 +#ifdef CONFIG_PAX_REFCOUNT
5914 + tvs %icc, 6
5915 +#endif
5916 +
5917 cas [%o0], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %icc, 1b
5920 @@ -33,7 +38,12 @@ __down_read:
5921 .globl __down_read_trylock
5922 __down_read_trylock:
5923 1: lduw [%o0], %g1
5924 - add %g1, 1, %g7
5925 + addcc %g1, 1, %g7
5926 +
5927 +#ifdef CONFIG_PAX_REFCOUNT
5928 + tvs %icc, 6
5929 +#endif
5930 +
5931 cmp %g7, 0
5932 bl,pn %icc, 2f
5933 mov 0, %o1
5934 @@ -51,7 +61,12 @@ __down_write:
5935 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5936 1:
5937 lduw [%o0], %g3
5938 - add %g3, %g1, %g7
5939 + addcc %g3, %g1, %g7
5940 +
5941 +#ifdef CONFIG_PAX_REFCOUNT
5942 + tvs %icc, 6
5943 +#endif
5944 +
5945 cas [%o0], %g3, %g7
5946 cmp %g3, %g7
5947 bne,pn %icc, 1b
5948 @@ -77,7 +92,12 @@ __down_write_trylock:
5949 cmp %g3, 0
5950 bne,pn %icc, 2f
5951 mov 0, %o1
5952 - add %g3, %g1, %g7
5953 + addcc %g3, %g1, %g7
5954 +
5955 +#ifdef CONFIG_PAX_REFCOUNT
5956 + tvs %icc, 6
5957 +#endif
5958 +
5959 cas [%o0], %g3, %g7
5960 cmp %g3, %g7
5961 bne,pn %icc, 1b
5962 @@ -90,7 +110,12 @@ __down_write_trylock:
5963 __up_read:
5964 1:
5965 lduw [%o0], %g1
5966 - sub %g1, 1, %g7
5967 + subcc %g1, 1, %g7
5968 +
5969 +#ifdef CONFIG_PAX_REFCOUNT
5970 + tvs %icc, 6
5971 +#endif
5972 +
5973 cas [%o0], %g1, %g7
5974 cmp %g1, %g7
5975 bne,pn %icc, 1b
5976 @@ -118,7 +143,12 @@ __up_write:
5977 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5978 1:
5979 lduw [%o0], %g3
5980 - sub %g3, %g1, %g7
5981 + subcc %g3, %g1, %g7
5982 +
5983 +#ifdef CONFIG_PAX_REFCOUNT
5984 + tvs %icc, 6
5985 +#endif
5986 +
5987 cas [%o0], %g3, %g7
5988 cmp %g3, %g7
5989 bne,pn %icc, 1b
5990 @@ -143,7 +173,12 @@ __downgrade_write:
5991 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5992 1:
5993 lduw [%o0], %g3
5994 - sub %g3, %g1, %g7
5995 + subcc %g3, %g1, %g7
5996 +
5997 +#ifdef CONFIG_PAX_REFCOUNT
5998 + tvs %icc, 6
5999 +#endif
6000 +
6001 cas [%o0], %g3, %g7
6002 cmp %g3, %g7
6003 bne,pn %icc, 1b
6004 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6005 index 79836a7..62f47a2 100644
6006 --- a/arch/sparc/mm/Makefile
6007 +++ b/arch/sparc/mm/Makefile
6008 @@ -2,7 +2,7 @@
6009 #
6010
6011 asflags-y := -ansi
6012 -ccflags-y := -Werror
6013 +#ccflags-y := -Werror
6014
6015 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6016 obj-y += fault_$(BITS).o
6017 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6018 index b99f81c..3453e93 100644
6019 --- a/arch/sparc/mm/fault_32.c
6020 +++ b/arch/sparc/mm/fault_32.c
6021 @@ -21,6 +21,9 @@
6022 #include <linux/interrupt.h>
6023 #include <linux/module.h>
6024 #include <linux/kdebug.h>
6025 +#include <linux/slab.h>
6026 +#include <linux/pagemap.h>
6027 +#include <linux/compiler.h>
6028
6029 #include <asm/system.h>
6030 #include <asm/page.h>
6031 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6032 return safe_compute_effective_address(regs, insn);
6033 }
6034
6035 +#ifdef CONFIG_PAX_PAGEEXEC
6036 +#ifdef CONFIG_PAX_DLRESOLVE
6037 +static void pax_emuplt_close(struct vm_area_struct *vma)
6038 +{
6039 + vma->vm_mm->call_dl_resolve = 0UL;
6040 +}
6041 +
6042 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6043 +{
6044 + unsigned int *kaddr;
6045 +
6046 + vmf->page = alloc_page(GFP_HIGHUSER);
6047 + if (!vmf->page)
6048 + return VM_FAULT_OOM;
6049 +
6050 + kaddr = kmap(vmf->page);
6051 + memset(kaddr, 0, PAGE_SIZE);
6052 + kaddr[0] = 0x9DE3BFA8U; /* save */
6053 + flush_dcache_page(vmf->page);
6054 + kunmap(vmf->page);
6055 + return VM_FAULT_MAJOR;
6056 +}
6057 +
6058 +static const struct vm_operations_struct pax_vm_ops = {
6059 + .close = pax_emuplt_close,
6060 + .fault = pax_emuplt_fault
6061 +};
6062 +
6063 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6064 +{
6065 + int ret;
6066 +
6067 + vma->vm_mm = current->mm;
6068 + vma->vm_start = addr;
6069 + vma->vm_end = addr + PAGE_SIZE;
6070 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6071 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6072 + vma->vm_ops = &pax_vm_ops;
6073 +
6074 + ret = insert_vm_struct(current->mm, vma);
6075 + if (ret)
6076 + return ret;
6077 +
6078 + ++current->mm->total_vm;
6079 + return 0;
6080 +}
6081 +#endif
6082 +
6083 +/*
6084 + * PaX: decide what to do with offenders (regs->pc = fault address)
6085 + *
6086 + * returns 1 when task should be killed
6087 + * 2 when patched PLT trampoline was detected
6088 + * 3 when unpatched PLT trampoline was detected
6089 + */
6090 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6091 +{
6092 +
6093 +#ifdef CONFIG_PAX_EMUPLT
6094 + int err;
6095 +
6096 + do { /* PaX: patched PLT emulation #1 */
6097 + unsigned int sethi1, sethi2, jmpl;
6098 +
6099 + err = get_user(sethi1, (unsigned int *)regs->pc);
6100 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6101 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6102 +
6103 + if (err)
6104 + break;
6105 +
6106 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6107 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6108 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6109 + {
6110 + unsigned int addr;
6111 +
6112 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6113 + addr = regs->u_regs[UREG_G1];
6114 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6115 + regs->pc = addr;
6116 + regs->npc = addr+4;
6117 + return 2;
6118 + }
6119 + } while (0);
6120 +
6121 + { /* PaX: patched PLT emulation #2 */
6122 + unsigned int ba;
6123 +
6124 + err = get_user(ba, (unsigned int *)regs->pc);
6125 +
6126 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6127 + unsigned int addr;
6128 +
6129 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6130 + regs->pc = addr;
6131 + regs->npc = addr+4;
6132 + return 2;
6133 + }
6134 + }
6135 +
6136 + do { /* PaX: patched PLT emulation #3 */
6137 + unsigned int sethi, jmpl, nop;
6138 +
6139 + err = get_user(sethi, (unsigned int *)regs->pc);
6140 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6141 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6142 +
6143 + if (err)
6144 + break;
6145 +
6146 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6147 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6148 + nop == 0x01000000U)
6149 + {
6150 + unsigned int addr;
6151 +
6152 + addr = (sethi & 0x003FFFFFU) << 10;
6153 + regs->u_regs[UREG_G1] = addr;
6154 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6155 + regs->pc = addr;
6156 + regs->npc = addr+4;
6157 + return 2;
6158 + }
6159 + } while (0);
6160 +
6161 + do { /* PaX: unpatched PLT emulation step 1 */
6162 + unsigned int sethi, ba, nop;
6163 +
6164 + err = get_user(sethi, (unsigned int *)regs->pc);
6165 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6166 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6167 +
6168 + if (err)
6169 + break;
6170 +
6171 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6172 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6173 + nop == 0x01000000U)
6174 + {
6175 + unsigned int addr, save, call;
6176 +
6177 + if ((ba & 0xFFC00000U) == 0x30800000U)
6178 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6179 + else
6180 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6181 +
6182 + err = get_user(save, (unsigned int *)addr);
6183 + err |= get_user(call, (unsigned int *)(addr+4));
6184 + err |= get_user(nop, (unsigned int *)(addr+8));
6185 + if (err)
6186 + break;
6187 +
6188 +#ifdef CONFIG_PAX_DLRESOLVE
6189 + if (save == 0x9DE3BFA8U &&
6190 + (call & 0xC0000000U) == 0x40000000U &&
6191 + nop == 0x01000000U)
6192 + {
6193 + struct vm_area_struct *vma;
6194 + unsigned long call_dl_resolve;
6195 +
6196 + down_read(&current->mm->mmap_sem);
6197 + call_dl_resolve = current->mm->call_dl_resolve;
6198 + up_read(&current->mm->mmap_sem);
6199 + if (likely(call_dl_resolve))
6200 + goto emulate;
6201 +
6202 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6203 +
6204 + down_write(&current->mm->mmap_sem);
6205 + if (current->mm->call_dl_resolve) {
6206 + call_dl_resolve = current->mm->call_dl_resolve;
6207 + up_write(&current->mm->mmap_sem);
6208 + if (vma)
6209 + kmem_cache_free(vm_area_cachep, vma);
6210 + goto emulate;
6211 + }
6212 +
6213 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6214 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6215 + up_write(&current->mm->mmap_sem);
6216 + if (vma)
6217 + kmem_cache_free(vm_area_cachep, vma);
6218 + return 1;
6219 + }
6220 +
6221 + if (pax_insert_vma(vma, call_dl_resolve)) {
6222 + up_write(&current->mm->mmap_sem);
6223 + kmem_cache_free(vm_area_cachep, vma);
6224 + return 1;
6225 + }
6226 +
6227 + current->mm->call_dl_resolve = call_dl_resolve;
6228 + up_write(&current->mm->mmap_sem);
6229 +
6230 +emulate:
6231 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6232 + regs->pc = call_dl_resolve;
6233 + regs->npc = addr+4;
6234 + return 3;
6235 + }
6236 +#endif
6237 +
6238 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6239 + if ((save & 0xFFC00000U) == 0x05000000U &&
6240 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6241 + nop == 0x01000000U)
6242 + {
6243 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6244 + regs->u_regs[UREG_G2] = addr + 4;
6245 + addr = (save & 0x003FFFFFU) << 10;
6246 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6247 + regs->pc = addr;
6248 + regs->npc = addr+4;
6249 + return 3;
6250 + }
6251 + }
6252 + } while (0);
6253 +
6254 + do { /* PaX: unpatched PLT emulation step 2 */
6255 + unsigned int save, call, nop;
6256 +
6257 + err = get_user(save, (unsigned int *)(regs->pc-4));
6258 + err |= get_user(call, (unsigned int *)regs->pc);
6259 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6260 + if (err)
6261 + break;
6262 +
6263 + if (save == 0x9DE3BFA8U &&
6264 + (call & 0xC0000000U) == 0x40000000U &&
6265 + nop == 0x01000000U)
6266 + {
6267 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6268 +
6269 + regs->u_regs[UREG_RETPC] = regs->pc;
6270 + regs->pc = dl_resolve;
6271 + regs->npc = dl_resolve+4;
6272 + return 3;
6273 + }
6274 + } while (0);
6275 +#endif
6276 +
6277 + return 1;
6278 +}
6279 +
6280 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6281 +{
6282 + unsigned long i;
6283 +
6284 + printk(KERN_ERR "PAX: bytes at PC: ");
6285 + for (i = 0; i < 8; i++) {
6286 + unsigned int c;
6287 + if (get_user(c, (unsigned int *)pc+i))
6288 + printk(KERN_CONT "???????? ");
6289 + else
6290 + printk(KERN_CONT "%08x ", c);
6291 + }
6292 + printk("\n");
6293 +}
6294 +#endif
6295 +
6296 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6297 unsigned long address)
6298 {
6299 @@ -231,6 +495,24 @@ good_area:
6300 if(!(vma->vm_flags & VM_WRITE))
6301 goto bad_area;
6302 } else {
6303 +
6304 +#ifdef CONFIG_PAX_PAGEEXEC
6305 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6306 + up_read(&mm->mmap_sem);
6307 + switch (pax_handle_fetch_fault(regs)) {
6308 +
6309 +#ifdef CONFIG_PAX_EMUPLT
6310 + case 2:
6311 + case 3:
6312 + return;
6313 +#endif
6314 +
6315 + }
6316 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6317 + do_group_exit(SIGKILL);
6318 + }
6319 +#endif
6320 +
6321 /* Allow reads even for write-only mappings */
6322 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6323 goto bad_area;
6324 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6325 index 43b0da9..a0b78f9 100644
6326 --- a/arch/sparc/mm/fault_64.c
6327 +++ b/arch/sparc/mm/fault_64.c
6328 @@ -20,6 +20,9 @@
6329 #include <linux/kprobes.h>
6330 #include <linux/kdebug.h>
6331 #include <linux/percpu.h>
6332 +#include <linux/slab.h>
6333 +#include <linux/pagemap.h>
6334 +#include <linux/compiler.h>
6335
6336 #include <asm/page.h>
6337 #include <asm/pgtable.h>
6338 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6339 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6340 regs->tpc);
6341 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6342 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6343 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6344 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6345 dump_stack();
6346 unhandled_fault(regs->tpc, current, regs);
6347 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6348 show_regs(regs);
6349 }
6350
6351 +#ifdef CONFIG_PAX_PAGEEXEC
6352 +#ifdef CONFIG_PAX_DLRESOLVE
6353 +static void pax_emuplt_close(struct vm_area_struct *vma)
6354 +{
6355 + vma->vm_mm->call_dl_resolve = 0UL;
6356 +}
6357 +
6358 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6359 +{
6360 + unsigned int *kaddr;
6361 +
6362 + vmf->page = alloc_page(GFP_HIGHUSER);
6363 + if (!vmf->page)
6364 + return VM_FAULT_OOM;
6365 +
6366 + kaddr = kmap(vmf->page);
6367 + memset(kaddr, 0, PAGE_SIZE);
6368 + kaddr[0] = 0x9DE3BFA8U; /* save */
6369 + flush_dcache_page(vmf->page);
6370 + kunmap(vmf->page);
6371 + return VM_FAULT_MAJOR;
6372 +}
6373 +
6374 +static const struct vm_operations_struct pax_vm_ops = {
6375 + .close = pax_emuplt_close,
6376 + .fault = pax_emuplt_fault
6377 +};
6378 +
6379 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6380 +{
6381 + int ret;
6382 +
6383 + vma->vm_mm = current->mm;
6384 + vma->vm_start = addr;
6385 + vma->vm_end = addr + PAGE_SIZE;
6386 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6387 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6388 + vma->vm_ops = &pax_vm_ops;
6389 +
6390 + ret = insert_vm_struct(current->mm, vma);
6391 + if (ret)
6392 + return ret;
6393 +
6394 + ++current->mm->total_vm;
6395 + return 0;
6396 +}
6397 +#endif
6398 +
6399 +/*
6400 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6401 + *
6402 + * returns 1 when task should be killed
6403 + * 2 when patched PLT trampoline was detected
6404 + * 3 when unpatched PLT trampoline was detected
6405 + */
6406 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6407 +{
6408 +
6409 +#ifdef CONFIG_PAX_EMUPLT
6410 + int err;
6411 +
6412 + do { /* PaX: patched PLT emulation #1 */
6413 + unsigned int sethi1, sethi2, jmpl;
6414 +
6415 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6416 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6417 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6418 +
6419 + if (err)
6420 + break;
6421 +
6422 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6423 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6424 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6425 + {
6426 + unsigned long addr;
6427 +
6428 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6429 + addr = regs->u_regs[UREG_G1];
6430 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6431 +
6432 + if (test_thread_flag(TIF_32BIT))
6433 + addr &= 0xFFFFFFFFUL;
6434 +
6435 + regs->tpc = addr;
6436 + regs->tnpc = addr+4;
6437 + return 2;
6438 + }
6439 + } while (0);
6440 +
6441 + { /* PaX: patched PLT emulation #2 */
6442 + unsigned int ba;
6443 +
6444 + err = get_user(ba, (unsigned int *)regs->tpc);
6445 +
6446 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6447 + unsigned long addr;
6448 +
6449 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6450 +
6451 + if (test_thread_flag(TIF_32BIT))
6452 + addr &= 0xFFFFFFFFUL;
6453 +
6454 + regs->tpc = addr;
6455 + regs->tnpc = addr+4;
6456 + return 2;
6457 + }
6458 + }
6459 +
6460 + do { /* PaX: patched PLT emulation #3 */
6461 + unsigned int sethi, jmpl, nop;
6462 +
6463 + err = get_user(sethi, (unsigned int *)regs->tpc);
6464 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6465 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6466 +
6467 + if (err)
6468 + break;
6469 +
6470 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6471 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6472 + nop == 0x01000000U)
6473 + {
6474 + unsigned long addr;
6475 +
6476 + addr = (sethi & 0x003FFFFFU) << 10;
6477 + regs->u_regs[UREG_G1] = addr;
6478 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6479 +
6480 + if (test_thread_flag(TIF_32BIT))
6481 + addr &= 0xFFFFFFFFUL;
6482 +
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #4 */
6490 + unsigned int sethi, mov1, call, mov2;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6496 +
6497 + if (err)
6498 + break;
6499 +
6500 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6501 + mov1 == 0x8210000FU &&
6502 + (call & 0xC0000000U) == 0x40000000U &&
6503 + mov2 == 0x9E100001U)
6504 + {
6505 + unsigned long addr;
6506 +
6507 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6508 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6509 +
6510 + if (test_thread_flag(TIF_32BIT))
6511 + addr &= 0xFFFFFFFFUL;
6512 +
6513 + regs->tpc = addr;
6514 + regs->tnpc = addr+4;
6515 + return 2;
6516 + }
6517 + } while (0);
6518 +
6519 + do { /* PaX: patched PLT emulation #5 */
6520 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6521 +
6522 + err = get_user(sethi, (unsigned int *)regs->tpc);
6523 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6524 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6525 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6526 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6527 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6528 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6529 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6530 +
6531 + if (err)
6532 + break;
6533 +
6534 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6535 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6536 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6537 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6538 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6539 + sllx == 0x83287020U &&
6540 + jmpl == 0x81C04005U &&
6541 + nop == 0x01000000U)
6542 + {
6543 + unsigned long addr;
6544 +
6545 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6546 + regs->u_regs[UREG_G1] <<= 32;
6547 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6548 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6549 + regs->tpc = addr;
6550 + regs->tnpc = addr+4;
6551 + return 2;
6552 + }
6553 + } while (0);
6554 +
6555 + do { /* PaX: patched PLT emulation #6 */
6556 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6557 +
6558 + err = get_user(sethi, (unsigned int *)regs->tpc);
6559 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6560 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6561 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6562 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6563 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6564 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6565 +
6566 + if (err)
6567 + break;
6568 +
6569 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6570 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6571 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6572 + sllx == 0x83287020U &&
6573 + (or & 0xFFFFE000U) == 0x8A116000U &&
6574 + jmpl == 0x81C04005U &&
6575 + nop == 0x01000000U)
6576 + {
6577 + unsigned long addr;
6578 +
6579 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6580 + regs->u_regs[UREG_G1] <<= 32;
6581 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6582 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6583 + regs->tpc = addr;
6584 + regs->tnpc = addr+4;
6585 + return 2;
6586 + }
6587 + } while (0);
6588 +
6589 + do { /* PaX: unpatched PLT emulation step 1 */
6590 + unsigned int sethi, ba, nop;
6591 +
6592 + err = get_user(sethi, (unsigned int *)regs->tpc);
6593 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6594 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6595 +
6596 + if (err)
6597 + break;
6598 +
6599 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6600 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6601 + nop == 0x01000000U)
6602 + {
6603 + unsigned long addr;
6604 + unsigned int save, call;
6605 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6606 +
6607 + if ((ba & 0xFFC00000U) == 0x30800000U)
6608 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6609 + else
6610 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6611 +
6612 + if (test_thread_flag(TIF_32BIT))
6613 + addr &= 0xFFFFFFFFUL;
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->tpc = call_dl_resolve;
6666 + regs->tnpc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6680 +
6681 + if (test_thread_flag(TIF_32BIT))
6682 + addr &= 0xFFFFFFFFUL;
6683 +
6684 + regs->tpc = addr;
6685 + regs->tnpc = addr+4;
6686 + return 3;
6687 + }
6688 +
6689 + /* PaX: 64-bit PLT stub */
6690 + err = get_user(sethi1, (unsigned int *)addr);
6691 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6692 + err |= get_user(or1, (unsigned int *)(addr+8));
6693 + err |= get_user(or2, (unsigned int *)(addr+12));
6694 + err |= get_user(sllx, (unsigned int *)(addr+16));
6695 + err |= get_user(add, (unsigned int *)(addr+20));
6696 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6697 + err |= get_user(nop, (unsigned int *)(addr+28));
6698 + if (err)
6699 + break;
6700 +
6701 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6702 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6703 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6704 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6705 + sllx == 0x89293020U &&
6706 + add == 0x8A010005U &&
6707 + jmpl == 0x89C14000U &&
6708 + nop == 0x01000000U)
6709 + {
6710 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6712 + regs->u_regs[UREG_G4] <<= 32;
6713 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6714 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6715 + regs->u_regs[UREG_G4] = addr + 24;
6716 + addr = regs->u_regs[UREG_G5];
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 3;
6720 + }
6721 + }
6722 + } while (0);
6723 +
6724 +#ifdef CONFIG_PAX_DLRESOLVE
6725 + do { /* PaX: unpatched PLT emulation step 2 */
6726 + unsigned int save, call, nop;
6727 +
6728 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6729 + err |= get_user(call, (unsigned int *)regs->tpc);
6730 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6731 + if (err)
6732 + break;
6733 +
6734 + if (save == 0x9DE3BFA8U &&
6735 + (call & 0xC0000000U) == 0x40000000U &&
6736 + nop == 0x01000000U)
6737 + {
6738 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6739 +
6740 + if (test_thread_flag(TIF_32BIT))
6741 + dl_resolve &= 0xFFFFFFFFUL;
6742 +
6743 + regs->u_regs[UREG_RETPC] = regs->tpc;
6744 + regs->tpc = dl_resolve;
6745 + regs->tnpc = dl_resolve+4;
6746 + return 3;
6747 + }
6748 + } while (0);
6749 +#endif
6750 +
6751 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6752 + unsigned int sethi, ba, nop;
6753 +
6754 + err = get_user(sethi, (unsigned int *)regs->tpc);
6755 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6756 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6757 +
6758 + if (err)
6759 + break;
6760 +
6761 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6762 + (ba & 0xFFF00000U) == 0x30600000U &&
6763 + nop == 0x01000000U)
6764 + {
6765 + unsigned long addr;
6766 +
6767 + addr = (sethi & 0x003FFFFFU) << 10;
6768 + regs->u_regs[UREG_G1] = addr;
6769 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6770 +
6771 + if (test_thread_flag(TIF_32BIT))
6772 + addr &= 0xFFFFFFFFUL;
6773 +
6774 + regs->tpc = addr;
6775 + regs->tnpc = addr+4;
6776 + return 2;
6777 + }
6778 + } while (0);
6779 +
6780 +#endif
6781 +
6782 + return 1;
6783 +}
6784 +
6785 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6786 +{
6787 + unsigned long i;
6788 +
6789 + printk(KERN_ERR "PAX: bytes at PC: ");
6790 + for (i = 0; i < 8; i++) {
6791 + unsigned int c;
6792 + if (get_user(c, (unsigned int *)pc+i))
6793 + printk(KERN_CONT "???????? ");
6794 + else
6795 + printk(KERN_CONT "%08x ", c);
6796 + }
6797 + printk("\n");
6798 +}
6799 +#endif
6800 +
6801 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6802 {
6803 struct mm_struct *mm = current->mm;
6804 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6805 if (!vma)
6806 goto bad_area;
6807
6808 +#ifdef CONFIG_PAX_PAGEEXEC
6809 + /* PaX: detect ITLB misses on non-exec pages */
6810 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6811 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6812 + {
6813 + if (address != regs->tpc)
6814 + goto good_area;
6815 +
6816 + up_read(&mm->mmap_sem);
6817 + switch (pax_handle_fetch_fault(regs)) {
6818 +
6819 +#ifdef CONFIG_PAX_EMUPLT
6820 + case 2:
6821 + case 3:
6822 + return;
6823 +#endif
6824 +
6825 + }
6826 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6827 + do_group_exit(SIGKILL);
6828 + }
6829 +#endif
6830 +
6831 /* Pure DTLB misses do not tell us whether the fault causing
6832 * load/store/atomic was a write or not, it only says that there
6833 * was no match. So in such a case we (carefully) read the
6834 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6835 index f27d103..1b06377 100644
6836 --- a/arch/sparc/mm/hugetlbpage.c
6837 +++ b/arch/sparc/mm/hugetlbpage.c
6838 @@ -69,7 +69,7 @@ full_search:
6839 }
6840 return -ENOMEM;
6841 }
6842 - if (likely(!vma || addr + len <= vma->vm_start)) {
6843 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6844 /*
6845 * Remember the place where we stopped the search:
6846 */
6847 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 /* make sure it can fit in the remaining address space */
6849 if (likely(addr > len)) {
6850 vma = find_vma(mm, addr-len);
6851 - if (!vma || addr <= vma->vm_start) {
6852 + if (check_heap_stack_gap(vma, addr - len, len)) {
6853 /* remember the address as a hint for next time */
6854 return (mm->free_area_cache = addr-len);
6855 }
6856 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 if (unlikely(mm->mmap_base < len))
6858 goto bottomup;
6859
6860 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6861 + addr = mm->mmap_base - len;
6862
6863 do {
6864 + addr &= HPAGE_MASK;
6865 /*
6866 * Lookup failure means no vma is above this address,
6867 * else if new region fits below vma->vm_start,
6868 * return with success:
6869 */
6870 vma = find_vma(mm, addr);
6871 - if (likely(!vma || addr+len <= vma->vm_start)) {
6872 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6873 /* remember the address as a hint for next time */
6874 return (mm->free_area_cache = addr);
6875 }
6876 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6877 mm->cached_hole_size = vma->vm_start - addr;
6878
6879 /* try just below the current vma->vm_start */
6880 - addr = (vma->vm_start-len) & HPAGE_MASK;
6881 - } while (likely(len < vma->vm_start));
6882 + addr = skip_heap_stack_gap(vma, len);
6883 + } while (!IS_ERR_VALUE(addr));
6884
6885 bottomup:
6886 /*
6887 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6888 if (addr) {
6889 addr = ALIGN(addr, HPAGE_SIZE);
6890 vma = find_vma(mm, addr);
6891 - if (task_size - len >= addr &&
6892 - (!vma || addr + len <= vma->vm_start))
6893 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6894 return addr;
6895 }
6896 if (mm->get_unmapped_area == arch_get_unmapped_area)
6897 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6898 index dc7c3b1..34c0070 100644
6899 --- a/arch/sparc/mm/init_32.c
6900 +++ b/arch/sparc/mm/init_32.c
6901 @@ -317,6 +317,9 @@ extern void device_scan(void);
6902 pgprot_t PAGE_SHARED __read_mostly;
6903 EXPORT_SYMBOL(PAGE_SHARED);
6904
6905 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6906 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6907 +
6908 void __init paging_init(void)
6909 {
6910 switch(sparc_cpu_model) {
6911 @@ -345,17 +348,17 @@ void __init paging_init(void)
6912
6913 /* Initialize the protection map with non-constant, MMU dependent values. */
6914 protection_map[0] = PAGE_NONE;
6915 - protection_map[1] = PAGE_READONLY;
6916 - protection_map[2] = PAGE_COPY;
6917 - protection_map[3] = PAGE_COPY;
6918 + protection_map[1] = PAGE_READONLY_NOEXEC;
6919 + protection_map[2] = PAGE_COPY_NOEXEC;
6920 + protection_map[3] = PAGE_COPY_NOEXEC;
6921 protection_map[4] = PAGE_READONLY;
6922 protection_map[5] = PAGE_READONLY;
6923 protection_map[6] = PAGE_COPY;
6924 protection_map[7] = PAGE_COPY;
6925 protection_map[8] = PAGE_NONE;
6926 - protection_map[9] = PAGE_READONLY;
6927 - protection_map[10] = PAGE_SHARED;
6928 - protection_map[11] = PAGE_SHARED;
6929 + protection_map[9] = PAGE_READONLY_NOEXEC;
6930 + protection_map[10] = PAGE_SHARED_NOEXEC;
6931 + protection_map[11] = PAGE_SHARED_NOEXEC;
6932 protection_map[12] = PAGE_READONLY;
6933 protection_map[13] = PAGE_READONLY;
6934 protection_map[14] = PAGE_SHARED;
6935 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6936 index 509b1ff..bfd7118 100644
6937 --- a/arch/sparc/mm/srmmu.c
6938 +++ b/arch/sparc/mm/srmmu.c
6939 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6940 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6941 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6942 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6943 +
6944 +#ifdef CONFIG_PAX_PAGEEXEC
6945 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6946 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6947 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6948 +#endif
6949 +
6950 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6951 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6952
6953 diff --git a/arch/um/Makefile b/arch/um/Makefile
6954 index fc633db..5e1a1c2 100644
6955 --- a/arch/um/Makefile
6956 +++ b/arch/um/Makefile
6957 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6958 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6959 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6960
6961 +ifdef CONSTIFY_PLUGIN
6962 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6963 +endif
6964 +
6965 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6966
6967 #This will adjust *FLAGS accordingly to the platform.
6968 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969 index 6c03acd..a5e0215 100644
6970 --- a/arch/um/include/asm/kmap_types.h
6971 +++ b/arch/um/include/asm/kmap_types.h
6972 @@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976 + KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981 index 4cc9b6c..02e5029 100644
6982 --- a/arch/um/include/asm/page.h
6983 +++ b/arch/um/include/asm/page.h
6984 @@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988 +#define ktla_ktva(addr) (addr)
6989 +#define ktva_ktla(addr) (addr)
6990 +
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995 index 4a28a15..654dc2a 100644
6996 --- a/arch/um/kernel/process.c
6997 +++ b/arch/um/kernel/process.c
6998 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002 -/*
7003 - * Only x86 and x86_64 have an arch_align_stack().
7004 - * All other arches have "#define arch_align_stack(x) (x)"
7005 - * in their asm/system.h
7006 - * As this is included in UML from asm-um/system-generic.h,
7007 - * we can use it to behave as the subarch does.
7008 - */
7009 -#ifndef arch_align_stack
7010 -unsigned long arch_align_stack(unsigned long sp)
7011 -{
7012 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013 - sp -= get_random_int() % 8192;
7014 - return sp & ~0xf;
7015 -}
7016 -#endif
7017 -
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7022 index d1b93c4..ae1b7fd 100644
7023 --- a/arch/um/sys-i386/shared/sysdep/system.h
7024 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7025 @@ -17,7 +17,7 @@
7026 # define AT_VECTOR_SIZE_ARCH 1
7027 #endif
7028
7029 -extern unsigned long arch_align_stack(unsigned long sp);
7030 +#define arch_align_stack(x) ((x) & ~0xfUL)
7031
7032 void default_idle(void);
7033
7034 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7035 index 857ca0b..9a2669d 100644
7036 --- a/arch/um/sys-i386/syscalls.c
7037 +++ b/arch/um/sys-i386/syscalls.c
7038 @@ -11,6 +11,21 @@
7039 #include "asm/uaccess.h"
7040 #include "asm/unistd.h"
7041
7042 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7043 +{
7044 + unsigned long pax_task_size = TASK_SIZE;
7045 +
7046 +#ifdef CONFIG_PAX_SEGMEXEC
7047 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7048 + pax_task_size = SEGMEXEC_TASK_SIZE;
7049 +#endif
7050 +
7051 + if (len > pax_task_size || addr > pax_task_size - len)
7052 + return -EINVAL;
7053 +
7054 + return 0;
7055 +}
7056 +
7057 /*
7058 * Perform the select(nd, in, out, ex, tv) and mmap() system
7059 * calls. Linux/i386 didn't use to be able to handle more than
7060 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7061 index d1b93c4..ae1b7fd 100644
7062 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7063 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7064 @@ -17,7 +17,7 @@
7065 # define AT_VECTOR_SIZE_ARCH 1
7066 #endif
7067
7068 -extern unsigned long arch_align_stack(unsigned long sp);
7069 +#define arch_align_stack(x) ((x) & ~0xfUL)
7070
7071 void default_idle(void);
7072
7073 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7074 index 73ae02a..f932de5 100644
7075 --- a/arch/x86/Kconfig
7076 +++ b/arch/x86/Kconfig
7077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7078
7079 config X86_32_LAZY_GS
7080 def_bool y
7081 - depends on X86_32 && !CC_STACKPROTECTOR
7082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7083
7084 config KTIME_SCALAR
7085 def_bool X86_32
7086 @@ -1008,7 +1008,7 @@ choice
7087
7088 config NOHIGHMEM
7089 bool "off"
7090 - depends on !X86_NUMAQ
7091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7094 However, the address space of 32-bit x86 processors is only 4
7095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7096
7097 config HIGHMEM4G
7098 bool "4GB"
7099 - depends on !X86_NUMAQ
7100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7101 ---help---
7102 Select this if you have a 32-bit processor and between 1 and 4
7103 gigabytes of physical RAM.
7104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7105 hex
7106 default 0xB0000000 if VMSPLIT_3G_OPT
7107 default 0x80000000 if VMSPLIT_2G
7108 - default 0x78000000 if VMSPLIT_2G_OPT
7109 + default 0x70000000 if VMSPLIT_2G_OPT
7110 default 0x40000000 if VMSPLIT_1G
7111 default 0xC0000000
7112 depends on X86_32
7113 @@ -1460,6 +1460,7 @@ config SECCOMP
7114
7115 config CC_STACKPROTECTOR
7116 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7117 + depends on X86_64 || !PAX_MEMORY_UDEREF
7118 ---help---
7119 This option turns on the -fstack-protector GCC feature. This
7120 feature puts, at the beginning of functions, a canary value on
7121 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7122 config PHYSICAL_START
7123 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7124 default "0x1000000"
7125 + range 0x400000 0x40000000
7126 ---help---
7127 This gives the physical address where the kernel is loaded.
7128
7129 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7130 hex
7131 prompt "Alignment value to which kernel should be aligned" if X86_32
7132 default "0x1000000"
7133 + range 0x400000 0x1000000 if PAX_KERNEXEC
7134 range 0x2000 0x1000000
7135 ---help---
7136 This value puts the alignment restrictions on physical address
7137 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7138 Say N if you want to disable CPU hotplug.
7139
7140 config COMPAT_VDSO
7141 - def_bool y
7142 + def_bool n
7143 prompt "Compat VDSO support"
7144 depends on X86_32 || IA32_EMULATION
7145 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7146 ---help---
7147 Map the 32-bit VDSO to the predictable old-style address too.
7148 ---help---
7149 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7150 index 0e566103..1a6b57e 100644
7151 --- a/arch/x86/Kconfig.cpu
7152 +++ b/arch/x86/Kconfig.cpu
7153 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7154
7155 config X86_F00F_BUG
7156 def_bool y
7157 - depends on M586MMX || M586TSC || M586 || M486 || M386
7158 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7159
7160 config X86_WP_WORKS_OK
7161 def_bool y
7162 @@ -360,7 +360,7 @@ config X86_POPAD_OK
7163
7164 config X86_ALIGNMENT_16
7165 def_bool y
7166 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7167 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7168
7169 config X86_INTEL_USERCOPY
7170 def_bool y
7171 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
7172 # generates cmov.
7173 config X86_CMOV
7174 def_bool y
7175 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7176 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7177
7178 config X86_MINIMUM_CPU_FAMILY
7179 int
7180 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7181 index d105f29..c928727 100644
7182 --- a/arch/x86/Kconfig.debug
7183 +++ b/arch/x86/Kconfig.debug
7184 @@ -99,7 +99,7 @@ config X86_PTDUMP
7185 config DEBUG_RODATA
7186 bool "Write protect kernel read-only data structures"
7187 default y
7188 - depends on DEBUG_KERNEL
7189 + depends on DEBUG_KERNEL && BROKEN
7190 ---help---
7191 Mark the kernel read-only data as write-protected in the pagetables,
7192 in order to catch accidental (and incorrect) writes to such const
7193 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7194 index d2d24c9..0f21f8d 100644
7195 --- a/arch/x86/Makefile
7196 +++ b/arch/x86/Makefile
7197 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7198 else
7199 BITS := 64
7200 UTS_MACHINE := x86_64
7201 + biarch := $(call cc-option,-m64)
7202 CHECKFLAGS += -D__x86_64__ -m64
7203
7204 KBUILD_AFLAGS += -m64
7205 @@ -189,3 +190,12 @@ define archhelp
7206 echo ' FDARGS="..." arguments for the booted kernel'
7207 echo ' FDINITRD=file initrd for the booted kernel'
7208 endef
7209 +
7210 +define OLD_LD
7211 +
7212 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7213 +*** Please upgrade your binutils to 2.18 or newer
7214 +endef
7215 +
7216 +archprepare:
7217 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7218 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7219 index ec749c2..bbb5319 100644
7220 --- a/arch/x86/boot/Makefile
7221 +++ b/arch/x86/boot/Makefile
7222 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7223 $(call cc-option, -fno-stack-protector) \
7224 $(call cc-option, -mpreferred-stack-boundary=2)
7225 KBUILD_CFLAGS += $(call cc-option, -m32)
7226 +ifdef CONSTIFY_PLUGIN
7227 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7228 +endif
7229 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7230 GCOV_PROFILE := n
7231
7232 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7233 index 878e4b9..20537ab 100644
7234 --- a/arch/x86/boot/bitops.h
7235 +++ b/arch/x86/boot/bitops.h
7236 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237 u8 v;
7238 const u32 *p = (const u32 *)addr;
7239
7240 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7241 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7242 return v;
7243 }
7244
7245 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7246
7247 static inline void set_bit(int nr, void *addr)
7248 {
7249 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7250 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7251 }
7252
7253 #endif /* BOOT_BITOPS_H */
7254 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7255 index 98239d2..f40214c 100644
7256 --- a/arch/x86/boot/boot.h
7257 +++ b/arch/x86/boot/boot.h
7258 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7259 static inline u16 ds(void)
7260 {
7261 u16 seg;
7262 - asm("movw %%ds,%0" : "=rm" (seg));
7263 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7264 return seg;
7265 }
7266
7267 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7268 static inline int memcmp(const void *s1, const void *s2, size_t len)
7269 {
7270 u8 diff;
7271 - asm("repe; cmpsb; setnz %0"
7272 + asm volatile("repe; cmpsb; setnz %0"
7273 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7274 return diff;
7275 }
7276 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7277 index f8ed065..5bf5ff3 100644
7278 --- a/arch/x86/boot/compressed/Makefile
7279 +++ b/arch/x86/boot/compressed/Makefile
7280 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7281 KBUILD_CFLAGS += $(cflags-y)
7282 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7283 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7284 +ifdef CONSTIFY_PLUGIN
7285 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7286 +endif
7287
7288 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7289 GCOV_PROFILE := n
7290 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7291 index f543b70..b60fba8 100644
7292 --- a/arch/x86/boot/compressed/head_32.S
7293 +++ b/arch/x86/boot/compressed/head_32.S
7294 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7295 notl %eax
7296 andl %eax, %ebx
7297 #else
7298 - movl $LOAD_PHYSICAL_ADDR, %ebx
7299 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303 @@ -149,7 +149,7 @@ relocated:
7304 * and where it was actually loaded.
7305 */
7306 movl %ebp, %ebx
7307 - subl $LOAD_PHYSICAL_ADDR, %ebx
7308 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7309 jz 2f /* Nothing to be done if loaded at compiled addr. */
7310 /*
7311 * Process relocations.
7312 @@ -157,8 +157,7 @@ relocated:
7313
7314 1: subl $4, %edi
7315 movl (%edi), %ecx
7316 - testl %ecx, %ecx
7317 - jz 2f
7318 + jecxz 2f
7319 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7320 jmp 1b
7321 2:
7322 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7323 index 077e1b6..2c6b13b 100644
7324 --- a/arch/x86/boot/compressed/head_64.S
7325 +++ b/arch/x86/boot/compressed/head_64.S
7326 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7327 notl %eax
7328 andl %eax, %ebx
7329 #else
7330 - movl $LOAD_PHYSICAL_ADDR, %ebx
7331 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7332 #endif
7333
7334 /* Target address to relocate to for decompression */
7335 @@ -183,7 +183,7 @@ no_longmode:
7336 hlt
7337 jmp 1b
7338
7339 -#include "../../kernel/verify_cpu_64.S"
7340 +#include "../../kernel/verify_cpu.S"
7341
7342 /*
7343 * Be careful here startup_64 needs to be at a predictable
7344 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7345 notq %rax
7346 andq %rax, %rbp
7347 #else
7348 - movq $LOAD_PHYSICAL_ADDR, %rbp
7349 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7350 #endif
7351
7352 /* Target address to relocate to for decompression */
7353 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7354 index 842b2a3..f00178b 100644
7355 --- a/arch/x86/boot/compressed/misc.c
7356 +++ b/arch/x86/boot/compressed/misc.c
7357 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7358 case PT_LOAD:
7359 #ifdef CONFIG_RELOCATABLE
7360 dest = output;
7361 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7362 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7363 #else
7364 dest = (void *)(phdr->p_paddr);
7365 #endif
7366 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7367 error("Destination address too large");
7368 #endif
7369 #ifndef CONFIG_RELOCATABLE
7370 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7371 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7372 error("Wrong destination address");
7373 #endif
7374
7375 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7376 index bcbd36c..b1754af 100644
7377 --- a/arch/x86/boot/compressed/mkpiggy.c
7378 +++ b/arch/x86/boot/compressed/mkpiggy.c
7379 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7380
7381 offs = (olen > ilen) ? olen - ilen : 0;
7382 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7383 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7384 + offs += 64*1024; /* Add 64K bytes slack */
7385 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7386
7387 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7388 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7389 index bbeb0c3..f5167ab 100644
7390 --- a/arch/x86/boot/compressed/relocs.c
7391 +++ b/arch/x86/boot/compressed/relocs.c
7392 @@ -10,8 +10,11 @@
7393 #define USE_BSD
7394 #include <endian.h>
7395
7396 +#include "../../../../include/linux/autoconf.h"
7397 +
7398 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7399 static Elf32_Ehdr ehdr;
7400 +static Elf32_Phdr *phdr;
7401 static unsigned long reloc_count, reloc_idx;
7402 static unsigned long *relocs;
7403
7404 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7405
7406 static int is_safe_abs_reloc(const char* sym_name)
7407 {
7408 - int i;
7409 + unsigned int i;
7410
7411 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7412 if (!strcmp(sym_name, safe_abs_relocs[i]))
7413 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7414 }
7415 }
7416
7417 +static void read_phdrs(FILE *fp)
7418 +{
7419 + unsigned int i;
7420 +
7421 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7422 + if (!phdr) {
7423 + die("Unable to allocate %d program headers\n",
7424 + ehdr.e_phnum);
7425 + }
7426 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7427 + die("Seek to %d failed: %s\n",
7428 + ehdr.e_phoff, strerror(errno));
7429 + }
7430 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7431 + die("Cannot read ELF program headers: %s\n",
7432 + strerror(errno));
7433 + }
7434 + for(i = 0; i < ehdr.e_phnum; i++) {
7435 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7436 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7437 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7438 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7439 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7440 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7441 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7442 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7443 + }
7444 +
7445 +}
7446 +
7447 static void read_shdrs(FILE *fp)
7448 {
7449 - int i;
7450 + unsigned int i;
7451 Elf32_Shdr shdr;
7452
7453 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7454 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7455
7456 static void read_strtabs(FILE *fp)
7457 {
7458 - int i;
7459 + unsigned int i;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_STRTAB) {
7463 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7464
7465 static void read_symtabs(FILE *fp)
7466 {
7467 - int i,j;
7468 + unsigned int i,j;
7469 for (i = 0; i < ehdr.e_shnum; i++) {
7470 struct section *sec = &secs[i];
7471 if (sec->shdr.sh_type != SHT_SYMTAB) {
7472 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7473
7474 static void read_relocs(FILE *fp)
7475 {
7476 - int i,j;
7477 + unsigned int i,j;
7478 + uint32_t base;
7479 +
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 struct section *sec = &secs[i];
7482 if (sec->shdr.sh_type != SHT_REL) {
7483 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7484 die("Cannot read symbol table: %s\n",
7485 strerror(errno));
7486 }
7487 + base = 0;
7488 + for (j = 0; j < ehdr.e_phnum; j++) {
7489 + if (phdr[j].p_type != PT_LOAD )
7490 + continue;
7491 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7492 + continue;
7493 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7494 + break;
7495 + }
7496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7497 Elf32_Rel *rel = &sec->reltab[j];
7498 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7499 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7500 rel->r_info = elf32_to_cpu(rel->r_info);
7501 }
7502 }
7503 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7504
7505 static void print_absolute_symbols(void)
7506 {
7507 - int i;
7508 + unsigned int i;
7509 printf("Absolute symbols\n");
7510 printf(" Num: Value Size Type Bind Visibility Name\n");
7511 for (i = 0; i < ehdr.e_shnum; i++) {
7512 struct section *sec = &secs[i];
7513 char *sym_strtab;
7514 Elf32_Sym *sh_symtab;
7515 - int j;
7516 + unsigned int j;
7517
7518 if (sec->shdr.sh_type != SHT_SYMTAB) {
7519 continue;
7520 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7521
7522 static void print_absolute_relocs(void)
7523 {
7524 - int i, printed = 0;
7525 + unsigned int i, printed = 0;
7526
7527 for (i = 0; i < ehdr.e_shnum; i++) {
7528 struct section *sec = &secs[i];
7529 struct section *sec_applies, *sec_symtab;
7530 char *sym_strtab;
7531 Elf32_Sym *sh_symtab;
7532 - int j;
7533 + unsigned int j;
7534 if (sec->shdr.sh_type != SHT_REL) {
7535 continue;
7536 }
7537 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7538
7539 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7540 {
7541 - int i;
7542 + unsigned int i;
7543 /* Walk through the relocations */
7544 for (i = 0; i < ehdr.e_shnum; i++) {
7545 char *sym_strtab;
7546 Elf32_Sym *sh_symtab;
7547 struct section *sec_applies, *sec_symtab;
7548 - int j;
7549 + unsigned int j;
7550 struct section *sec = &secs[i];
7551
7552 if (sec->shdr.sh_type != SHT_REL) {
7553 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7554 if (sym->st_shndx == SHN_ABS) {
7555 continue;
7556 }
7557 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7558 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7559 + continue;
7560 +
7561 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7562 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7563 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7564 + continue;
7565 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7566 + continue;
7567 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7568 + continue;
7569 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7570 + continue;
7571 +#endif
7572 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7573 /*
7574 * NONE can be ignored and and PC relative
7575 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7576
7577 static void emit_relocs(int as_text)
7578 {
7579 - int i;
7580 + unsigned int i;
7581 /* Count how many relocations I have and allocate space for them. */
7582 reloc_count = 0;
7583 walk_relocs(count_reloc);
7584 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7585 fname, strerror(errno));
7586 }
7587 read_ehdr(fp);
7588 + read_phdrs(fp);
7589 read_shdrs(fp);
7590 read_strtabs(fp);
7591 read_symtabs(fp);
7592 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7593 index 4d3ff03..e4972ff 100644
7594 --- a/arch/x86/boot/cpucheck.c
7595 +++ b/arch/x86/boot/cpucheck.c
7596 @@ -74,7 +74,7 @@ static int has_fpu(void)
7597 u16 fcw = -1, fsw = -1;
7598 u32 cr0;
7599
7600 - asm("movl %%cr0,%0" : "=r" (cr0));
7601 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7605 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7606 {
7607 u32 f0, f1;
7608
7609 - asm("pushfl ; "
7610 + asm volatile("pushfl ; "
7611 "pushfl ; "
7612 "popl %0 ; "
7613 "movl %0,%1 ; "
7614 @@ -115,7 +115,7 @@ static void get_flags(void)
7615 set_bit(X86_FEATURE_FPU, cpu.flags);
7616
7617 if (has_eflag(X86_EFLAGS_ID)) {
7618 - asm("cpuid"
7619 + asm volatile("cpuid"
7620 : "=a" (max_intel_level),
7621 "=b" (cpu_vendor[0]),
7622 "=d" (cpu_vendor[1]),
7623 @@ -124,7 +124,7 @@ static void get_flags(void)
7624
7625 if (max_intel_level >= 0x00000001 &&
7626 max_intel_level <= 0x0000ffff) {
7627 - asm("cpuid"
7628 + asm volatile("cpuid"
7629 : "=a" (tfms),
7630 "=c" (cpu.flags[4]),
7631 "=d" (cpu.flags[0])
7632 @@ -136,7 +136,7 @@ static void get_flags(void)
7633 cpu.model += ((tfms >> 16) & 0xf) << 4;
7634 }
7635
7636 - asm("cpuid"
7637 + asm volatile("cpuid"
7638 : "=a" (max_amd_level)
7639 : "a" (0x80000000)
7640 : "ebx", "ecx", "edx");
7641 @@ -144,7 +144,7 @@ static void get_flags(void)
7642 if (max_amd_level >= 0x80000001 &&
7643 max_amd_level <= 0x8000ffff) {
7644 u32 eax = 0x80000001;
7645 - asm("cpuid"
7646 + asm volatile("cpuid"
7647 : "+a" (eax),
7648 "=c" (cpu.flags[6]),
7649 "=d" (cpu.flags[1])
7650 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7651 u32 ecx = MSR_K7_HWCR;
7652 u32 eax, edx;
7653
7654 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7655 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7656 eax &= ~(1 << 15);
7657 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7658 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7659
7660 get_flags(); /* Make sure it really did something */
7661 err = check_flags();
7662 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7663 u32 ecx = MSR_VIA_FCR;
7664 u32 eax, edx;
7665
7666 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7667 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7668 eax |= (1<<1)|(1<<7);
7669 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7670 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7671
7672 set_bit(X86_FEATURE_CX8, cpu.flags);
7673 err = check_flags();
7674 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7675 u32 eax, edx;
7676 u32 level = 1;
7677
7678 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7679 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7680 - asm("cpuid"
7681 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7682 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7683 + asm volatile("cpuid"
7684 : "+a" (level), "=d" (cpu.flags[0])
7685 : : "ecx", "ebx");
7686 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7687 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7688
7689 err = check_flags();
7690 }
7691 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7692 index b31cc54..8d69237 100644
7693 --- a/arch/x86/boot/header.S
7694 +++ b/arch/x86/boot/header.S
7695 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7696 # single linked list of
7697 # struct setup_data
7698
7699 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7700 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7701
7702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7703 #define VO_INIT_SIZE (VO__end - VO__text)
7704 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7705 index cae3feb..ff8ff2a 100644
7706 --- a/arch/x86/boot/memory.c
7707 +++ b/arch/x86/boot/memory.c
7708 @@ -19,7 +19,7 @@
7709
7710 static int detect_memory_e820(void)
7711 {
7712 - int count = 0;
7713 + unsigned int count = 0;
7714 struct biosregs ireg, oreg;
7715 struct e820entry *desc = boot_params.e820_map;
7716 static struct e820entry buf; /* static so it is zeroed */
7717 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7718 index 11e8c6e..fdbb1ed 100644
7719 --- a/arch/x86/boot/video-vesa.c
7720 +++ b/arch/x86/boot/video-vesa.c
7721 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7722
7723 boot_params.screen_info.vesapm_seg = oreg.es;
7724 boot_params.screen_info.vesapm_off = oreg.di;
7725 + boot_params.screen_info.vesapm_size = oreg.cx;
7726 }
7727
7728 /*
7729 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7730 index d42da38..787cdf3 100644
7731 --- a/arch/x86/boot/video.c
7732 +++ b/arch/x86/boot/video.c
7733 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7734 static unsigned int get_entry(void)
7735 {
7736 char entry_buf[4];
7737 - int i, len = 0;
7738 + unsigned int i, len = 0;
7739 int key;
7740 unsigned int v;
7741
7742 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7743 index 5b577d5..3c1fed4 100644
7744 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7745 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7746 @@ -8,6 +8,8 @@
7747 * including this sentence is retained in full.
7748 */
7749
7750 +#include <asm/alternative-asm.h>
7751 +
7752 .extern crypto_ft_tab
7753 .extern crypto_it_tab
7754 .extern crypto_fl_tab
7755 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7756 je B192; \
7757 leaq 32(r9),r9;
7758
7759 +#define ret pax_force_retaddr 0, 1; ret
7760 +
7761 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7762 movq r1,r2; \
7763 movq r3,r4; \
7764 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7765 index eb0566e..e3ebad8 100644
7766 --- a/arch/x86/crypto/aesni-intel_asm.S
7767 +++ b/arch/x86/crypto/aesni-intel_asm.S
7768 @@ -16,6 +16,7 @@
7769 */
7770
7771 #include <linux/linkage.h>
7772 +#include <asm/alternative-asm.h>
7773
7774 .text
7775
7776 @@ -52,6 +53,7 @@ _key_expansion_256a:
7777 pxor %xmm1, %xmm0
7778 movaps %xmm0, (%rcx)
7779 add $0x10, %rcx
7780 + pax_force_retaddr_bts
7781 ret
7782
7783 _key_expansion_192a:
7784 @@ -75,6 +77,7 @@ _key_expansion_192a:
7785 shufps $0b01001110, %xmm2, %xmm1
7786 movaps %xmm1, 16(%rcx)
7787 add $0x20, %rcx
7788 + pax_force_retaddr_bts
7789 ret
7790
7791 _key_expansion_192b:
7792 @@ -93,6 +96,7 @@ _key_expansion_192b:
7793
7794 movaps %xmm0, (%rcx)
7795 add $0x10, %rcx
7796 + pax_force_retaddr_bts
7797 ret
7798
7799 _key_expansion_256b:
7800 @@ -104,6 +108,7 @@ _key_expansion_256b:
7801 pxor %xmm1, %xmm2
7802 movaps %xmm2, (%rcx)
7803 add $0x10, %rcx
7804 + pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7809 cmp %rcx, %rdi
7810 jb .Ldec_key_loop
7811 xor %rax, %rax
7812 + pax_force_retaddr 0, 1
7813 ret
7814 +ENDPROC(aesni_set_key)
7815
7816 /*
7817 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7818 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7819 movups (INP), STATE # input
7820 call _aesni_enc1
7821 movups STATE, (OUTP) # output
7822 + pax_force_retaddr 0, 1
7823 ret
7824 +ENDPROC(aesni_enc)
7825
7826 /*
7827 * _aesni_enc1: internal ABI
7828 @@ -319,6 +328,7 @@ _aesni_enc1:
7829 movaps 0x70(TKEYP), KEY
7830 # aesenclast KEY, STATE # last round
7831 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -482,6 +492,7 @@ _aesni_enc4:
7837 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7838 # aesenclast KEY, STATE4
7839 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7840 + pax_force_retaddr_bts
7841 ret
7842
7843 /*
7844 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7845 movups (INP), STATE # input
7846 call _aesni_dec1
7847 movups STATE, (OUTP) #output
7848 + pax_force_retaddr 0, 1
7849 ret
7850 +ENDPROC(aesni_dec)
7851
7852 /*
7853 * _aesni_dec1: internal ABI
7854 @@ -563,6 +576,7 @@ _aesni_dec1:
7855 movaps 0x70(TKEYP), KEY
7856 # aesdeclast KEY, STATE # last round
7857 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7858 + pax_force_retaddr_bts
7859 ret
7860
7861 /*
7862 @@ -726,6 +740,7 @@ _aesni_dec4:
7863 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7864 # aesdeclast KEY, STATE4
7865 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7866 + pax_force_retaddr_bts
7867 ret
7868
7869 /*
7870 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7871 cmp $16, LEN
7872 jge .Lecb_enc_loop1
7873 .Lecb_enc_ret:
7874 + pax_force_retaddr 0, 1
7875 ret
7876 +ENDPROC(aesni_ecb_enc)
7877
7878 /*
7879 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7880 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7881 cmp $16, LEN
7882 jge .Lecb_dec_loop1
7883 .Lecb_dec_ret:
7884 + pax_force_retaddr 0, 1
7885 ret
7886 +ENDPROC(aesni_ecb_dec)
7887
7888 /*
7889 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7890 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7891 jge .Lcbc_enc_loop
7892 movups STATE, (IVP)
7893 .Lcbc_enc_ret:
7894 + pax_force_retaddr 0, 1
7895 ret
7896 +ENDPROC(aesni_cbc_enc)
7897
7898 /*
7899 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7900 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7901 .Lcbc_dec_ret:
7902 movups IV, (IVP)
7903 .Lcbc_dec_just_ret:
7904 + pax_force_retaddr 0, 1
7905 ret
7906 +ENDPROC(aesni_cbc_dec)
7907 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7908 index 6214a9b..1f4fc9a 100644
7909 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7910 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7911 @@ -1,3 +1,5 @@
7912 +#include <asm/alternative-asm.h>
7913 +
7914 # enter ECRYPT_encrypt_bytes
7915 .text
7916 .p2align 5
7917 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7918 add %r11,%rsp
7919 mov %rdi,%rax
7920 mov %rsi,%rdx
7921 + pax_force_retaddr 0, 1
7922 ret
7923 # bytesatleast65:
7924 ._bytesatleast65:
7925 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7926 add %r11,%rsp
7927 mov %rdi,%rax
7928 mov %rsi,%rdx
7929 + pax_force_retaddr
7930 ret
7931 # enter ECRYPT_ivsetup
7932 .text
7933 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7934 add %r11,%rsp
7935 mov %rdi,%rax
7936 mov %rsi,%rdx
7937 + pax_force_retaddr
7938 ret
7939 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7940 index 35974a5..5662ae2 100644
7941 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7942 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7943 @@ -21,6 +21,7 @@
7944 .text
7945
7946 #include <asm/asm-offsets.h>
7947 +#include <asm/alternative-asm.h>
7948
7949 #define a_offset 0
7950 #define b_offset 4
7951 @@ -269,6 +270,7 @@ twofish_enc_blk:
7952
7953 popq R1
7954 movq $1,%rax
7955 + pax_force_retaddr 0, 1
7956 ret
7957
7958 twofish_dec_blk:
7959 @@ -321,4 +323,5 @@ twofish_dec_blk:
7960
7961 popq R1
7962 movq $1,%rax
7963 + pax_force_retaddr 0, 1
7964 ret
7965 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7966 index 14531ab..a89a0c0 100644
7967 --- a/arch/x86/ia32/ia32_aout.c
7968 +++ b/arch/x86/ia32/ia32_aout.c
7969 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 unsigned long dump_start, dump_size;
7971 struct user32 dump;
7972
7973 + memset(&dump, 0, sizeof(dump));
7974 +
7975 fs = get_fs();
7976 set_fs(KERNEL_DS);
7977 has_dumped = 1;
7978 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7979 dump_size = dump.u_ssize << PAGE_SHIFT;
7980 DUMP_WRITE(dump_start, dump_size);
7981 }
7982 - /*
7983 - * Finally dump the task struct. Not be used by gdb, but
7984 - * could be useful
7985 - */
7986 - set_fs(KERNEL_DS);
7987 - DUMP_WRITE(current, sizeof(*current));
7988 end_coredump:
7989 set_fs(fs);
7990 return has_dumped;
7991 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7992 index 588a7aa..a3468b0 100644
7993 --- a/arch/x86/ia32/ia32_signal.c
7994 +++ b/arch/x86/ia32/ia32_signal.c
7995 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7996 }
7997 seg = get_fs();
7998 set_fs(KERNEL_DS);
7999 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8000 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8001 set_fs(seg);
8002 if (ret >= 0 && uoss_ptr) {
8003 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8004 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8005 */
8006 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8007 size_t frame_size,
8008 - void **fpstate)
8009 + void __user **fpstate)
8010 {
8011 unsigned long sp;
8012
8013 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8014
8015 if (used_math()) {
8016 sp = sp - sig_xstate_ia32_size;
8017 - *fpstate = (struct _fpstate_ia32 *) sp;
8018 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8019 if (save_i387_xstate_ia32(*fpstate) < 0)
8020 return (void __user *) -1L;
8021 }
8022 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8023 sp -= frame_size;
8024 /* Align the stack pointer according to the i386 ABI,
8025 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8026 - sp = ((sp + 4) & -16ul) - 4;
8027 + sp = ((sp - 12) & -16ul) - 4;
8028 return (void __user *) sp;
8029 }
8030
8031 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8032 * These are actually not used anymore, but left because some
8033 * gdb versions depend on them as a marker.
8034 */
8035 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8036 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8037 } put_user_catch(err);
8038
8039 if (err)
8040 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8041 0xb8,
8042 __NR_ia32_rt_sigreturn,
8043 0x80cd,
8044 - 0,
8045 + 0
8046 };
8047
8048 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8049 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8050
8051 if (ka->sa.sa_flags & SA_RESTORER)
8052 restorer = ka->sa.sa_restorer;
8053 + else if (current->mm->context.vdso)
8054 + /* Return stub is in 32bit vsyscall page */
8055 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8056 else
8057 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8058 - rt_sigreturn);
8059 + restorer = &frame->retcode;
8060 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8061
8062 /*
8063 * Not actually used anymore, but left because some gdb
8064 * versions need it.
8065 */
8066 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8067 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8068 } put_user_catch(err);
8069
8070 if (err)
8071 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8072 index 4edd8eb..29124b4 100644
8073 --- a/arch/x86/ia32/ia32entry.S
8074 +++ b/arch/x86/ia32/ia32entry.S
8075 @@ -13,7 +13,9 @@
8076 #include <asm/thread_info.h>
8077 #include <asm/segment.h>
8078 #include <asm/irqflags.h>
8079 +#include <asm/pgtable.h>
8080 #include <linux/linkage.h>
8081 +#include <asm/alternative-asm.h>
8082
8083 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8084 #include <linux/elf-em.h>
8085 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8086 ENDPROC(native_irq_enable_sysexit)
8087 #endif
8088
8089 + .macro pax_enter_kernel_user
8090 + pax_set_fptr_mask
8091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8092 + call pax_enter_kernel_user
8093 +#endif
8094 + .endm
8095 +
8096 + .macro pax_exit_kernel_user
8097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8098 + call pax_exit_kernel_user
8099 +#endif
8100 +#ifdef CONFIG_PAX_RANDKSTACK
8101 + pushq %rax
8102 + pushq %r11
8103 + call pax_randomize_kstack
8104 + popq %r11
8105 + popq %rax
8106 +#endif
8107 + .endm
8108 +
8109 +.macro pax_erase_kstack
8110 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8111 + call pax_erase_kstack
8112 +#endif
8113 +.endm
8114 +
8115 /*
8116 * 32bit SYSENTER instruction entry.
8117 *
8118 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8119 CFI_REGISTER rsp,rbp
8120 SWAPGS_UNSAFE_STACK
8121 movq PER_CPU_VAR(kernel_stack), %rsp
8122 - addq $(KERNEL_STACK_OFFSET),%rsp
8123 - /*
8124 - * No need to follow this irqs on/off section: the syscall
8125 - * disabled irqs, here we enable it straight after entry:
8126 - */
8127 - ENABLE_INTERRUPTS(CLBR_NONE)
8128 movl %ebp,%ebp /* zero extension */
8129 pushq $__USER32_DS
8130 CFI_ADJUST_CFA_OFFSET 8
8131 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8132 pushfq
8133 CFI_ADJUST_CFA_OFFSET 8
8134 /*CFI_REL_OFFSET rflags,0*/
8135 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8136 - CFI_REGISTER rip,r10
8137 + orl $X86_EFLAGS_IF,(%rsp)
8138 + GET_THREAD_INFO(%r11)
8139 + movl TI_sysenter_return(%r11), %r11d
8140 + CFI_REGISTER rip,r11
8141 pushq $__USER32_CS
8142 CFI_ADJUST_CFA_OFFSET 8
8143 /*CFI_REL_OFFSET cs,0*/
8144 movl %eax, %eax
8145 - pushq %r10
8146 + pushq %r11
8147 CFI_ADJUST_CFA_OFFSET 8
8148 CFI_REL_OFFSET rip,0
8149 pushq %rax
8150 CFI_ADJUST_CFA_OFFSET 8
8151 cld
8152 SAVE_ARGS 0,0,1
8153 + pax_enter_kernel_user
8154 + /*
8155 + * No need to follow this irqs on/off section: the syscall
8156 + * disabled irqs, here we enable it straight after entry:
8157 + */
8158 + ENABLE_INTERRUPTS(CLBR_NONE)
8159 /* no need to do an access_ok check here because rbp has been
8160 32bit zero extended */
8161 +
8162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8163 + mov $PAX_USER_SHADOW_BASE,%r11
8164 + add %r11,%rbp
8165 +#endif
8166 +
8167 1: movl (%rbp),%ebp
8168 .section __ex_table,"a"
8169 .quad 1b,ia32_badarg
8170 .previous
8171 - GET_THREAD_INFO(%r10)
8172 - orl $TS_COMPAT,TI_status(%r10)
8173 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8174 + GET_THREAD_INFO(%r11)
8175 + orl $TS_COMPAT,TI_status(%r11)
8176 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8177 CFI_REMEMBER_STATE
8178 jnz sysenter_tracesys
8179 cmpq $(IA32_NR_syscalls-1),%rax
8180 @@ -166,13 +202,15 @@ sysenter_do_call:
8181 sysenter_dispatch:
8182 call *ia32_sys_call_table(,%rax,8)
8183 movq %rax,RAX-ARGOFFSET(%rsp)
8184 - GET_THREAD_INFO(%r10)
8185 + GET_THREAD_INFO(%r11)
8186 DISABLE_INTERRUPTS(CLBR_NONE)
8187 TRACE_IRQS_OFF
8188 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8189 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8190 jnz sysexit_audit
8191 sysexit_from_sys_call:
8192 - andl $~TS_COMPAT,TI_status(%r10)
8193 + pax_exit_kernel_user
8194 + pax_erase_kstack
8195 + andl $~TS_COMPAT,TI_status(%r11)
8196 /* clear IF, that popfq doesn't enable interrupts early */
8197 andl $~0x200,EFLAGS-R11(%rsp)
8198 movl RIP-R11(%rsp),%edx /* User %eip */
8199 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
8200 movl %eax,%esi /* 2nd arg: syscall number */
8201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8202 call audit_syscall_entry
8203 +
8204 + pax_erase_kstack
8205 +
8206 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8207 cmpq $(IA32_NR_syscalls-1),%rax
8208 ja ia32_badsys
8209 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
8210 .endm
8211
8212 .macro auditsys_exit exit
8213 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8214 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8215 jnz ia32_ret_from_sys_call
8216 TRACE_IRQS_ON
8217 sti
8218 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
8219 movzbl %al,%edi /* zero-extend that into %edi */
8220 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8221 call audit_syscall_exit
8222 - GET_THREAD_INFO(%r10)
8223 + GET_THREAD_INFO(%r11)
8224 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8225 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8226 cli
8227 TRACE_IRQS_OFF
8228 - testl %edi,TI_flags(%r10)
8229 + testl %edi,TI_flags(%r11)
8230 jz \exit
8231 CLEAR_RREGS -ARGOFFSET
8232 jmp int_with_check
8233 @@ -244,7 +285,7 @@ sysexit_audit:
8234
8235 sysenter_tracesys:
8236 #ifdef CONFIG_AUDITSYSCALL
8237 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8238 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8239 jz sysenter_auditsys
8240 #endif
8241 SAVE_REST
8242 @@ -252,6 +293,9 @@ sysenter_tracesys:
8243 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8244 movq %rsp,%rdi /* &pt_regs -> arg1 */
8245 call syscall_trace_enter
8246 +
8247 + pax_erase_kstack
8248 +
8249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8250 RESTORE_REST
8251 cmpq $(IA32_NR_syscalls-1),%rax
8252 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8253 ENTRY(ia32_cstar_target)
8254 CFI_STARTPROC32 simple
8255 CFI_SIGNAL_FRAME
8256 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8257 + CFI_DEF_CFA rsp,0
8258 CFI_REGISTER rip,rcx
8259 /*CFI_REGISTER rflags,r11*/
8260 SWAPGS_UNSAFE_STACK
8261 movl %esp,%r8d
8262 CFI_REGISTER rsp,r8
8263 movq PER_CPU_VAR(kernel_stack),%rsp
8264 + SAVE_ARGS 8*6,1,1
8265 + pax_enter_kernel_user
8266 /*
8267 * No need to follow this irqs on/off section: the syscall
8268 * disabled irqs and here we enable it straight after entry:
8269 */
8270 ENABLE_INTERRUPTS(CLBR_NONE)
8271 - SAVE_ARGS 8,1,1
8272 movl %eax,%eax /* zero extension */
8273 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8274 movq %rcx,RIP-ARGOFFSET(%rsp)
8275 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8276 /* no need to do an access_ok check here because r8 has been
8277 32bit zero extended */
8278 /* hardware stack frame is complete now */
8279 +
8280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8281 + mov $PAX_USER_SHADOW_BASE,%r11
8282 + add %r11,%r8
8283 +#endif
8284 +
8285 1: movl (%r8),%r9d
8286 .section __ex_table,"a"
8287 .quad 1b,ia32_badarg
8288 .previous
8289 - GET_THREAD_INFO(%r10)
8290 - orl $TS_COMPAT,TI_status(%r10)
8291 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8292 + GET_THREAD_INFO(%r11)
8293 + orl $TS_COMPAT,TI_status(%r11)
8294 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8295 CFI_REMEMBER_STATE
8296 jnz cstar_tracesys
8297 cmpq $IA32_NR_syscalls-1,%rax
8298 @@ -327,13 +378,15 @@ cstar_do_call:
8299 cstar_dispatch:
8300 call *ia32_sys_call_table(,%rax,8)
8301 movq %rax,RAX-ARGOFFSET(%rsp)
8302 - GET_THREAD_INFO(%r10)
8303 + GET_THREAD_INFO(%r11)
8304 DISABLE_INTERRUPTS(CLBR_NONE)
8305 TRACE_IRQS_OFF
8306 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8307 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8308 jnz sysretl_audit
8309 sysretl_from_sys_call:
8310 - andl $~TS_COMPAT,TI_status(%r10)
8311 + pax_exit_kernel_user
8312 + pax_erase_kstack
8313 + andl $~TS_COMPAT,TI_status(%r11)
8314 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8315 movl RIP-ARGOFFSET(%rsp),%ecx
8316 CFI_REGISTER rip,rcx
8317 @@ -361,7 +414,7 @@ sysretl_audit:
8318
8319 cstar_tracesys:
8320 #ifdef CONFIG_AUDITSYSCALL
8321 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8322 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8323 jz cstar_auditsys
8324 #endif
8325 xchgl %r9d,%ebp
8326 @@ -370,6 +423,9 @@ cstar_tracesys:
8327 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8328 movq %rsp,%rdi /* &pt_regs -> arg1 */
8329 call syscall_trace_enter
8330 +
8331 + pax_erase_kstack
8332 +
8333 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8334 RESTORE_REST
8335 xchgl %ebp,%r9d
8336 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8337 CFI_REL_OFFSET rip,RIP-RIP
8338 PARAVIRT_ADJUST_EXCEPTION_FRAME
8339 SWAPGS
8340 - /*
8341 - * No need to follow this irqs on/off section: the syscall
8342 - * disabled irqs and here we enable it straight after entry:
8343 - */
8344 - ENABLE_INTERRUPTS(CLBR_NONE)
8345 movl %eax,%eax
8346 pushq %rax
8347 CFI_ADJUST_CFA_OFFSET 8
8348 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8349 /* note the registers are not zero extended to the sf.
8350 this could be a problem. */
8351 SAVE_ARGS 0,0,1
8352 - GET_THREAD_INFO(%r10)
8353 - orl $TS_COMPAT,TI_status(%r10)
8354 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8355 + pax_enter_kernel_user
8356 + /*
8357 + * No need to follow this irqs on/off section: the syscall
8358 + * disabled irqs and here we enable it straight after entry:
8359 + */
8360 + ENABLE_INTERRUPTS(CLBR_NONE)
8361 + GET_THREAD_INFO(%r11)
8362 + orl $TS_COMPAT,TI_status(%r11)
8363 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8364 jnz ia32_tracesys
8365 cmpq $(IA32_NR_syscalls-1),%rax
8366 ja ia32_badsys
8367 @@ -448,6 +505,9 @@ ia32_tracesys:
8368 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8369 movq %rsp,%rdi /* &pt_regs -> arg1 */
8370 call syscall_trace_enter
8371 +
8372 + pax_erase_kstack
8373 +
8374 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8375 RESTORE_REST
8376 cmpq $(IA32_NR_syscalls-1),%rax
8377 @@ -462,6 +522,7 @@ ia32_badsys:
8378
8379 quiet_ni_syscall:
8380 movq $-ENOSYS,%rax
8381 + pax_force_retaddr
8382 ret
8383 CFI_ENDPROC
8384
8385 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8386 index 016218c..47ccbdd 100644
8387 --- a/arch/x86/ia32/sys_ia32.c
8388 +++ b/arch/x86/ia32/sys_ia32.c
8389 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8390 */
8391 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8392 {
8393 - typeof(ubuf->st_uid) uid = 0;
8394 - typeof(ubuf->st_gid) gid = 0;
8395 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8396 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8397 SET_UID(uid, stat->uid);
8398 SET_GID(gid, stat->gid);
8399 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8400 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8401 }
8402 set_fs(KERNEL_DS);
8403 ret = sys_rt_sigprocmask(how,
8404 - set ? (sigset_t __user *)&s : NULL,
8405 - oset ? (sigset_t __user *)&s : NULL,
8406 + set ? (sigset_t __force_user *)&s : NULL,
8407 + oset ? (sigset_t __force_user *)&s : NULL,
8408 sigsetsize);
8409 set_fs(old_fs);
8410 if (ret)
8411 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8412 mm_segment_t old_fs = get_fs();
8413
8414 set_fs(KERNEL_DS);
8415 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8416 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8417 set_fs(old_fs);
8418 if (put_compat_timespec(&t, interval))
8419 return -EFAULT;
8420 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8421 mm_segment_t old_fs = get_fs();
8422
8423 set_fs(KERNEL_DS);
8424 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8425 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8426 set_fs(old_fs);
8427 if (!ret) {
8428 switch (_NSIG_WORDS) {
8429 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8430 if (copy_siginfo_from_user32(&info, uinfo))
8431 return -EFAULT;
8432 set_fs(KERNEL_DS);
8433 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8434 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8435 set_fs(old_fs);
8436 return ret;
8437 }
8438 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8439 return -EFAULT;
8440
8441 set_fs(KERNEL_DS);
8442 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8443 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8444 count);
8445 set_fs(old_fs);
8446
8447 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8448 index e2077d3..17d07ad 100644
8449 --- a/arch/x86/include/asm/alternative-asm.h
8450 +++ b/arch/x86/include/asm/alternative-asm.h
8451 @@ -8,10 +8,10 @@
8452
8453 #ifdef CONFIG_SMP
8454 .macro LOCK_PREFIX
8455 -1: lock
8456 +672: lock
8457 .section .smp_locks,"a"
8458 .align 4
8459 - X86_ALIGN 1b
8460 + X86_ALIGN 672b
8461 .previous
8462 .endm
8463 #else
8464 @@ -19,4 +19,43 @@
8465 .endm
8466 #endif
8467
8468 +#ifdef KERNEXEC_PLUGIN
8469 + .macro pax_force_retaddr_bts rip=0
8470 + btsq $63,\rip(%rsp)
8471 + .endm
8472 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8473 + .macro pax_force_retaddr rip=0, reload=0
8474 + btsq $63,\rip(%rsp)
8475 + .endm
8476 + .macro pax_force_fptr ptr
8477 + btsq $63,\ptr
8478 + .endm
8479 + .macro pax_set_fptr_mask
8480 + .endm
8481 +#endif
8482 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8483 + .macro pax_force_retaddr rip=0, reload=0
8484 + .if \reload
8485 + pax_set_fptr_mask
8486 + .endif
8487 + orq %r10,\rip(%rsp)
8488 + .endm
8489 + .macro pax_force_fptr ptr
8490 + orq %r10,\ptr
8491 + .endm
8492 + .macro pax_set_fptr_mask
8493 + movabs $0x8000000000000000,%r10
8494 + .endm
8495 +#endif
8496 +#else
8497 + .macro pax_force_retaddr rip=0, reload=0
8498 + .endm
8499 + .macro pax_force_fptr ptr
8500 + .endm
8501 + .macro pax_force_retaddr_bts rip=0
8502 + .endm
8503 + .macro pax_set_fptr_mask
8504 + .endm
8505 +#endif
8506 +
8507 #endif /* __ASSEMBLY__ */
8508 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8509 index c240efc..fdfadf3 100644
8510 --- a/arch/x86/include/asm/alternative.h
8511 +++ b/arch/x86/include/asm/alternative.h
8512 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8513 " .byte 662b-661b\n" /* sourcelen */ \
8514 " .byte 664f-663f\n" /* replacementlen */ \
8515 ".previous\n" \
8516 - ".section .altinstr_replacement, \"ax\"\n" \
8517 + ".section .altinstr_replacement, \"a\"\n" \
8518 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8519 ".previous"
8520
8521 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8522 index 474d80d..1f97d58 100644
8523 --- a/arch/x86/include/asm/apic.h
8524 +++ b/arch/x86/include/asm/apic.h
8525 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8526
8527 #ifdef CONFIG_X86_LOCAL_APIC
8528
8529 -extern unsigned int apic_verbosity;
8530 +extern int apic_verbosity;
8531 extern int local_apic_timer_c2_ok;
8532
8533 extern int disable_apic;
8534 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8535 index 20370c6..a2eb9b0 100644
8536 --- a/arch/x86/include/asm/apm.h
8537 +++ b/arch/x86/include/asm/apm.h
8538 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8539 __asm__ __volatile__(APM_DO_ZERO_SEGS
8540 "pushl %%edi\n\t"
8541 "pushl %%ebp\n\t"
8542 - "lcall *%%cs:apm_bios_entry\n\t"
8543 + "lcall *%%ss:apm_bios_entry\n\t"
8544 "setc %%al\n\t"
8545 "popl %%ebp\n\t"
8546 "popl %%edi\n\t"
8547 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8548 __asm__ __volatile__(APM_DO_ZERO_SEGS
8549 "pushl %%edi\n\t"
8550 "pushl %%ebp\n\t"
8551 - "lcall *%%cs:apm_bios_entry\n\t"
8552 + "lcall *%%ss:apm_bios_entry\n\t"
8553 "setc %%bl\n\t"
8554 "popl %%ebp\n\t"
8555 "popl %%edi\n\t"
8556 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8557 index dc5a667..939040c 100644
8558 --- a/arch/x86/include/asm/atomic_32.h
8559 +++ b/arch/x86/include/asm/atomic_32.h
8560 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8561 }
8562
8563 /**
8564 + * atomic_read_unchecked - read atomic variable
8565 + * @v: pointer of type atomic_unchecked_t
8566 + *
8567 + * Atomically reads the value of @v.
8568 + */
8569 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8570 +{
8571 + return v->counter;
8572 +}
8573 +
8574 +/**
8575 * atomic_set - set atomic variable
8576 * @v: pointer of type atomic_t
8577 * @i: required value
8578 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8579 }
8580
8581 /**
8582 + * atomic_set_unchecked - set atomic variable
8583 + * @v: pointer of type atomic_unchecked_t
8584 + * @i: required value
8585 + *
8586 + * Atomically sets the value of @v to @i.
8587 + */
8588 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8589 +{
8590 + v->counter = i;
8591 +}
8592 +
8593 +/**
8594 * atomic_add - add integer to atomic variable
8595 * @i: integer value to add
8596 * @v: pointer of type atomic_t
8597 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8598 */
8599 static inline void atomic_add(int i, atomic_t *v)
8600 {
8601 - asm volatile(LOCK_PREFIX "addl %1,%0"
8602 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8603 +
8604 +#ifdef CONFIG_PAX_REFCOUNT
8605 + "jno 0f\n"
8606 + LOCK_PREFIX "subl %1,%0\n"
8607 + "int $4\n0:\n"
8608 + _ASM_EXTABLE(0b, 0b)
8609 +#endif
8610 +
8611 + : "+m" (v->counter)
8612 + : "ir" (i));
8613 +}
8614 +
8615 +/**
8616 + * atomic_add_unchecked - add integer to atomic variable
8617 + * @i: integer value to add
8618 + * @v: pointer of type atomic_unchecked_t
8619 + *
8620 + * Atomically adds @i to @v.
8621 + */
8622 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8623 +{
8624 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8625 : "+m" (v->counter)
8626 : "ir" (i));
8627 }
8628 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8629 */
8630 static inline void atomic_sub(int i, atomic_t *v)
8631 {
8632 - asm volatile(LOCK_PREFIX "subl %1,%0"
8633 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8634 +
8635 +#ifdef CONFIG_PAX_REFCOUNT
8636 + "jno 0f\n"
8637 + LOCK_PREFIX "addl %1,%0\n"
8638 + "int $4\n0:\n"
8639 + _ASM_EXTABLE(0b, 0b)
8640 +#endif
8641 +
8642 + : "+m" (v->counter)
8643 + : "ir" (i));
8644 +}
8645 +
8646 +/**
8647 + * atomic_sub_unchecked - subtract integer from atomic variable
8648 + * @i: integer value to subtract
8649 + * @v: pointer of type atomic_unchecked_t
8650 + *
8651 + * Atomically subtracts @i from @v.
8652 + */
8653 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8654 +{
8655 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8656 : "+m" (v->counter)
8657 : "ir" (i));
8658 }
8659 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8660 {
8661 unsigned char c;
8662
8663 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8664 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8665 +
8666 +#ifdef CONFIG_PAX_REFCOUNT
8667 + "jno 0f\n"
8668 + LOCK_PREFIX "addl %2,%0\n"
8669 + "int $4\n0:\n"
8670 + _ASM_EXTABLE(0b, 0b)
8671 +#endif
8672 +
8673 + "sete %1\n"
8674 : "+m" (v->counter), "=qm" (c)
8675 : "ir" (i) : "memory");
8676 return c;
8677 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8678 */
8679 static inline void atomic_inc(atomic_t *v)
8680 {
8681 - asm volatile(LOCK_PREFIX "incl %0"
8682 + asm volatile(LOCK_PREFIX "incl %0\n"
8683 +
8684 +#ifdef CONFIG_PAX_REFCOUNT
8685 + "jno 0f\n"
8686 + LOCK_PREFIX "decl %0\n"
8687 + "int $4\n0:\n"
8688 + _ASM_EXTABLE(0b, 0b)
8689 +#endif
8690 +
8691 + : "+m" (v->counter));
8692 +}
8693 +
8694 +/**
8695 + * atomic_inc_unchecked - increment atomic variable
8696 + * @v: pointer of type atomic_unchecked_t
8697 + *
8698 + * Atomically increments @v by 1.
8699 + */
8700 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8701 +{
8702 + asm volatile(LOCK_PREFIX "incl %0\n"
8703 : "+m" (v->counter));
8704 }
8705
8706 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8707 */
8708 static inline void atomic_dec(atomic_t *v)
8709 {
8710 - asm volatile(LOCK_PREFIX "decl %0"
8711 + asm volatile(LOCK_PREFIX "decl %0\n"
8712 +
8713 +#ifdef CONFIG_PAX_REFCOUNT
8714 + "jno 0f\n"
8715 + LOCK_PREFIX "incl %0\n"
8716 + "int $4\n0:\n"
8717 + _ASM_EXTABLE(0b, 0b)
8718 +#endif
8719 +
8720 + : "+m" (v->counter));
8721 +}
8722 +
8723 +/**
8724 + * atomic_dec_unchecked - decrement atomic variable
8725 + * @v: pointer of type atomic_unchecked_t
8726 + *
8727 + * Atomically decrements @v by 1.
8728 + */
8729 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8730 +{
8731 + asm volatile(LOCK_PREFIX "decl %0\n"
8732 : "+m" (v->counter));
8733 }
8734
8735 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8736 {
8737 unsigned char c;
8738
8739 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8740 + asm volatile(LOCK_PREFIX "decl %0\n"
8741 +
8742 +#ifdef CONFIG_PAX_REFCOUNT
8743 + "jno 0f\n"
8744 + LOCK_PREFIX "incl %0\n"
8745 + "int $4\n0:\n"
8746 + _ASM_EXTABLE(0b, 0b)
8747 +#endif
8748 +
8749 + "sete %1\n"
8750 : "+m" (v->counter), "=qm" (c)
8751 : : "memory");
8752 return c != 0;
8753 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8754 {
8755 unsigned char c;
8756
8757 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8758 + asm volatile(LOCK_PREFIX "incl %0\n"
8759 +
8760 +#ifdef CONFIG_PAX_REFCOUNT
8761 + "jno 0f\n"
8762 + LOCK_PREFIX "decl %0\n"
8763 + "into\n0:\n"
8764 + _ASM_EXTABLE(0b, 0b)
8765 +#endif
8766 +
8767 + "sete %1\n"
8768 + : "+m" (v->counter), "=qm" (c)
8769 + : : "memory");
8770 + return c != 0;
8771 +}
8772 +
8773 +/**
8774 + * atomic_inc_and_test_unchecked - increment and test
8775 + * @v: pointer of type atomic_unchecked_t
8776 + *
8777 + * Atomically increments @v by 1
8778 + * and returns true if the result is zero, or false for all
8779 + * other cases.
8780 + */
8781 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8782 +{
8783 + unsigned char c;
8784 +
8785 + asm volatile(LOCK_PREFIX "incl %0\n"
8786 + "sete %1\n"
8787 : "+m" (v->counter), "=qm" (c)
8788 : : "memory");
8789 return c != 0;
8790 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8791 {
8792 unsigned char c;
8793
8794 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8795 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8796 +
8797 +#ifdef CONFIG_PAX_REFCOUNT
8798 + "jno 0f\n"
8799 + LOCK_PREFIX "subl %2,%0\n"
8800 + "int $4\n0:\n"
8801 + _ASM_EXTABLE(0b, 0b)
8802 +#endif
8803 +
8804 + "sets %1\n"
8805 : "+m" (v->counter), "=qm" (c)
8806 : "ir" (i) : "memory");
8807 return c;
8808 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8809 #endif
8810 /* Modern 486+ processor */
8811 __i = i;
8812 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8813 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8814 +
8815 +#ifdef CONFIG_PAX_REFCOUNT
8816 + "jno 0f\n"
8817 + "movl %0, %1\n"
8818 + "int $4\n0:\n"
8819 + _ASM_EXTABLE(0b, 0b)
8820 +#endif
8821 +
8822 : "+r" (i), "+m" (v->counter)
8823 : : "memory");
8824 return i + __i;
8825 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8826 }
8827
8828 /**
8829 + * atomic_add_return_unchecked - add integer and return
8830 + * @v: pointer of type atomic_unchecked_t
8831 + * @i: integer value to add
8832 + *
8833 + * Atomically adds @i to @v and returns @i + @v
8834 + */
8835 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8836 +{
8837 + int __i;
8838 +#ifdef CONFIG_M386
8839 + unsigned long flags;
8840 + if (unlikely(boot_cpu_data.x86 <= 3))
8841 + goto no_xadd;
8842 +#endif
8843 + /* Modern 486+ processor */
8844 + __i = i;
8845 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8846 + : "+r" (i), "+m" (v->counter)
8847 + : : "memory");
8848 + return i + __i;
8849 +
8850 +#ifdef CONFIG_M386
8851 +no_xadd: /* Legacy 386 processor */
8852 + local_irq_save(flags);
8853 + __i = atomic_read_unchecked(v);
8854 + atomic_set_unchecked(v, i + __i);
8855 + local_irq_restore(flags);
8856 + return i + __i;
8857 +#endif
8858 +}
8859 +
8860 +/**
8861 * atomic_sub_return - subtract integer and return
8862 * @v: pointer of type atomic_t
8863 * @i: integer value to subtract
8864 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8865 return cmpxchg(&v->counter, old, new);
8866 }
8867
8868 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8869 +{
8870 + return cmpxchg(&v->counter, old, new);
8871 +}
8872 +
8873 static inline int atomic_xchg(atomic_t *v, int new)
8874 {
8875 return xchg(&v->counter, new);
8876 }
8877
8878 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8879 +{
8880 + return xchg(&v->counter, new);
8881 +}
8882 +
8883 /**
8884 * atomic_add_unless - add unless the number is already a given value
8885 * @v: pointer of type atomic_t
8886 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8887 */
8888 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8889 {
8890 - int c, old;
8891 + int c, old, new;
8892 c = atomic_read(v);
8893 for (;;) {
8894 - if (unlikely(c == (u)))
8895 + if (unlikely(c == u))
8896 break;
8897 - old = atomic_cmpxchg((v), c, c + (a));
8898 +
8899 + asm volatile("addl %2,%0\n"
8900 +
8901 +#ifdef CONFIG_PAX_REFCOUNT
8902 + "jno 0f\n"
8903 + "subl %2,%0\n"
8904 + "int $4\n0:\n"
8905 + _ASM_EXTABLE(0b, 0b)
8906 +#endif
8907 +
8908 + : "=r" (new)
8909 + : "0" (c), "ir" (a));
8910 +
8911 + old = atomic_cmpxchg(v, c, new);
8912 if (likely(old == c))
8913 break;
8914 c = old;
8915 }
8916 - return c != (u);
8917 + return c != u;
8918 }
8919
8920 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8921
8922 #define atomic_inc_return(v) (atomic_add_return(1, v))
8923 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8924 +{
8925 + return atomic_add_return_unchecked(1, v);
8926 +}
8927 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8928
8929 /* These are x86-specific, used by some header files */
8930 @@ -266,9 +495,18 @@ typedef struct {
8931 u64 __aligned(8) counter;
8932 } atomic64_t;
8933
8934 +#ifdef CONFIG_PAX_REFCOUNT
8935 +typedef struct {
8936 + u64 __aligned(8) counter;
8937 +} atomic64_unchecked_t;
8938 +#else
8939 +typedef atomic64_t atomic64_unchecked_t;
8940 +#endif
8941 +
8942 #define ATOMIC64_INIT(val) { (val) }
8943
8944 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8945 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8946
8947 /**
8948 * atomic64_xchg - xchg atomic64 variable
8949 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8950 * the old value.
8951 */
8952 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8953 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8954
8955 /**
8956 * atomic64_set - set atomic64 variable
8957 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8958 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8959
8960 /**
8961 + * atomic64_unchecked_set - set atomic64 variable
8962 + * @ptr: pointer to type atomic64_unchecked_t
8963 + * @new_val: value to assign
8964 + *
8965 + * Atomically sets the value of @ptr to @new_val.
8966 + */
8967 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8968 +
8969 +/**
8970 * atomic64_read - read atomic64 variable
8971 * @ptr: pointer to type atomic64_t
8972 *
8973 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8974 return res;
8975 }
8976
8977 -extern u64 atomic64_read(atomic64_t *ptr);
8978 +/**
8979 + * atomic64_read_unchecked - read atomic64 variable
8980 + * @ptr: pointer to type atomic64_unchecked_t
8981 + *
8982 + * Atomically reads the value of @ptr and returns it.
8983 + */
8984 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8985 +{
8986 + u64 res;
8987 +
8988 + /*
8989 + * Note, we inline this atomic64_unchecked_t primitive because
8990 + * it only clobbers EAX/EDX and leaves the others
8991 + * untouched. We also (somewhat subtly) rely on the
8992 + * fact that cmpxchg8b returns the current 64-bit value
8993 + * of the memory location we are touching:
8994 + */
8995 + asm volatile(
8996 + "mov %%ebx, %%eax\n\t"
8997 + "mov %%ecx, %%edx\n\t"
8998 + LOCK_PREFIX "cmpxchg8b %1\n"
8999 + : "=&A" (res)
9000 + : "m" (*ptr)
9001 + );
9002 +
9003 + return res;
9004 +}
9005
9006 /**
9007 * atomic64_add_return - add and return
9008 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9009 * Other variants with different arithmetic operators:
9010 */
9011 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9012 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9013 extern u64 atomic64_inc_return(atomic64_t *ptr);
9014 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9015 extern u64 atomic64_dec_return(atomic64_t *ptr);
9016 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9017
9018 /**
9019 * atomic64_add - add integer to atomic64 variable
9020 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9021 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9022
9023 /**
9024 + * atomic64_add_unchecked - add integer to atomic64 variable
9025 + * @delta: integer value to add
9026 + * @ptr: pointer to type atomic64_unchecked_t
9027 + *
9028 + * Atomically adds @delta to @ptr.
9029 + */
9030 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9031 +
9032 +/**
9033 * atomic64_sub - subtract the atomic64 variable
9034 * @delta: integer value to subtract
9035 * @ptr: pointer to type atomic64_t
9036 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9037 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9038
9039 /**
9040 + * atomic64_sub_unchecked - subtract the atomic64 variable
9041 + * @delta: integer value to subtract
9042 + * @ptr: pointer to type atomic64_unchecked_t
9043 + *
9044 + * Atomically subtracts @delta from @ptr.
9045 + */
9046 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9047 +
9048 +/**
9049 * atomic64_sub_and_test - subtract value from variable and test result
9050 * @delta: integer value to subtract
9051 * @ptr: pointer to type atomic64_t
9052 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9053 extern void atomic64_inc(atomic64_t *ptr);
9054
9055 /**
9056 + * atomic64_inc_unchecked - increment atomic64 variable
9057 + * @ptr: pointer to type atomic64_unchecked_t
9058 + *
9059 + * Atomically increments @ptr by 1.
9060 + */
9061 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9062 +
9063 +/**
9064 * atomic64_dec - decrement atomic64 variable
9065 * @ptr: pointer to type atomic64_t
9066 *
9067 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9068 extern void atomic64_dec(atomic64_t *ptr);
9069
9070 /**
9071 + * atomic64_dec_unchecked - decrement atomic64 variable
9072 + * @ptr: pointer to type atomic64_unchecked_t
9073 + *
9074 + * Atomically decrements @ptr by 1.
9075 + */
9076 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9077 +
9078 +/**
9079 * atomic64_dec_and_test - decrement and test
9080 * @ptr: pointer to type atomic64_t
9081 *
9082 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9083 index d605dc2..fafd7bd 100644
9084 --- a/arch/x86/include/asm/atomic_64.h
9085 +++ b/arch/x86/include/asm/atomic_64.h
9086 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9087 }
9088
9089 /**
9090 + * atomic_read_unchecked - read atomic variable
9091 + * @v: pointer of type atomic_unchecked_t
9092 + *
9093 + * Atomically reads the value of @v.
9094 + */
9095 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9096 +{
9097 + return v->counter;
9098 +}
9099 +
9100 +/**
9101 * atomic_set - set atomic variable
9102 * @v: pointer of type atomic_t
9103 * @i: required value
9104 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9105 }
9106
9107 /**
9108 + * atomic_set_unchecked - set atomic variable
9109 + * @v: pointer of type atomic_unchecked_t
9110 + * @i: required value
9111 + *
9112 + * Atomically sets the value of @v to @i.
9113 + */
9114 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9115 +{
9116 + v->counter = i;
9117 +}
9118 +
9119 +/**
9120 * atomic_add - add integer to atomic variable
9121 * @i: integer value to add
9122 * @v: pointer of type atomic_t
9123 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9124 */
9125 static inline void atomic_add(int i, atomic_t *v)
9126 {
9127 - asm volatile(LOCK_PREFIX "addl %1,%0"
9128 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9129 +
9130 +#ifdef CONFIG_PAX_REFCOUNT
9131 + "jno 0f\n"
9132 + LOCK_PREFIX "subl %1,%0\n"
9133 + "int $4\n0:\n"
9134 + _ASM_EXTABLE(0b, 0b)
9135 +#endif
9136 +
9137 + : "=m" (v->counter)
9138 + : "ir" (i), "m" (v->counter));
9139 +}
9140 +
9141 +/**
9142 + * atomic_add_unchecked - add integer to atomic variable
9143 + * @i: integer value to add
9144 + * @v: pointer of type atomic_unchecked_t
9145 + *
9146 + * Atomically adds @i to @v.
9147 + */
9148 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9149 +{
9150 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9151 : "=m" (v->counter)
9152 : "ir" (i), "m" (v->counter));
9153 }
9154 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9155 */
9156 static inline void atomic_sub(int i, atomic_t *v)
9157 {
9158 - asm volatile(LOCK_PREFIX "subl %1,%0"
9159 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9160 +
9161 +#ifdef CONFIG_PAX_REFCOUNT
9162 + "jno 0f\n"
9163 + LOCK_PREFIX "addl %1,%0\n"
9164 + "int $4\n0:\n"
9165 + _ASM_EXTABLE(0b, 0b)
9166 +#endif
9167 +
9168 + : "=m" (v->counter)
9169 + : "ir" (i), "m" (v->counter));
9170 +}
9171 +
9172 +/**
9173 + * atomic_sub_unchecked - subtract the atomic variable
9174 + * @i: integer value to subtract
9175 + * @v: pointer of type atomic_unchecked_t
9176 + *
9177 + * Atomically subtracts @i from @v.
9178 + */
9179 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9180 +{
9181 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9182 : "=m" (v->counter)
9183 : "ir" (i), "m" (v->counter));
9184 }
9185 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9186 {
9187 unsigned char c;
9188
9189 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9190 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9191 +
9192 +#ifdef CONFIG_PAX_REFCOUNT
9193 + "jno 0f\n"
9194 + LOCK_PREFIX "addl %2,%0\n"
9195 + "int $4\n0:\n"
9196 + _ASM_EXTABLE(0b, 0b)
9197 +#endif
9198 +
9199 + "sete %1\n"
9200 : "=m" (v->counter), "=qm" (c)
9201 : "ir" (i), "m" (v->counter) : "memory");
9202 return c;
9203 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9204 */
9205 static inline void atomic_inc(atomic_t *v)
9206 {
9207 - asm volatile(LOCK_PREFIX "incl %0"
9208 + asm volatile(LOCK_PREFIX "incl %0\n"
9209 +
9210 +#ifdef CONFIG_PAX_REFCOUNT
9211 + "jno 0f\n"
9212 + LOCK_PREFIX "decl %0\n"
9213 + "int $4\n0:\n"
9214 + _ASM_EXTABLE(0b, 0b)
9215 +#endif
9216 +
9217 + : "=m" (v->counter)
9218 + : "m" (v->counter));
9219 +}
9220 +
9221 +/**
9222 + * atomic_inc_unchecked - increment atomic variable
9223 + * @v: pointer of type atomic_unchecked_t
9224 + *
9225 + * Atomically increments @v by 1.
9226 + */
9227 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9228 +{
9229 + asm volatile(LOCK_PREFIX "incl %0\n"
9230 : "=m" (v->counter)
9231 : "m" (v->counter));
9232 }
9233 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9234 */
9235 static inline void atomic_dec(atomic_t *v)
9236 {
9237 - asm volatile(LOCK_PREFIX "decl %0"
9238 + asm volatile(LOCK_PREFIX "decl %0\n"
9239 +
9240 +#ifdef CONFIG_PAX_REFCOUNT
9241 + "jno 0f\n"
9242 + LOCK_PREFIX "incl %0\n"
9243 + "int $4\n0:\n"
9244 + _ASM_EXTABLE(0b, 0b)
9245 +#endif
9246 +
9247 + : "=m" (v->counter)
9248 + : "m" (v->counter));
9249 +}
9250 +
9251 +/**
9252 + * atomic_dec_unchecked - decrement atomic variable
9253 + * @v: pointer of type atomic_unchecked_t
9254 + *
9255 + * Atomically decrements @v by 1.
9256 + */
9257 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9258 +{
9259 + asm volatile(LOCK_PREFIX "decl %0\n"
9260 : "=m" (v->counter)
9261 : "m" (v->counter));
9262 }
9263 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9264 {
9265 unsigned char c;
9266
9267 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9268 + asm volatile(LOCK_PREFIX "decl %0\n"
9269 +
9270 +#ifdef CONFIG_PAX_REFCOUNT
9271 + "jno 0f\n"
9272 + LOCK_PREFIX "incl %0\n"
9273 + "int $4\n0:\n"
9274 + _ASM_EXTABLE(0b, 0b)
9275 +#endif
9276 +
9277 + "sete %1\n"
9278 : "=m" (v->counter), "=qm" (c)
9279 : "m" (v->counter) : "memory");
9280 return c != 0;
9281 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9282 {
9283 unsigned char c;
9284
9285 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9286 + asm volatile(LOCK_PREFIX "incl %0\n"
9287 +
9288 +#ifdef CONFIG_PAX_REFCOUNT
9289 + "jno 0f\n"
9290 + LOCK_PREFIX "decl %0\n"
9291 + "int $4\n0:\n"
9292 + _ASM_EXTABLE(0b, 0b)
9293 +#endif
9294 +
9295 + "sete %1\n"
9296 + : "=m" (v->counter), "=qm" (c)
9297 + : "m" (v->counter) : "memory");
9298 + return c != 0;
9299 +}
9300 +
9301 +/**
9302 + * atomic_inc_and_test_unchecked - increment and test
9303 + * @v: pointer of type atomic_unchecked_t
9304 + *
9305 + * Atomically increments @v by 1
9306 + * and returns true if the result is zero, or false for all
9307 + * other cases.
9308 + */
9309 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9310 +{
9311 + unsigned char c;
9312 +
9313 + asm volatile(LOCK_PREFIX "incl %0\n"
9314 + "sete %1\n"
9315 : "=m" (v->counter), "=qm" (c)
9316 : "m" (v->counter) : "memory");
9317 return c != 0;
9318 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9319 {
9320 unsigned char c;
9321
9322 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9323 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9324 +
9325 +#ifdef CONFIG_PAX_REFCOUNT
9326 + "jno 0f\n"
9327 + LOCK_PREFIX "subl %2,%0\n"
9328 + "int $4\n0:\n"
9329 + _ASM_EXTABLE(0b, 0b)
9330 +#endif
9331 +
9332 + "sets %1\n"
9333 : "=m" (v->counter), "=qm" (c)
9334 : "ir" (i), "m" (v->counter) : "memory");
9335 return c;
9336 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9337 static inline int atomic_add_return(int i, atomic_t *v)
9338 {
9339 int __i = i;
9340 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9341 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9342 +
9343 +#ifdef CONFIG_PAX_REFCOUNT
9344 + "jno 0f\n"
9345 + "movl %0, %1\n"
9346 + "int $4\n0:\n"
9347 + _ASM_EXTABLE(0b, 0b)
9348 +#endif
9349 +
9350 + : "+r" (i), "+m" (v->counter)
9351 + : : "memory");
9352 + return i + __i;
9353 +}
9354 +
9355 +/**
9356 + * atomic_add_return_unchecked - add and return
9357 + * @i: integer value to add
9358 + * @v: pointer of type atomic_unchecked_t
9359 + *
9360 + * Atomically adds @i to @v and returns @i + @v
9361 + */
9362 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9363 +{
9364 + int __i = i;
9365 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9366 : "+r" (i), "+m" (v->counter)
9367 : : "memory");
9368 return i + __i;
9369 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9370 }
9371
9372 #define atomic_inc_return(v) (atomic_add_return(1, v))
9373 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9374 +{
9375 + return atomic_add_return_unchecked(1, v);
9376 +}
9377 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9378
9379 /* The 64-bit atomic type */
9380 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9381 }
9382
9383 /**
9384 + * atomic64_read_unchecked - read atomic64 variable
9385 + * @v: pointer of type atomic64_unchecked_t
9386 + *
9387 + * Atomically reads the value of @v.
9388 + * Doesn't imply a read memory barrier.
9389 + */
9390 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9391 +{
9392 + return v->counter;
9393 +}
9394 +
9395 +/**
9396 * atomic64_set - set atomic64 variable
9397 * @v: pointer to type atomic64_t
9398 * @i: required value
9399 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9400 }
9401
9402 /**
9403 + * atomic64_set_unchecked - set atomic64 variable
9404 + * @v: pointer to type atomic64_unchecked_t
9405 + * @i: required value
9406 + *
9407 + * Atomically sets the value of @v to @i.
9408 + */
9409 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9410 +{
9411 + v->counter = i;
9412 +}
9413 +
9414 +/**
9415 * atomic64_add - add integer to atomic64 variable
9416 * @i: integer value to add
9417 * @v: pointer to type atomic64_t
9418 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9419 */
9420 static inline void atomic64_add(long i, atomic64_t *v)
9421 {
9422 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9423 +
9424 +#ifdef CONFIG_PAX_REFCOUNT
9425 + "jno 0f\n"
9426 + LOCK_PREFIX "subq %1,%0\n"
9427 + "int $4\n0:\n"
9428 + _ASM_EXTABLE(0b, 0b)
9429 +#endif
9430 +
9431 + : "=m" (v->counter)
9432 + : "er" (i), "m" (v->counter));
9433 +}
9434 +
9435 +/**
9436 + * atomic64_add_unchecked - add integer to atomic64 variable
9437 + * @i: integer value to add
9438 + * @v: pointer to type atomic64_unchecked_t
9439 + *
9440 + * Atomically adds @i to @v.
9441 + */
9442 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9443 +{
9444 asm volatile(LOCK_PREFIX "addq %1,%0"
9445 : "=m" (v->counter)
9446 : "er" (i), "m" (v->counter));
9447 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9448 */
9449 static inline void atomic64_sub(long i, atomic64_t *v)
9450 {
9451 - asm volatile(LOCK_PREFIX "subq %1,%0"
9452 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9453 +
9454 +#ifdef CONFIG_PAX_REFCOUNT
9455 + "jno 0f\n"
9456 + LOCK_PREFIX "addq %1,%0\n"
9457 + "int $4\n0:\n"
9458 + _ASM_EXTABLE(0b, 0b)
9459 +#endif
9460 +
9461 : "=m" (v->counter)
9462 : "er" (i), "m" (v->counter));
9463 }
9464 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9465 {
9466 unsigned char c;
9467
9468 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9469 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9470 +
9471 +#ifdef CONFIG_PAX_REFCOUNT
9472 + "jno 0f\n"
9473 + LOCK_PREFIX "addq %2,%0\n"
9474 + "int $4\n0:\n"
9475 + _ASM_EXTABLE(0b, 0b)
9476 +#endif
9477 +
9478 + "sete %1\n"
9479 : "=m" (v->counter), "=qm" (c)
9480 : "er" (i), "m" (v->counter) : "memory");
9481 return c;
9482 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9483 */
9484 static inline void atomic64_inc(atomic64_t *v)
9485 {
9486 + asm volatile(LOCK_PREFIX "incq %0\n"
9487 +
9488 +#ifdef CONFIG_PAX_REFCOUNT
9489 + "jno 0f\n"
9490 + LOCK_PREFIX "decq %0\n"
9491 + "int $4\n0:\n"
9492 + _ASM_EXTABLE(0b, 0b)
9493 +#endif
9494 +
9495 + : "=m" (v->counter)
9496 + : "m" (v->counter));
9497 +}
9498 +
9499 +/**
9500 + * atomic64_inc_unchecked - increment atomic64 variable
9501 + * @v: pointer to type atomic64_unchecked_t
9502 + *
9503 + * Atomically increments @v by 1.
9504 + */
9505 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9506 +{
9507 asm volatile(LOCK_PREFIX "incq %0"
9508 : "=m" (v->counter)
9509 : "m" (v->counter));
9510 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9511 */
9512 static inline void atomic64_dec(atomic64_t *v)
9513 {
9514 - asm volatile(LOCK_PREFIX "decq %0"
9515 + asm volatile(LOCK_PREFIX "decq %0\n"
9516 +
9517 +#ifdef CONFIG_PAX_REFCOUNT
9518 + "jno 0f\n"
9519 + LOCK_PREFIX "incq %0\n"
9520 + "int $4\n0:\n"
9521 + _ASM_EXTABLE(0b, 0b)
9522 +#endif
9523 +
9524 + : "=m" (v->counter)
9525 + : "m" (v->counter));
9526 +}
9527 +
9528 +/**
9529 + * atomic64_dec_unchecked - decrement atomic64 variable
9530 + * @v: pointer to type atomic64_t
9531 + *
9532 + * Atomically decrements @v by 1.
9533 + */
9534 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9535 +{
9536 + asm volatile(LOCK_PREFIX "decq %0\n"
9537 : "=m" (v->counter)
9538 : "m" (v->counter));
9539 }
9540 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9541 {
9542 unsigned char c;
9543
9544 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9545 + asm volatile(LOCK_PREFIX "decq %0\n"
9546 +
9547 +#ifdef CONFIG_PAX_REFCOUNT
9548 + "jno 0f\n"
9549 + LOCK_PREFIX "incq %0\n"
9550 + "int $4\n0:\n"
9551 + _ASM_EXTABLE(0b, 0b)
9552 +#endif
9553 +
9554 + "sete %1\n"
9555 : "=m" (v->counter), "=qm" (c)
9556 : "m" (v->counter) : "memory");
9557 return c != 0;
9558 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9559 {
9560 unsigned char c;
9561
9562 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9563 + asm volatile(LOCK_PREFIX "incq %0\n"
9564 +
9565 +#ifdef CONFIG_PAX_REFCOUNT
9566 + "jno 0f\n"
9567 + LOCK_PREFIX "decq %0\n"
9568 + "int $4\n0:\n"
9569 + _ASM_EXTABLE(0b, 0b)
9570 +#endif
9571 +
9572 + "sete %1\n"
9573 : "=m" (v->counter), "=qm" (c)
9574 : "m" (v->counter) : "memory");
9575 return c != 0;
9576 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9577 {
9578 unsigned char c;
9579
9580 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9581 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9582 +
9583 +#ifdef CONFIG_PAX_REFCOUNT
9584 + "jno 0f\n"
9585 + LOCK_PREFIX "subq %2,%0\n"
9586 + "int $4\n0:\n"
9587 + _ASM_EXTABLE(0b, 0b)
9588 +#endif
9589 +
9590 + "sets %1\n"
9591 : "=m" (v->counter), "=qm" (c)
9592 : "er" (i), "m" (v->counter) : "memory");
9593 return c;
9594 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9595 static inline long atomic64_add_return(long i, atomic64_t *v)
9596 {
9597 long __i = i;
9598 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9599 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9600 +
9601 +#ifdef CONFIG_PAX_REFCOUNT
9602 + "jno 0f\n"
9603 + "movq %0, %1\n"
9604 + "int $4\n0:\n"
9605 + _ASM_EXTABLE(0b, 0b)
9606 +#endif
9607 +
9608 + : "+r" (i), "+m" (v->counter)
9609 + : : "memory");
9610 + return i + __i;
9611 +}
9612 +
9613 +/**
9614 + * atomic64_add_return_unchecked - add and return
9615 + * @i: integer value to add
9616 + * @v: pointer to type atomic64_unchecked_t
9617 + *
9618 + * Atomically adds @i to @v and returns @i + @v
9619 + */
9620 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9621 +{
9622 + long __i = i;
9623 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9624 : "+r" (i), "+m" (v->counter)
9625 : : "memory");
9626 return i + __i;
9627 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9628 }
9629
9630 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9631 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9632 +{
9633 + return atomic64_add_return_unchecked(1, v);
9634 +}
9635 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9636
9637 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9638 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9639 return cmpxchg(&v->counter, old, new);
9640 }
9641
9642 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9643 +{
9644 + return cmpxchg(&v->counter, old, new);
9645 +}
9646 +
9647 static inline long atomic64_xchg(atomic64_t *v, long new)
9648 {
9649 return xchg(&v->counter, new);
9650 }
9651
9652 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9653 +{
9654 + return xchg(&v->counter, new);
9655 +}
9656 +
9657 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9658 {
9659 return cmpxchg(&v->counter, old, new);
9660 }
9661
9662 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9663 +{
9664 + return cmpxchg(&v->counter, old, new);
9665 +}
9666 +
9667 static inline long atomic_xchg(atomic_t *v, int new)
9668 {
9669 return xchg(&v->counter, new);
9670 }
9671
9672 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9673 +{
9674 + return xchg(&v->counter, new);
9675 +}
9676 +
9677 /**
9678 * atomic_add_unless - add unless the number is a given value
9679 * @v: pointer of type atomic_t
9680 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9681 */
9682 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9683 {
9684 - int c, old;
9685 + int c, old, new;
9686 c = atomic_read(v);
9687 for (;;) {
9688 - if (unlikely(c == (u)))
9689 + if (unlikely(c == u))
9690 break;
9691 - old = atomic_cmpxchg((v), c, c + (a));
9692 +
9693 + asm volatile("addl %2,%0\n"
9694 +
9695 +#ifdef CONFIG_PAX_REFCOUNT
9696 + "jno 0f\n"
9697 + "subl %2,%0\n"
9698 + "int $4\n0:\n"
9699 + _ASM_EXTABLE(0b, 0b)
9700 +#endif
9701 +
9702 + : "=r" (new)
9703 + : "0" (c), "ir" (a));
9704 +
9705 + old = atomic_cmpxchg(v, c, new);
9706 if (likely(old == c))
9707 break;
9708 c = old;
9709 }
9710 - return c != (u);
9711 + return c != u;
9712 }
9713
9714 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9715 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9716 */
9717 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9718 {
9719 - long c, old;
9720 + long c, old, new;
9721 c = atomic64_read(v);
9722 for (;;) {
9723 - if (unlikely(c == (u)))
9724 + if (unlikely(c == u))
9725 break;
9726 - old = atomic64_cmpxchg((v), c, c + (a));
9727 +
9728 + asm volatile("addq %2,%0\n"
9729 +
9730 +#ifdef CONFIG_PAX_REFCOUNT
9731 + "jno 0f\n"
9732 + "subq %2,%0\n"
9733 + "int $4\n0:\n"
9734 + _ASM_EXTABLE(0b, 0b)
9735 +#endif
9736 +
9737 + : "=r" (new)
9738 + : "0" (c), "er" (a));
9739 +
9740 + old = atomic64_cmpxchg(v, c, new);
9741 if (likely(old == c))
9742 break;
9743 c = old;
9744 }
9745 - return c != (u);
9746 + return c != u;
9747 }
9748
9749 /**
9750 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9751 index 02b47a6..d5c4b15 100644
9752 --- a/arch/x86/include/asm/bitops.h
9753 +++ b/arch/x86/include/asm/bitops.h
9754 @@ -38,7 +38,7 @@
9755 * a mask operation on a byte.
9756 */
9757 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9758 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9759 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9760 #define CONST_MASK(nr) (1 << ((nr) & 7))
9761
9762 /**
9763 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9764 index 7a10659..8bbf355 100644
9765 --- a/arch/x86/include/asm/boot.h
9766 +++ b/arch/x86/include/asm/boot.h
9767 @@ -11,10 +11,15 @@
9768 #include <asm/pgtable_types.h>
9769
9770 /* Physical address where kernel should be loaded. */
9771 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9772 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9773 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9774 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9775
9776 +#ifndef __ASSEMBLY__
9777 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9778 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9779 +#endif
9780 +
9781 /* Minimum kernel alignment, as a power of two */
9782 #ifdef CONFIG_X86_64
9783 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9784 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9785 index 549860d..7d45f68 100644
9786 --- a/arch/x86/include/asm/cache.h
9787 +++ b/arch/x86/include/asm/cache.h
9788 @@ -5,9 +5,10 @@
9789
9790 /* L1 cache line size */
9791 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9792 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9793 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9794
9795 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9796 +#define __read_only __attribute__((__section__(".data.read_only")))
9797
9798 #ifdef CONFIG_X86_VSMP
9799 /* vSMP Internode cacheline shift */
9800 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9801 index b54f6af..5b376a6 100644
9802 --- a/arch/x86/include/asm/cacheflush.h
9803 +++ b/arch/x86/include/asm/cacheflush.h
9804 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9805 static inline unsigned long get_page_memtype(struct page *pg)
9806 {
9807 if (!PageUncached(pg) && !PageWC(pg))
9808 - return -1;
9809 + return ~0UL;
9810 else if (!PageUncached(pg) && PageWC(pg))
9811 return _PAGE_CACHE_WC;
9812 else if (PageUncached(pg) && !PageWC(pg))
9813 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9814 SetPageWC(pg);
9815 break;
9816 default:
9817 - case -1:
9818 + case ~0UL:
9819 ClearPageUncached(pg);
9820 ClearPageWC(pg);
9821 break;
9822 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9823 index 0e63c9a..ab8d972 100644
9824 --- a/arch/x86/include/asm/calling.h
9825 +++ b/arch/x86/include/asm/calling.h
9826 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9827 * for assembly code:
9828 */
9829
9830 -#define R15 0
9831 -#define R14 8
9832 -#define R13 16
9833 -#define R12 24
9834 -#define RBP 32
9835 -#define RBX 40
9836 +#define R15 (0)
9837 +#define R14 (8)
9838 +#define R13 (16)
9839 +#define R12 (24)
9840 +#define RBP (32)
9841 +#define RBX (40)
9842
9843 /* arguments: interrupts/non tracing syscalls only save up to here: */
9844 -#define R11 48
9845 -#define R10 56
9846 -#define R9 64
9847 -#define R8 72
9848 -#define RAX 80
9849 -#define RCX 88
9850 -#define RDX 96
9851 -#define RSI 104
9852 -#define RDI 112
9853 -#define ORIG_RAX 120 /* + error_code */
9854 +#define R11 (48)
9855 +#define R10 (56)
9856 +#define R9 (64)
9857 +#define R8 (72)
9858 +#define RAX (80)
9859 +#define RCX (88)
9860 +#define RDX (96)
9861 +#define RSI (104)
9862 +#define RDI (112)
9863 +#define ORIG_RAX (120) /* + error_code */
9864 /* end of arguments */
9865
9866 /* cpu exception frame or undefined in case of fast syscall: */
9867 -#define RIP 128
9868 -#define CS 136
9869 -#define EFLAGS 144
9870 -#define RSP 152
9871 -#define SS 160
9872 +#define RIP (128)
9873 +#define CS (136)
9874 +#define EFLAGS (144)
9875 +#define RSP (152)
9876 +#define SS (160)
9877
9878 #define ARGOFFSET R11
9879 #define SWFRAME ORIG_RAX
9880 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9881 index 46fc474..b02b0f9 100644
9882 --- a/arch/x86/include/asm/checksum_32.h
9883 +++ b/arch/x86/include/asm/checksum_32.h
9884 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9885 int len, __wsum sum,
9886 int *src_err_ptr, int *dst_err_ptr);
9887
9888 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9889 + int len, __wsum sum,
9890 + int *src_err_ptr, int *dst_err_ptr);
9891 +
9892 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9893 + int len, __wsum sum,
9894 + int *src_err_ptr, int *dst_err_ptr);
9895 +
9896 /*
9897 * Note: when you get a NULL pointer exception here this means someone
9898 * passed in an incorrect kernel address to one of these functions.
9899 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9900 int *err_ptr)
9901 {
9902 might_sleep();
9903 - return csum_partial_copy_generic((__force void *)src, dst,
9904 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9905 len, sum, err_ptr, NULL);
9906 }
9907
9908 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9909 {
9910 might_sleep();
9911 if (access_ok(VERIFY_WRITE, dst, len))
9912 - return csum_partial_copy_generic(src, (__force void *)dst,
9913 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9914 len, sum, NULL, err_ptr);
9915
9916 if (len)
9917 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9918 index 617bd56..7b047a1 100644
9919 --- a/arch/x86/include/asm/desc.h
9920 +++ b/arch/x86/include/asm/desc.h
9921 @@ -4,6 +4,7 @@
9922 #include <asm/desc_defs.h>
9923 #include <asm/ldt.h>
9924 #include <asm/mmu.h>
9925 +#include <asm/pgtable.h>
9926 #include <linux/smp.h>
9927
9928 static inline void fill_ldt(struct desc_struct *desc,
9929 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9930 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9931 desc->type = (info->read_exec_only ^ 1) << 1;
9932 desc->type |= info->contents << 2;
9933 + desc->type |= info->seg_not_present ^ 1;
9934 desc->s = 1;
9935 desc->dpl = 0x3;
9936 desc->p = info->seg_not_present ^ 1;
9937 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9938 }
9939
9940 extern struct desc_ptr idt_descr;
9941 -extern gate_desc idt_table[];
9942 -
9943 -struct gdt_page {
9944 - struct desc_struct gdt[GDT_ENTRIES];
9945 -} __attribute__((aligned(PAGE_SIZE)));
9946 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9947 +extern gate_desc idt_table[256];
9948
9949 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9950 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9951 {
9952 - return per_cpu(gdt_page, cpu).gdt;
9953 + return cpu_gdt_table[cpu];
9954 }
9955
9956 #ifdef CONFIG_X86_64
9957 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9958 unsigned long base, unsigned dpl, unsigned flags,
9959 unsigned short seg)
9960 {
9961 - gate->a = (seg << 16) | (base & 0xffff);
9962 - gate->b = (base & 0xffff0000) |
9963 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9964 + gate->gate.offset_low = base;
9965 + gate->gate.seg = seg;
9966 + gate->gate.reserved = 0;
9967 + gate->gate.type = type;
9968 + gate->gate.s = 0;
9969 + gate->gate.dpl = dpl;
9970 + gate->gate.p = 1;
9971 + gate->gate.offset_high = base >> 16;
9972 }
9973
9974 #endif
9975 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9976 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9977 const gate_desc *gate)
9978 {
9979 + pax_open_kernel();
9980 memcpy(&idt[entry], gate, sizeof(*gate));
9981 + pax_close_kernel();
9982 }
9983
9984 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9985 const void *desc)
9986 {
9987 + pax_open_kernel();
9988 memcpy(&ldt[entry], desc, 8);
9989 + pax_close_kernel();
9990 }
9991
9992 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9993 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9994 size = sizeof(struct desc_struct);
9995 break;
9996 }
9997 +
9998 + pax_open_kernel();
9999 memcpy(&gdt[entry], desc, size);
10000 + pax_close_kernel();
10001 }
10002
10003 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10004 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10005
10006 static inline void native_load_tr_desc(void)
10007 {
10008 + pax_open_kernel();
10009 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10010 + pax_close_kernel();
10011 }
10012
10013 static inline void native_load_gdt(const struct desc_ptr *dtr)
10014 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10015 unsigned int i;
10016 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10017
10018 + pax_open_kernel();
10019 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10020 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10021 + pax_close_kernel();
10022 }
10023
10024 #define _LDT_empty(info) \
10025 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10026 desc->limit = (limit >> 16) & 0xf;
10027 }
10028
10029 -static inline void _set_gate(int gate, unsigned type, void *addr,
10030 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10031 unsigned dpl, unsigned ist, unsigned seg)
10032 {
10033 gate_desc s;
10034 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10035 * Pentium F0 0F bugfix can have resulted in the mapped
10036 * IDT being write-protected.
10037 */
10038 -static inline void set_intr_gate(unsigned int n, void *addr)
10039 +static inline void set_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10043 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10044 /*
10045 * This routine sets up an interrupt gate at directory privilege level 3.
10046 */
10047 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10048 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10049 {
10050 BUG_ON((unsigned)n > 0xFF);
10051 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10052 }
10053
10054 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10055 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10056 {
10057 BUG_ON((unsigned)n > 0xFF);
10058 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10059 }
10060
10061 -static inline void set_trap_gate(unsigned int n, void *addr)
10062 +static inline void set_trap_gate(unsigned int n, const void *addr)
10063 {
10064 BUG_ON((unsigned)n > 0xFF);
10065 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10066 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10067 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10071 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10072 }
10073
10074 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10075 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10076 {
10077 BUG_ON((unsigned)n > 0xFF);
10078 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10079 }
10080
10081 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10082 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10083 {
10084 BUG_ON((unsigned)n > 0xFF);
10085 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10086 }
10087
10088 +#ifdef CONFIG_X86_32
10089 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10090 +{
10091 + struct desc_struct d;
10092 +
10093 + if (likely(limit))
10094 + limit = (limit - 1UL) >> PAGE_SHIFT;
10095 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10096 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10097 +}
10098 +#endif
10099 +
10100 #endif /* _ASM_X86_DESC_H */
10101 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10102 index 9d66848..6b4a691 100644
10103 --- a/arch/x86/include/asm/desc_defs.h
10104 +++ b/arch/x86/include/asm/desc_defs.h
10105 @@ -31,6 +31,12 @@ struct desc_struct {
10106 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10107 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10108 };
10109 + struct {
10110 + u16 offset_low;
10111 + u16 seg;
10112 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10113 + unsigned offset_high: 16;
10114 + } gate;
10115 };
10116 } __attribute__((packed));
10117
10118 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10119 index cee34e9..a7c3fa2 100644
10120 --- a/arch/x86/include/asm/device.h
10121 +++ b/arch/x86/include/asm/device.h
10122 @@ -6,7 +6,7 @@ struct dev_archdata {
10123 void *acpi_handle;
10124 #endif
10125 #ifdef CONFIG_X86_64
10126 -struct dma_map_ops *dma_ops;
10127 + const struct dma_map_ops *dma_ops;
10128 #endif
10129 #ifdef CONFIG_DMAR
10130 void *iommu; /* hook for IOMMU specific extension */
10131 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10132 index 6a25d5d..786b202 100644
10133 --- a/arch/x86/include/asm/dma-mapping.h
10134 +++ b/arch/x86/include/asm/dma-mapping.h
10135 @@ -25,9 +25,9 @@ extern int iommu_merge;
10136 extern struct device x86_dma_fallback_dev;
10137 extern int panic_on_overflow;
10138
10139 -extern struct dma_map_ops *dma_ops;
10140 +extern const struct dma_map_ops *dma_ops;
10141
10142 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10143 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return dma_ops;
10147 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10148 /* Make sure we keep the same behaviour */
10149 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10150 {
10151 - struct dma_map_ops *ops = get_dma_ops(dev);
10152 + const struct dma_map_ops *ops = get_dma_ops(dev);
10153 if (ops->mapping_error)
10154 return ops->mapping_error(dev, dma_addr);
10155
10156 @@ -122,7 +122,7 @@ static inline void *
10157 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10158 gfp_t gfp)
10159 {
10160 - struct dma_map_ops *ops = get_dma_ops(dev);
10161 + const struct dma_map_ops *ops = get_dma_ops(dev);
10162 void *memory;
10163
10164 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10165 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10166 static inline void dma_free_coherent(struct device *dev, size_t size,
10167 void *vaddr, dma_addr_t bus)
10168 {
10169 - struct dma_map_ops *ops = get_dma_ops(dev);
10170 + const struct dma_map_ops *ops = get_dma_ops(dev);
10171
10172 WARN_ON(irqs_disabled()); /* for portability */
10173
10174 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10175 index 40b4e61..40d8133 100644
10176 --- a/arch/x86/include/asm/e820.h
10177 +++ b/arch/x86/include/asm/e820.h
10178 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10179 #define ISA_END_ADDRESS 0x100000
10180 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10181
10182 -#define BIOS_BEGIN 0x000a0000
10183 +#define BIOS_BEGIN 0x000c0000
10184 #define BIOS_END 0x00100000
10185
10186 #ifdef __KERNEL__
10187 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10188 index 8ac9d9a..0a6c96e 100644
10189 --- a/arch/x86/include/asm/elf.h
10190 +++ b/arch/x86/include/asm/elf.h
10191 @@ -257,7 +257,25 @@ extern int force_personality32;
10192 the loader. We need to make sure that it is out of the way of the program
10193 that it will "exec", and that there is sufficient room for the brk. */
10194
10195 +#ifdef CONFIG_PAX_SEGMEXEC
10196 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10197 +#else
10198 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10199 +#endif
10200 +
10201 +#ifdef CONFIG_PAX_ASLR
10202 +#ifdef CONFIG_X86_32
10203 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10204 +
10205 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10206 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10207 +#else
10208 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10209 +
10210 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10211 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10212 +#endif
10213 +#endif
10214
10215 /* This yields a mask that user programs can use to figure out what
10216 instruction set this CPU supports. This could be done in user space,
10217 @@ -310,9 +328,7 @@ do { \
10218
10219 #define ARCH_DLINFO \
10220 do { \
10221 - if (vdso_enabled) \
10222 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10223 - (unsigned long)current->mm->context.vdso); \
10224 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10225 } while (0)
10226
10227 #define AT_SYSINFO 32
10228 @@ -323,7 +339,7 @@ do { \
10229
10230 #endif /* !CONFIG_X86_32 */
10231
10232 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10233 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10234
10235 #define VDSO_ENTRY \
10236 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10237 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10238 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10239 #define compat_arch_setup_additional_pages syscall32_setup_pages
10240
10241 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10242 -#define arch_randomize_brk arch_randomize_brk
10243 -
10244 #endif /* _ASM_X86_ELF_H */
10245 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10246 index cc70c1c..d96d011 100644
10247 --- a/arch/x86/include/asm/emergency-restart.h
10248 +++ b/arch/x86/include/asm/emergency-restart.h
10249 @@ -15,6 +15,6 @@ enum reboot_type {
10250
10251 extern enum reboot_type reboot_type;
10252
10253 -extern void machine_emergency_restart(void);
10254 +extern void machine_emergency_restart(void) __noreturn;
10255
10256 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10257 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10258 index 1f11ce4..7caabd1 100644
10259 --- a/arch/x86/include/asm/futex.h
10260 +++ b/arch/x86/include/asm/futex.h
10261 @@ -12,16 +12,18 @@
10262 #include <asm/system.h>
10263
10264 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10265 + typecheck(u32 __user *, uaddr); \
10266 asm volatile("1:\t" insn "\n" \
10267 "2:\t.section .fixup,\"ax\"\n" \
10268 "3:\tmov\t%3, %1\n" \
10269 "\tjmp\t2b\n" \
10270 "\t.previous\n" \
10271 _ASM_EXTABLE(1b, 3b) \
10272 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10273 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10274 : "i" (-EFAULT), "0" (oparg), "1" (0))
10275
10276 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10277 + typecheck(u32 __user *, uaddr); \
10278 asm volatile("1:\tmovl %2, %0\n" \
10279 "\tmovl\t%0, %3\n" \
10280 "\t" insn "\n" \
10281 @@ -34,10 +36,10 @@
10282 _ASM_EXTABLE(1b, 4b) \
10283 _ASM_EXTABLE(2b, 4b) \
10284 : "=&a" (oldval), "=&r" (ret), \
10285 - "+m" (*uaddr), "=&r" (tem) \
10286 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10287 : "r" (oparg), "i" (-EFAULT), "1" (0))
10288
10289 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10290 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10291 {
10292 int op = (encoded_op >> 28) & 7;
10293 int cmp = (encoded_op >> 24) & 15;
10294 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10295
10296 switch (op) {
10297 case FUTEX_OP_SET:
10298 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10299 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10300 break;
10301 case FUTEX_OP_ADD:
10302 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10303 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10304 uaddr, oparg);
10305 break;
10306 case FUTEX_OP_OR:
10307 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10308 return ret;
10309 }
10310
10311 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10312 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10313 int newval)
10314 {
10315
10316 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10317 return -ENOSYS;
10318 #endif
10319
10320 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10321 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10322 return -EFAULT;
10323
10324 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10325 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10326 "2:\t.section .fixup, \"ax\"\n"
10327 "3:\tmov %2, %0\n"
10328 "\tjmp 2b\n"
10329 "\t.previous\n"
10330 _ASM_EXTABLE(1b, 3b)
10331 - : "=a" (oldval), "+m" (*uaddr)
10332 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10333 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10334 : "memory"
10335 );
10336 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10337 index ba180d9..3bad351 100644
10338 --- a/arch/x86/include/asm/hw_irq.h
10339 +++ b/arch/x86/include/asm/hw_irq.h
10340 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10341 extern void enable_IO_APIC(void);
10342
10343 /* Statistics */
10344 -extern atomic_t irq_err_count;
10345 -extern atomic_t irq_mis_count;
10346 +extern atomic_unchecked_t irq_err_count;
10347 +extern atomic_unchecked_t irq_mis_count;
10348
10349 /* EISA */
10350 extern void eisa_set_level_irq(unsigned int irq);
10351 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10352 index 0b20bbb..4cb1396 100644
10353 --- a/arch/x86/include/asm/i387.h
10354 +++ b/arch/x86/include/asm/i387.h
10355 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10356 {
10357 int err;
10358
10359 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10360 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10361 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10362 +#endif
10363 +
10364 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10365 "2:\n"
10366 ".section .fixup,\"ax\"\n"
10367 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10368 {
10369 int err;
10370
10371 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10372 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10373 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10374 +#endif
10375 +
10376 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10377 "2:\n"
10378 ".section .fixup,\"ax\"\n"
10379 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10380 }
10381
10382 /* We need a safe address that is cheap to find and that is already
10383 - in L1 during context switch. The best choices are unfortunately
10384 - different for UP and SMP */
10385 -#ifdef CONFIG_SMP
10386 -#define safe_address (__per_cpu_offset[0])
10387 -#else
10388 -#define safe_address (kstat_cpu(0).cpustat.user)
10389 -#endif
10390 + in L1 during context switch. */
10391 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10392
10393 /*
10394 * These must be called with preempt disabled
10395 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10396 struct thread_info *me = current_thread_info();
10397 preempt_disable();
10398 if (me->status & TS_USEDFPU)
10399 - __save_init_fpu(me->task);
10400 + __save_init_fpu(current);
10401 else
10402 clts();
10403 }
10404 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10405 index a299900..15c5410 100644
10406 --- a/arch/x86/include/asm/io_32.h
10407 +++ b/arch/x86/include/asm/io_32.h
10408 @@ -3,6 +3,7 @@
10409
10410 #include <linux/string.h>
10411 #include <linux/compiler.h>
10412 +#include <asm/processor.h>
10413
10414 /*
10415 * This file contains the definitions for the x86 IO instructions
10416 @@ -42,6 +43,17 @@
10417
10418 #ifdef __KERNEL__
10419
10420 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10421 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10422 +{
10423 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10424 +}
10425 +
10426 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10427 +{
10428 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10429 +}
10430 +
10431 #include <asm-generic/iomap.h>
10432
10433 #include <linux/vmalloc.h>
10434 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10435 index 2440678..c158b88 100644
10436 --- a/arch/x86/include/asm/io_64.h
10437 +++ b/arch/x86/include/asm/io_64.h
10438 @@ -140,6 +140,17 @@ __OUTS(l)
10439
10440 #include <linux/vmalloc.h>
10441
10442 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10443 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10444 +{
10445 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10446 +}
10447 +
10448 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10449 +{
10450 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10451 +}
10452 +
10453 #include <asm-generic/iomap.h>
10454
10455 void __memcpy_fromio(void *, unsigned long, unsigned);
10456 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10457 index fd6d21b..8b13915 100644
10458 --- a/arch/x86/include/asm/iommu.h
10459 +++ b/arch/x86/include/asm/iommu.h
10460 @@ -3,7 +3,7 @@
10461
10462 extern void pci_iommu_shutdown(void);
10463 extern void no_iommu_init(void);
10464 -extern struct dma_map_ops nommu_dma_ops;
10465 +extern const struct dma_map_ops nommu_dma_ops;
10466 extern int force_iommu, no_iommu;
10467 extern int iommu_detected;
10468 extern int iommu_pass_through;
10469 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10470 index 9e2b952..557206e 100644
10471 --- a/arch/x86/include/asm/irqflags.h
10472 +++ b/arch/x86/include/asm/irqflags.h
10473 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10474 sti; \
10475 sysexit
10476
10477 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10478 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10479 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10480 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10481 +
10482 #else
10483 #define INTERRUPT_RETURN iret
10484 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10485 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10486 index 4fe681d..bb6d40c 100644
10487 --- a/arch/x86/include/asm/kprobes.h
10488 +++ b/arch/x86/include/asm/kprobes.h
10489 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10490 #define BREAKPOINT_INSTRUCTION 0xcc
10491 #define RELATIVEJUMP_INSTRUCTION 0xe9
10492 #define MAX_INSN_SIZE 16
10493 -#define MAX_STACK_SIZE 64
10494 -#define MIN_STACK_SIZE(ADDR) \
10495 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10496 - THREAD_SIZE - (unsigned long)(ADDR))) \
10497 - ? (MAX_STACK_SIZE) \
10498 - : (((unsigned long)current_thread_info()) + \
10499 - THREAD_SIZE - (unsigned long)(ADDR)))
10500 +#define MAX_STACK_SIZE 64UL
10501 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10502
10503 #define flush_insn_slot(p) do { } while (0)
10504
10505 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10506 index 08bc2ff..2e88d1f 100644
10507 --- a/arch/x86/include/asm/kvm_host.h
10508 +++ b/arch/x86/include/asm/kvm_host.h
10509 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10510 bool (*gb_page_enable)(void);
10511
10512 const struct trace_print_flags *exit_reasons_str;
10513 -};
10514 +} __do_const;
10515
10516 -extern struct kvm_x86_ops *kvm_x86_ops;
10517 +extern const struct kvm_x86_ops *kvm_x86_ops;
10518
10519 int kvm_mmu_module_init(void);
10520 void kvm_mmu_module_exit(void);
10521 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10522 index 47b9b6f..815aaa1 100644
10523 --- a/arch/x86/include/asm/local.h
10524 +++ b/arch/x86/include/asm/local.h
10525 @@ -18,26 +18,58 @@ typedef struct {
10526
10527 static inline void local_inc(local_t *l)
10528 {
10529 - asm volatile(_ASM_INC "%0"
10530 + asm volatile(_ASM_INC "%0\n"
10531 +
10532 +#ifdef CONFIG_PAX_REFCOUNT
10533 + "jno 0f\n"
10534 + _ASM_DEC "%0\n"
10535 + "int $4\n0:\n"
10536 + _ASM_EXTABLE(0b, 0b)
10537 +#endif
10538 +
10539 : "+m" (l->a.counter));
10540 }
10541
10542 static inline void local_dec(local_t *l)
10543 {
10544 - asm volatile(_ASM_DEC "%0"
10545 + asm volatile(_ASM_DEC "%0\n"
10546 +
10547 +#ifdef CONFIG_PAX_REFCOUNT
10548 + "jno 0f\n"
10549 + _ASM_INC "%0\n"
10550 + "int $4\n0:\n"
10551 + _ASM_EXTABLE(0b, 0b)
10552 +#endif
10553 +
10554 : "+m" (l->a.counter));
10555 }
10556
10557 static inline void local_add(long i, local_t *l)
10558 {
10559 - asm volatile(_ASM_ADD "%1,%0"
10560 + asm volatile(_ASM_ADD "%1,%0\n"
10561 +
10562 +#ifdef CONFIG_PAX_REFCOUNT
10563 + "jno 0f\n"
10564 + _ASM_SUB "%1,%0\n"
10565 + "int $4\n0:\n"
10566 + _ASM_EXTABLE(0b, 0b)
10567 +#endif
10568 +
10569 : "+m" (l->a.counter)
10570 : "ir" (i));
10571 }
10572
10573 static inline void local_sub(long i, local_t *l)
10574 {
10575 - asm volatile(_ASM_SUB "%1,%0"
10576 + asm volatile(_ASM_SUB "%1,%0\n"
10577 +
10578 +#ifdef CONFIG_PAX_REFCOUNT
10579 + "jno 0f\n"
10580 + _ASM_ADD "%1,%0\n"
10581 + "int $4\n0:\n"
10582 + _ASM_EXTABLE(0b, 0b)
10583 +#endif
10584 +
10585 : "+m" (l->a.counter)
10586 : "ir" (i));
10587 }
10588 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10589 {
10590 unsigned char c;
10591
10592 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10593 + asm volatile(_ASM_SUB "%2,%0\n"
10594 +
10595 +#ifdef CONFIG_PAX_REFCOUNT
10596 + "jno 0f\n"
10597 + _ASM_ADD "%2,%0\n"
10598 + "int $4\n0:\n"
10599 + _ASM_EXTABLE(0b, 0b)
10600 +#endif
10601 +
10602 + "sete %1\n"
10603 : "+m" (l->a.counter), "=qm" (c)
10604 : "ir" (i) : "memory");
10605 return c;
10606 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10607 {
10608 unsigned char c;
10609
10610 - asm volatile(_ASM_DEC "%0; sete %1"
10611 + asm volatile(_ASM_DEC "%0\n"
10612 +
10613 +#ifdef CONFIG_PAX_REFCOUNT
10614 + "jno 0f\n"
10615 + _ASM_INC "%0\n"
10616 + "int $4\n0:\n"
10617 + _ASM_EXTABLE(0b, 0b)
10618 +#endif
10619 +
10620 + "sete %1\n"
10621 : "+m" (l->a.counter), "=qm" (c)
10622 : : "memory");
10623 return c != 0;
10624 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10625 {
10626 unsigned char c;
10627
10628 - asm volatile(_ASM_INC "%0; sete %1"
10629 + asm volatile(_ASM_INC "%0\n"
10630 +
10631 +#ifdef CONFIG_PAX_REFCOUNT
10632 + "jno 0f\n"
10633 + _ASM_DEC "%0\n"
10634 + "int $4\n0:\n"
10635 + _ASM_EXTABLE(0b, 0b)
10636 +#endif
10637 +
10638 + "sete %1\n"
10639 : "+m" (l->a.counter), "=qm" (c)
10640 : : "memory");
10641 return c != 0;
10642 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10643 {
10644 unsigned char c;
10645
10646 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10647 + asm volatile(_ASM_ADD "%2,%0\n"
10648 +
10649 +#ifdef CONFIG_PAX_REFCOUNT
10650 + "jno 0f\n"
10651 + _ASM_SUB "%2,%0\n"
10652 + "int $4\n0:\n"
10653 + _ASM_EXTABLE(0b, 0b)
10654 +#endif
10655 +
10656 + "sets %1\n"
10657 : "+m" (l->a.counter), "=qm" (c)
10658 : "ir" (i) : "memory");
10659 return c;
10660 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10661 #endif
10662 /* Modern 486+ processor */
10663 __i = i;
10664 - asm volatile(_ASM_XADD "%0, %1;"
10665 + asm volatile(_ASM_XADD "%0, %1\n"
10666 +
10667 +#ifdef CONFIG_PAX_REFCOUNT
10668 + "jno 0f\n"
10669 + _ASM_MOV "%0,%1\n"
10670 + "int $4\n0:\n"
10671 + _ASM_EXTABLE(0b, 0b)
10672 +#endif
10673 +
10674 : "+r" (i), "+m" (l->a.counter)
10675 : : "memory");
10676 return i + __i;
10677 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10678 index ef51b50..514ba37 100644
10679 --- a/arch/x86/include/asm/microcode.h
10680 +++ b/arch/x86/include/asm/microcode.h
10681 @@ -12,13 +12,13 @@ struct device;
10682 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10683
10684 struct microcode_ops {
10685 - enum ucode_state (*request_microcode_user) (int cpu,
10686 + enum ucode_state (* const request_microcode_user) (int cpu,
10687 const void __user *buf, size_t size);
10688
10689 - enum ucode_state (*request_microcode_fw) (int cpu,
10690 + enum ucode_state (* const request_microcode_fw) (int cpu,
10691 struct device *device);
10692
10693 - void (*microcode_fini_cpu) (int cpu);
10694 + void (* const microcode_fini_cpu) (int cpu);
10695
10696 /*
10697 * The generic 'microcode_core' part guarantees that
10698 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10699 extern struct ucode_cpu_info ucode_cpu_info[];
10700
10701 #ifdef CONFIG_MICROCODE_INTEL
10702 -extern struct microcode_ops * __init init_intel_microcode(void);
10703 +extern const struct microcode_ops * __init init_intel_microcode(void);
10704 #else
10705 -static inline struct microcode_ops * __init init_intel_microcode(void)
10706 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10707 {
10708 return NULL;
10709 }
10710 #endif /* CONFIG_MICROCODE_INTEL */
10711
10712 #ifdef CONFIG_MICROCODE_AMD
10713 -extern struct microcode_ops * __init init_amd_microcode(void);
10714 +extern const struct microcode_ops * __init init_amd_microcode(void);
10715 #else
10716 -static inline struct microcode_ops * __init init_amd_microcode(void)
10717 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10718 {
10719 return NULL;
10720 }
10721 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10722 index 593e51d..fa69c9a 100644
10723 --- a/arch/x86/include/asm/mman.h
10724 +++ b/arch/x86/include/asm/mman.h
10725 @@ -5,4 +5,14 @@
10726
10727 #include <asm-generic/mman.h>
10728
10729 +#ifdef __KERNEL__
10730 +#ifndef __ASSEMBLY__
10731 +#ifdef CONFIG_X86_32
10732 +#define arch_mmap_check i386_mmap_check
10733 +int i386_mmap_check(unsigned long addr, unsigned long len,
10734 + unsigned long flags);
10735 +#endif
10736 +#endif
10737 +#endif
10738 +
10739 #endif /* _ASM_X86_MMAN_H */
10740 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10741 index 80a1dee..239c67d 100644
10742 --- a/arch/x86/include/asm/mmu.h
10743 +++ b/arch/x86/include/asm/mmu.h
10744 @@ -9,10 +9,23 @@
10745 * we put the segment information here.
10746 */
10747 typedef struct {
10748 - void *ldt;
10749 + struct desc_struct *ldt;
10750 int size;
10751 struct mutex lock;
10752 - void *vdso;
10753 + unsigned long vdso;
10754 +
10755 +#ifdef CONFIG_X86_32
10756 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10757 + unsigned long user_cs_base;
10758 + unsigned long user_cs_limit;
10759 +
10760 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10761 + cpumask_t cpu_user_cs_mask;
10762 +#endif
10763 +
10764 +#endif
10765 +#endif
10766 +
10767 } mm_context_t;
10768
10769 #ifdef CONFIG_SMP
10770 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10771 index 8b5393e..8143173 100644
10772 --- a/arch/x86/include/asm/mmu_context.h
10773 +++ b/arch/x86/include/asm/mmu_context.h
10774 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10775
10776 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10777 {
10778 +
10779 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10780 + unsigned int i;
10781 + pgd_t *pgd;
10782 +
10783 + pax_open_kernel();
10784 + pgd = get_cpu_pgd(smp_processor_id());
10785 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10786 + set_pgd_batched(pgd+i, native_make_pgd(0));
10787 + pax_close_kernel();
10788 +#endif
10789 +
10790 #ifdef CONFIG_SMP
10791 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10792 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10793 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10794 struct task_struct *tsk)
10795 {
10796 unsigned cpu = smp_processor_id();
10797 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10798 + int tlbstate = TLBSTATE_OK;
10799 +#endif
10800
10801 if (likely(prev != next)) {
10802 #ifdef CONFIG_SMP
10803 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10804 + tlbstate = percpu_read(cpu_tlbstate.state);
10805 +#endif
10806 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10807 percpu_write(cpu_tlbstate.active_mm, next);
10808 #endif
10809 cpumask_set_cpu(cpu, mm_cpumask(next));
10810
10811 /* Re-load page tables */
10812 +#ifdef CONFIG_PAX_PER_CPU_PGD
10813 + pax_open_kernel();
10814 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10815 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10816 + pax_close_kernel();
10817 + load_cr3(get_cpu_pgd(cpu));
10818 +#else
10819 load_cr3(next->pgd);
10820 +#endif
10821
10822 /* stop flush ipis for the previous mm */
10823 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10824 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10825 */
10826 if (unlikely(prev->context.ldt != next->context.ldt))
10827 load_LDT_nolock(&next->context);
10828 - }
10829 +
10830 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10831 + if (!nx_enabled) {
10832 + smp_mb__before_clear_bit();
10833 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10834 + smp_mb__after_clear_bit();
10835 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10836 + }
10837 +#endif
10838 +
10839 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10840 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10841 + prev->context.user_cs_limit != next->context.user_cs_limit))
10842 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10843 #ifdef CONFIG_SMP
10844 + else if (unlikely(tlbstate != TLBSTATE_OK))
10845 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10846 +#endif
10847 +#endif
10848 +
10849 + }
10850 else {
10851 +
10852 +#ifdef CONFIG_PAX_PER_CPU_PGD
10853 + pax_open_kernel();
10854 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10855 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10856 + pax_close_kernel();
10857 + load_cr3(get_cpu_pgd(cpu));
10858 +#endif
10859 +
10860 +#ifdef CONFIG_SMP
10861 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10862 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10863
10864 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 * tlb flush IPI delivery. We must reload CR3
10866 * to make sure to use no freed page tables.
10867 */
10868 +
10869 +#ifndef CONFIG_PAX_PER_CPU_PGD
10870 load_cr3(next->pgd);
10871 +#endif
10872 +
10873 load_LDT_nolock(&next->context);
10874 +
10875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10876 + if (!nx_enabled)
10877 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10878 +#endif
10879 +
10880 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10881 +#ifdef CONFIG_PAX_PAGEEXEC
10882 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10883 +#endif
10884 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10885 +#endif
10886 +
10887 }
10888 +#endif
10889 }
10890 -#endif
10891 }
10892
10893 #define activate_mm(prev, next) \
10894 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10895 index 3e2ce58..caaf478 100644
10896 --- a/arch/x86/include/asm/module.h
10897 +++ b/arch/x86/include/asm/module.h
10898 @@ -5,6 +5,7 @@
10899
10900 #ifdef CONFIG_X86_64
10901 /* X86_64 does not define MODULE_PROC_FAMILY */
10902 +#define MODULE_PROC_FAMILY ""
10903 #elif defined CONFIG_M386
10904 #define MODULE_PROC_FAMILY "386 "
10905 #elif defined CONFIG_M486
10906 @@ -59,13 +60,26 @@
10907 #error unknown processor family
10908 #endif
10909
10910 -#ifdef CONFIG_X86_32
10911 -# ifdef CONFIG_4KSTACKS
10912 -# define MODULE_STACKSIZE "4KSTACKS "
10913 -# else
10914 -# define MODULE_STACKSIZE ""
10915 -# endif
10916 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10917 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10918 +#define MODULE_STACKSIZE "4KSTACKS "
10919 +#else
10920 +#define MODULE_STACKSIZE ""
10921 #endif
10922
10923 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10924 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10925 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10926 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10927 +#else
10928 +#define MODULE_PAX_KERNEXEC ""
10929 +#endif
10930 +
10931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10932 +#define MODULE_PAX_UDEREF "UDEREF "
10933 +#else
10934 +#define MODULE_PAX_UDEREF ""
10935 +#endif
10936 +
10937 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10938 +
10939 #endif /* _ASM_X86_MODULE_H */
10940 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10941 index 7639dbf..e08a58c 100644
10942 --- a/arch/x86/include/asm/page_64_types.h
10943 +++ b/arch/x86/include/asm/page_64_types.h
10944 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10945
10946 /* duplicated to the one in bootmem.h */
10947 extern unsigned long max_pfn;
10948 -extern unsigned long phys_base;
10949 +extern const unsigned long phys_base;
10950
10951 extern unsigned long __phys_addr(unsigned long);
10952 #define __phys_reloc_hide(x) (x)
10953 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10954 index efb3899..ef30687 100644
10955 --- a/arch/x86/include/asm/paravirt.h
10956 +++ b/arch/x86/include/asm/paravirt.h
10957 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10958 val);
10959 }
10960
10961 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10962 +{
10963 + pgdval_t val = native_pgd_val(pgd);
10964 +
10965 + if (sizeof(pgdval_t) > sizeof(long))
10966 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10967 + val, (u64)val >> 32);
10968 + else
10969 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10970 + val);
10971 +}
10972 +
10973 static inline void pgd_clear(pgd_t *pgdp)
10974 {
10975 set_pgd(pgdp, __pgd(0));
10976 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10977 pv_mmu_ops.set_fixmap(idx, phys, flags);
10978 }
10979
10980 +#ifdef CONFIG_PAX_KERNEXEC
10981 +static inline unsigned long pax_open_kernel(void)
10982 +{
10983 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10984 +}
10985 +
10986 +static inline unsigned long pax_close_kernel(void)
10987 +{
10988 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10989 +}
10990 +#else
10991 +static inline unsigned long pax_open_kernel(void) { return 0; }
10992 +static inline unsigned long pax_close_kernel(void) { return 0; }
10993 +#endif
10994 +
10995 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10996
10997 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10998 @@ -945,7 +972,7 @@ extern void default_banner(void);
10999
11000 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11001 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11002 -#define PARA_INDIRECT(addr) *%cs:addr
11003 +#define PARA_INDIRECT(addr) *%ss:addr
11004 #endif
11005
11006 #define INTERRUPT_RETURN \
11007 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11008 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11009 CLBR_NONE, \
11010 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11011 +
11012 +#define GET_CR0_INTO_RDI \
11013 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11014 + mov %rax,%rdi
11015 +
11016 +#define SET_RDI_INTO_CR0 \
11017 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11018 +
11019 +#define GET_CR3_INTO_RDI \
11020 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11021 + mov %rax,%rdi
11022 +
11023 +#define SET_RDI_INTO_CR3 \
11024 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11025 +
11026 #endif /* CONFIG_X86_32 */
11027
11028 #endif /* __ASSEMBLY__ */
11029 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11030 index 9357473..aeb2de5 100644
11031 --- a/arch/x86/include/asm/paravirt_types.h
11032 +++ b/arch/x86/include/asm/paravirt_types.h
11033 @@ -78,19 +78,19 @@ struct pv_init_ops {
11034 */
11035 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11036 unsigned long addr, unsigned len);
11037 -};
11038 +} __no_const;
11039
11040
11041 struct pv_lazy_ops {
11042 /* Set deferred update mode, used for batching operations. */
11043 void (*enter)(void);
11044 void (*leave)(void);
11045 -};
11046 +} __no_const;
11047
11048 struct pv_time_ops {
11049 unsigned long long (*sched_clock)(void);
11050 unsigned long (*get_tsc_khz)(void);
11051 -};
11052 +} __no_const;
11053
11054 struct pv_cpu_ops {
11055 /* hooks for various privileged instructions */
11056 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11057
11058 void (*start_context_switch)(struct task_struct *prev);
11059 void (*end_context_switch)(struct task_struct *next);
11060 -};
11061 +} __no_const;
11062
11063 struct pv_irq_ops {
11064 /*
11065 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11066 unsigned long start_eip,
11067 unsigned long start_esp);
11068 #endif
11069 -};
11070 +} __no_const;
11071
11072 struct pv_mmu_ops {
11073 unsigned long (*read_cr2)(void);
11074 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11075 struct paravirt_callee_save make_pud;
11076
11077 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11078 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11079 #endif /* PAGETABLE_LEVELS == 4 */
11080 #endif /* PAGETABLE_LEVELS >= 3 */
11081
11082 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
11083 an mfn. We can tell which is which from the index. */
11084 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11085 phys_addr_t phys, pgprot_t flags);
11086 +
11087 +#ifdef CONFIG_PAX_KERNEXEC
11088 + unsigned long (*pax_open_kernel)(void);
11089 + unsigned long (*pax_close_kernel)(void);
11090 +#endif
11091 +
11092 };
11093
11094 struct raw_spinlock;
11095 @@ -326,7 +333,7 @@ struct pv_lock_ops {
11096 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11097 int (*spin_trylock)(struct raw_spinlock *lock);
11098 void (*spin_unlock)(struct raw_spinlock *lock);
11099 -};
11100 +} __no_const;
11101
11102 /* This contains all the paravirt structures: we get a convenient
11103 * number for each function using the offset which we use to indicate
11104 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11105 index b399988..3f47c38 100644
11106 --- a/arch/x86/include/asm/pci_x86.h
11107 +++ b/arch/x86/include/asm/pci_x86.h
11108 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11109 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11110
11111 struct pci_raw_ops {
11112 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11113 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11114 int reg, int len, u32 *val);
11115 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11116 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11117 int reg, int len, u32 val);
11118 };
11119
11120 -extern struct pci_raw_ops *raw_pci_ops;
11121 -extern struct pci_raw_ops *raw_pci_ext_ops;
11122 +extern const struct pci_raw_ops *raw_pci_ops;
11123 +extern const struct pci_raw_ops *raw_pci_ext_ops;
11124
11125 -extern struct pci_raw_ops pci_direct_conf1;
11126 +extern const struct pci_raw_ops pci_direct_conf1;
11127 extern bool port_cf9_safe;
11128
11129 /* arch_initcall level */
11130 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11131 index b65a36d..50345a4 100644
11132 --- a/arch/x86/include/asm/percpu.h
11133 +++ b/arch/x86/include/asm/percpu.h
11134 @@ -78,6 +78,7 @@ do { \
11135 if (0) { \
11136 T__ tmp__; \
11137 tmp__ = (val); \
11138 + (void)tmp__; \
11139 } \
11140 switch (sizeof(var)) { \
11141 case 1: \
11142 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11143 index 271de94..ef944d6 100644
11144 --- a/arch/x86/include/asm/pgalloc.h
11145 +++ b/arch/x86/include/asm/pgalloc.h
11146 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11147 pmd_t *pmd, pte_t *pte)
11148 {
11149 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11150 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11151 +}
11152 +
11153 +static inline void pmd_populate_user(struct mm_struct *mm,
11154 + pmd_t *pmd, pte_t *pte)
11155 +{
11156 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11157 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11158 }
11159
11160 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11161 index 2334982..70bc412 100644
11162 --- a/arch/x86/include/asm/pgtable-2level.h
11163 +++ b/arch/x86/include/asm/pgtable-2level.h
11164 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11165
11166 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11167 {
11168 + pax_open_kernel();
11169 *pmdp = pmd;
11170 + pax_close_kernel();
11171 }
11172
11173 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11174 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11175 index 33927d2..ccde329 100644
11176 --- a/arch/x86/include/asm/pgtable-3level.h
11177 +++ b/arch/x86/include/asm/pgtable-3level.h
11178 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11179
11180 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11181 {
11182 + pax_open_kernel();
11183 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11184 + pax_close_kernel();
11185 }
11186
11187 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11188 {
11189 + pax_open_kernel();
11190 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11191 + pax_close_kernel();
11192 }
11193
11194 /*
11195 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11196 index af6fd36..867ff74 100644
11197 --- a/arch/x86/include/asm/pgtable.h
11198 +++ b/arch/x86/include/asm/pgtable.h
11199 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11200
11201 #ifndef __PAGETABLE_PUD_FOLDED
11202 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11203 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11204 #define pgd_clear(pgd) native_pgd_clear(pgd)
11205 #endif
11206
11207 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11208
11209 #define arch_end_context_switch(prev) do {} while(0)
11210
11211 +#define pax_open_kernel() native_pax_open_kernel()
11212 +#define pax_close_kernel() native_pax_close_kernel()
11213 #endif /* CONFIG_PARAVIRT */
11214
11215 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11216 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11217 +
11218 +#ifdef CONFIG_PAX_KERNEXEC
11219 +static inline unsigned long native_pax_open_kernel(void)
11220 +{
11221 + unsigned long cr0;
11222 +
11223 + preempt_disable();
11224 + barrier();
11225 + cr0 = read_cr0() ^ X86_CR0_WP;
11226 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11227 + write_cr0(cr0);
11228 + return cr0 ^ X86_CR0_WP;
11229 +}
11230 +
11231 +static inline unsigned long native_pax_close_kernel(void)
11232 +{
11233 + unsigned long cr0;
11234 +
11235 + cr0 = read_cr0() ^ X86_CR0_WP;
11236 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11237 + write_cr0(cr0);
11238 + barrier();
11239 + preempt_enable_no_resched();
11240 + return cr0 ^ X86_CR0_WP;
11241 +}
11242 +#else
11243 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11244 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11245 +#endif
11246 +
11247 /*
11248 * The following only work if pte_present() is true.
11249 * Undefined behaviour if not..
11250 */
11251 +static inline int pte_user(pte_t pte)
11252 +{
11253 + return pte_val(pte) & _PAGE_USER;
11254 +}
11255 +
11256 static inline int pte_dirty(pte_t pte)
11257 {
11258 return pte_flags(pte) & _PAGE_DIRTY;
11259 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11260 return pte_clear_flags(pte, _PAGE_RW);
11261 }
11262
11263 +static inline pte_t pte_mkread(pte_t pte)
11264 +{
11265 + return __pte(pte_val(pte) | _PAGE_USER);
11266 +}
11267 +
11268 static inline pte_t pte_mkexec(pte_t pte)
11269 {
11270 - return pte_clear_flags(pte, _PAGE_NX);
11271 +#ifdef CONFIG_X86_PAE
11272 + if (__supported_pte_mask & _PAGE_NX)
11273 + return pte_clear_flags(pte, _PAGE_NX);
11274 + else
11275 +#endif
11276 + return pte_set_flags(pte, _PAGE_USER);
11277 +}
11278 +
11279 +static inline pte_t pte_exprotect(pte_t pte)
11280 +{
11281 +#ifdef CONFIG_X86_PAE
11282 + if (__supported_pte_mask & _PAGE_NX)
11283 + return pte_set_flags(pte, _PAGE_NX);
11284 + else
11285 +#endif
11286 + return pte_clear_flags(pte, _PAGE_USER);
11287 }
11288
11289 static inline pte_t pte_mkdirty(pte_t pte)
11290 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11291 #endif
11292
11293 #ifndef __ASSEMBLY__
11294 +
11295 +#ifdef CONFIG_PAX_PER_CPU_PGD
11296 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11297 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11298 +{
11299 + return cpu_pgd[cpu];
11300 +}
11301 +#endif
11302 +
11303 #include <linux/mm_types.h>
11304
11305 static inline int pte_none(pte_t pte)
11306 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11307
11308 static inline int pgd_bad(pgd_t pgd)
11309 {
11310 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11311 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11312 }
11313
11314 static inline int pgd_none(pgd_t pgd)
11315 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11316 * pgd_offset() returns a (pgd_t *)
11317 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11318 */
11319 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11320 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11321 +
11322 +#ifdef CONFIG_PAX_PER_CPU_PGD
11323 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11324 +#endif
11325 +
11326 /*
11327 * a shortcut which implies the use of the kernel's pgd, instead
11328 * of a process's
11329 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11330 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11331 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11332
11333 +#ifdef CONFIG_X86_32
11334 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11335 +#else
11336 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11337 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11338 +
11339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11340 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11341 +#else
11342 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11343 +#endif
11344 +
11345 +#endif
11346 +
11347 #ifndef __ASSEMBLY__
11348
11349 extern int direct_gbpages;
11350 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11351 * dst and src can be on the same page, but the range must not overlap,
11352 * and must not cross a page boundary.
11353 */
11354 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11355 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11356 {
11357 - memcpy(dst, src, count * sizeof(pgd_t));
11358 + pax_open_kernel();
11359 + while (count--)
11360 + *dst++ = *src++;
11361 + pax_close_kernel();
11362 }
11363
11364 +#ifdef CONFIG_PAX_PER_CPU_PGD
11365 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11366 +#endif
11367 +
11368 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11369 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11370 +#else
11371 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11372 +#endif
11373
11374 #include <asm-generic/pgtable.h>
11375 #endif /* __ASSEMBLY__ */
11376 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11377 index 750f1bf..971e839 100644
11378 --- a/arch/x86/include/asm/pgtable_32.h
11379 +++ b/arch/x86/include/asm/pgtable_32.h
11380 @@ -26,9 +26,6 @@
11381 struct mm_struct;
11382 struct vm_area_struct;
11383
11384 -extern pgd_t swapper_pg_dir[1024];
11385 -extern pgd_t trampoline_pg_dir[1024];
11386 -
11387 static inline void pgtable_cache_init(void) { }
11388 static inline void check_pgt_cache(void) { }
11389 void paging_init(void);
11390 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11391 # include <asm/pgtable-2level.h>
11392 #endif
11393
11394 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11395 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11396 +#ifdef CONFIG_X86_PAE
11397 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11398 +#endif
11399 +
11400 #if defined(CONFIG_HIGHPTE)
11401 #define __KM_PTE \
11402 (in_nmi() ? KM_NMI_PTE : \
11403 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11404 /* Clear a kernel PTE and flush it from the TLB */
11405 #define kpte_clear_flush(ptep, vaddr) \
11406 do { \
11407 + pax_open_kernel(); \
11408 pte_clear(&init_mm, (vaddr), (ptep)); \
11409 + pax_close_kernel(); \
11410 __flush_tlb_one((vaddr)); \
11411 } while (0)
11412
11413 @@ -85,6 +90,9 @@ do { \
11414
11415 #endif /* !__ASSEMBLY__ */
11416
11417 +#define HAVE_ARCH_UNMAPPED_AREA
11418 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11419 +
11420 /*
11421 * kern_addr_valid() is (1) for FLATMEM and (0) for
11422 * SPARSEMEM and DISCONTIGMEM
11423 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11424 index 5e67c15..12d5c47 100644
11425 --- a/arch/x86/include/asm/pgtable_32_types.h
11426 +++ b/arch/x86/include/asm/pgtable_32_types.h
11427 @@ -8,7 +8,7 @@
11428 */
11429 #ifdef CONFIG_X86_PAE
11430 # include <asm/pgtable-3level_types.h>
11431 -# define PMD_SIZE (1UL << PMD_SHIFT)
11432 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11433 # define PMD_MASK (~(PMD_SIZE - 1))
11434 #else
11435 # include <asm/pgtable-2level_types.h>
11436 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11437 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11438 #endif
11439
11440 +#ifdef CONFIG_PAX_KERNEXEC
11441 +#ifndef __ASSEMBLY__
11442 +extern unsigned char MODULES_EXEC_VADDR[];
11443 +extern unsigned char MODULES_EXEC_END[];
11444 +#endif
11445 +#include <asm/boot.h>
11446 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11447 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11448 +#else
11449 +#define ktla_ktva(addr) (addr)
11450 +#define ktva_ktla(addr) (addr)
11451 +#endif
11452 +
11453 #define MODULES_VADDR VMALLOC_START
11454 #define MODULES_END VMALLOC_END
11455 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11456 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11457 index c57a301..6b414ff 100644
11458 --- a/arch/x86/include/asm/pgtable_64.h
11459 +++ b/arch/x86/include/asm/pgtable_64.h
11460 @@ -16,10 +16,14 @@
11461
11462 extern pud_t level3_kernel_pgt[512];
11463 extern pud_t level3_ident_pgt[512];
11464 +extern pud_t level3_vmalloc_start_pgt[512];
11465 +extern pud_t level3_vmalloc_end_pgt[512];
11466 +extern pud_t level3_vmemmap_pgt[512];
11467 +extern pud_t level2_vmemmap_pgt[512];
11468 extern pmd_t level2_kernel_pgt[512];
11469 extern pmd_t level2_fixmap_pgt[512];
11470 -extern pmd_t level2_ident_pgt[512];
11471 -extern pgd_t init_level4_pgt[];
11472 +extern pmd_t level2_ident_pgt[512*2];
11473 +extern pgd_t init_level4_pgt[512];
11474
11475 #define swapper_pg_dir init_level4_pgt
11476
11477 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11478
11479 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11480 {
11481 + pax_open_kernel();
11482 *pmdp = pmd;
11483 + pax_close_kernel();
11484 }
11485
11486 static inline void native_pmd_clear(pmd_t *pmd)
11487 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11488
11489 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11490 {
11491 + pax_open_kernel();
11492 + *pgdp = pgd;
11493 + pax_close_kernel();
11494 +}
11495 +
11496 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11497 +{
11498 *pgdp = pgd;
11499 }
11500
11501 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11502 index 766ea16..5b96cb3 100644
11503 --- a/arch/x86/include/asm/pgtable_64_types.h
11504 +++ b/arch/x86/include/asm/pgtable_64_types.h
11505 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11506 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11507 #define MODULES_END _AC(0xffffffffff000000, UL)
11508 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11509 +#define MODULES_EXEC_VADDR MODULES_VADDR
11510 +#define MODULES_EXEC_END MODULES_END
11511 +
11512 +#define ktla_ktva(addr) (addr)
11513 +#define ktva_ktla(addr) (addr)
11514
11515 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11516 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11517 index d1f4a76..2f46ba1 100644
11518 --- a/arch/x86/include/asm/pgtable_types.h
11519 +++ b/arch/x86/include/asm/pgtable_types.h
11520 @@ -16,12 +16,11 @@
11521 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11522 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11523 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11524 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11525 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11526 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11527 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11528 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11529 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11530 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11531 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11532 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11533
11534 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11535 @@ -39,7 +38,6 @@
11536 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11537 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11538 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11539 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11540 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11541 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11542 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11543 @@ -55,8 +53,10 @@
11544
11545 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11546 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11547 -#else
11548 +#elif defined(CONFIG_KMEMCHECK)
11549 #define _PAGE_NX (_AT(pteval_t, 0))
11550 +#else
11551 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11552 #endif
11553
11554 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11555 @@ -93,6 +93,9 @@
11556 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11557 _PAGE_ACCESSED)
11558
11559 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11560 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11561 +
11562 #define __PAGE_KERNEL_EXEC \
11563 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11564 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11565 @@ -103,8 +106,8 @@
11566 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11567 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11568 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11569 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11570 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11571 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11572 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11573 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11574 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11575 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11576 @@ -163,8 +166,8 @@
11577 * bits are combined, this will alow user to access the high address mapped
11578 * VDSO in the presence of CONFIG_COMPAT_VDSO
11579 */
11580 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11581 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11582 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11583 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11584 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11585 #endif
11586
11587 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11588 {
11589 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11590 }
11591 +#endif
11592
11593 +#if PAGETABLE_LEVELS == 3
11594 +#include <asm-generic/pgtable-nopud.h>
11595 +#endif
11596 +
11597 +#if PAGETABLE_LEVELS == 2
11598 +#include <asm-generic/pgtable-nopmd.h>
11599 +#endif
11600 +
11601 +#ifndef __ASSEMBLY__
11602 #if PAGETABLE_LEVELS > 3
11603 typedef struct { pudval_t pud; } pud_t;
11604
11605 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11606 return pud.pud;
11607 }
11608 #else
11609 -#include <asm-generic/pgtable-nopud.h>
11610 -
11611 static inline pudval_t native_pud_val(pud_t pud)
11612 {
11613 return native_pgd_val(pud.pgd);
11614 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11615 return pmd.pmd;
11616 }
11617 #else
11618 -#include <asm-generic/pgtable-nopmd.h>
11619 -
11620 static inline pmdval_t native_pmd_val(pmd_t pmd)
11621 {
11622 return native_pgd_val(pmd.pud.pgd);
11623 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11624
11625 extern pteval_t __supported_pte_mask;
11626 extern void set_nx(void);
11627 +
11628 +#ifdef CONFIG_X86_32
11629 +#ifdef CONFIG_X86_PAE
11630 extern int nx_enabled;
11631 +#else
11632 +#define nx_enabled (0)
11633 +#endif
11634 +#else
11635 +#define nx_enabled (1)
11636 +#endif
11637
11638 #define pgprot_writecombine pgprot_writecombine
11639 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11640 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11641 index fa04dea..5f823fc 100644
11642 --- a/arch/x86/include/asm/processor.h
11643 +++ b/arch/x86/include/asm/processor.h
11644 @@ -272,7 +272,7 @@ struct tss_struct {
11645
11646 } ____cacheline_aligned;
11647
11648 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11649 +extern struct tss_struct init_tss[NR_CPUS];
11650
11651 /*
11652 * Save the original ist values for checking stack pointers during debugging
11653 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11654 */
11655 #define TASK_SIZE PAGE_OFFSET
11656 #define TASK_SIZE_MAX TASK_SIZE
11657 +
11658 +#ifdef CONFIG_PAX_SEGMEXEC
11659 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11660 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11661 +#else
11662 #define STACK_TOP TASK_SIZE
11663 -#define STACK_TOP_MAX STACK_TOP
11664 +#endif
11665 +
11666 +#define STACK_TOP_MAX TASK_SIZE
11667
11668 #define INIT_THREAD { \
11669 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11670 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11671 .vm86_info = NULL, \
11672 .sysenter_cs = __KERNEL_CS, \
11673 .io_bitmap_ptr = NULL, \
11674 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11675 */
11676 #define INIT_TSS { \
11677 .x86_tss = { \
11678 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11679 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11680 .ss0 = __KERNEL_DS, \
11681 .ss1 = __KERNEL_CS, \
11682 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11683 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11684 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11685
11686 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11687 -#define KSTK_TOP(info) \
11688 -({ \
11689 - unsigned long *__ptr = (unsigned long *)(info); \
11690 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11691 -})
11692 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11693
11694 /*
11695 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11696 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11697 #define task_pt_regs(task) \
11698 ({ \
11699 struct pt_regs *__regs__; \
11700 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11701 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11702 __regs__ - 1; \
11703 })
11704
11705 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11706 /*
11707 * User space process size. 47bits minus one guard page.
11708 */
11709 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11710 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11711
11712 /* This decides where the kernel will search for a free chunk of vm
11713 * space during mmap's.
11714 */
11715 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11716 - 0xc0000000 : 0xFFFFe000)
11717 + 0xc0000000 : 0xFFFFf000)
11718
11719 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11720 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11721 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11722 #define STACK_TOP_MAX TASK_SIZE_MAX
11723
11724 #define INIT_THREAD { \
11725 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11726 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11727 }
11728
11729 #define INIT_TSS { \
11730 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11731 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11732 }
11733
11734 /*
11735 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11736 */
11737 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11738
11739 +#ifdef CONFIG_PAX_SEGMEXEC
11740 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11741 +#endif
11742 +
11743 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11744
11745 /* Get/set a process' ability to use the timestamp counter instruction */
11746 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11747 index 0f0d908..f2e3da2 100644
11748 --- a/arch/x86/include/asm/ptrace.h
11749 +++ b/arch/x86/include/asm/ptrace.h
11750 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11751 }
11752
11753 /*
11754 - * user_mode_vm(regs) determines whether a register set came from user mode.
11755 + * user_mode(regs) determines whether a register set came from user mode.
11756 * This is true if V8086 mode was enabled OR if the register set was from
11757 * protected mode with RPL-3 CS value. This tricky test checks that with
11758 * one comparison. Many places in the kernel can bypass this full check
11759 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11760 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11761 + * be used.
11762 */
11763 -static inline int user_mode(struct pt_regs *regs)
11764 +static inline int user_mode_novm(struct pt_regs *regs)
11765 {
11766 #ifdef CONFIG_X86_32
11767 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11768 #else
11769 - return !!(regs->cs & 3);
11770 + return !!(regs->cs & SEGMENT_RPL_MASK);
11771 #endif
11772 }
11773
11774 -static inline int user_mode_vm(struct pt_regs *regs)
11775 +static inline int user_mode(struct pt_regs *regs)
11776 {
11777 #ifdef CONFIG_X86_32
11778 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11779 USER_RPL;
11780 #else
11781 - return user_mode(regs);
11782 + return user_mode_novm(regs);
11783 #endif
11784 }
11785
11786 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11787 index 562d4fd..6e39df1 100644
11788 --- a/arch/x86/include/asm/reboot.h
11789 +++ b/arch/x86/include/asm/reboot.h
11790 @@ -6,19 +6,19 @@
11791 struct pt_regs;
11792
11793 struct machine_ops {
11794 - void (*restart)(char *cmd);
11795 - void (*halt)(void);
11796 - void (*power_off)(void);
11797 + void (* __noreturn restart)(char *cmd);
11798 + void (* __noreturn halt)(void);
11799 + void (* __noreturn power_off)(void);
11800 void (*shutdown)(void);
11801 void (*crash_shutdown)(struct pt_regs *);
11802 - void (*emergency_restart)(void);
11803 -};
11804 + void (* __noreturn emergency_restart)(void);
11805 +} __no_const;
11806
11807 extern struct machine_ops machine_ops;
11808
11809 void native_machine_crash_shutdown(struct pt_regs *regs);
11810 void native_machine_shutdown(void);
11811 -void machine_real_restart(const unsigned char *code, int length);
11812 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11813
11814 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11815 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11816 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11817 index 606ede1..dbfff37 100644
11818 --- a/arch/x86/include/asm/rwsem.h
11819 +++ b/arch/x86/include/asm/rwsem.h
11820 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11821 {
11822 asm volatile("# beginning down_read\n\t"
11823 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11824 +
11825 +#ifdef CONFIG_PAX_REFCOUNT
11826 + "jno 0f\n"
11827 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11828 + "int $4\n0:\n"
11829 + _ASM_EXTABLE(0b, 0b)
11830 +#endif
11831 +
11832 /* adds 0x00000001, returns the old value */
11833 " jns 1f\n"
11834 " call call_rwsem_down_read_failed\n"
11835 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11836 "1:\n\t"
11837 " mov %1,%2\n\t"
11838 " add %3,%2\n\t"
11839 +
11840 +#ifdef CONFIG_PAX_REFCOUNT
11841 + "jno 0f\n"
11842 + "sub %3,%2\n"
11843 + "int $4\n0:\n"
11844 + _ASM_EXTABLE(0b, 0b)
11845 +#endif
11846 +
11847 " jle 2f\n\t"
11848 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11849 " jnz 1b\n\t"
11850 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11851 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11852 asm volatile("# beginning down_write\n\t"
11853 LOCK_PREFIX " xadd %1,(%2)\n\t"
11854 +
11855 +#ifdef CONFIG_PAX_REFCOUNT
11856 + "jno 0f\n"
11857 + "mov %1,(%2)\n"
11858 + "int $4\n0:\n"
11859 + _ASM_EXTABLE(0b, 0b)
11860 +#endif
11861 +
11862 /* subtract 0x0000ffff, returns the old value */
11863 " test %1,%1\n\t"
11864 /* was the count 0 before? */
11865 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11866 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11867 asm volatile("# beginning __up_read\n\t"
11868 LOCK_PREFIX " xadd %1,(%2)\n\t"
11869 +
11870 +#ifdef CONFIG_PAX_REFCOUNT
11871 + "jno 0f\n"
11872 + "mov %1,(%2)\n"
11873 + "int $4\n0:\n"
11874 + _ASM_EXTABLE(0b, 0b)
11875 +#endif
11876 +
11877 /* subtracts 1, returns the old value */
11878 " jns 1f\n\t"
11879 " call call_rwsem_wake\n"
11880 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11881 rwsem_count_t tmp;
11882 asm volatile("# beginning __up_write\n\t"
11883 LOCK_PREFIX " xadd %1,(%2)\n\t"
11884 +
11885 +#ifdef CONFIG_PAX_REFCOUNT
11886 + "jno 0f\n"
11887 + "mov %1,(%2)\n"
11888 + "int $4\n0:\n"
11889 + _ASM_EXTABLE(0b, 0b)
11890 +#endif
11891 +
11892 /* tries to transition
11893 0xffff0001 -> 0x00000000 */
11894 " jz 1f\n"
11895 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11896 {
11897 asm volatile("# beginning __downgrade_write\n\t"
11898 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11899 +
11900 +#ifdef CONFIG_PAX_REFCOUNT
11901 + "jno 0f\n"
11902 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11903 + "int $4\n0:\n"
11904 + _ASM_EXTABLE(0b, 0b)
11905 +#endif
11906 +
11907 /*
11908 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11909 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11910 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11911 static inline void rwsem_atomic_add(rwsem_count_t delta,
11912 struct rw_semaphore *sem)
11913 {
11914 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11915 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11916 +
11917 +#ifdef CONFIG_PAX_REFCOUNT
11918 + "jno 0f\n"
11919 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11920 + "int $4\n0:\n"
11921 + _ASM_EXTABLE(0b, 0b)
11922 +#endif
11923 +
11924 : "+m" (sem->count)
11925 : "er" (delta));
11926 }
11927 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11928 {
11929 rwsem_count_t tmp = delta;
11930
11931 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11932 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11933 +
11934 +#ifdef CONFIG_PAX_REFCOUNT
11935 + "jno 0f\n"
11936 + "mov %0,%1\n"
11937 + "int $4\n0:\n"
11938 + _ASM_EXTABLE(0b, 0b)
11939 +#endif
11940 +
11941 : "+r" (tmp), "+m" (sem->count)
11942 : : "memory");
11943
11944 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11945 index 14e0ed8..7f7dd5e 100644
11946 --- a/arch/x86/include/asm/segment.h
11947 +++ b/arch/x86/include/asm/segment.h
11948 @@ -62,10 +62,15 @@
11949 * 26 - ESPFIX small SS
11950 * 27 - per-cpu [ offset to per-cpu data area ]
11951 * 28 - stack_canary-20 [ for stack protector ]
11952 - * 29 - unused
11953 - * 30 - unused
11954 + * 29 - PCI BIOS CS
11955 + * 30 - PCI BIOS DS
11956 * 31 - TSS for double fault handler
11957 */
11958 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11959 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11960 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11961 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11962 +
11963 #define GDT_ENTRY_TLS_MIN 6
11964 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11965
11966 @@ -77,6 +82,8 @@
11967
11968 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11969
11970 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11971 +
11972 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11973
11974 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11975 @@ -88,7 +95,7 @@
11976 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11977 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11978
11979 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11980 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11981 #ifdef CONFIG_SMP
11982 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11983 #else
11984 @@ -102,6 +109,12 @@
11985 #define __KERNEL_STACK_CANARY 0
11986 #endif
11987
11988 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11989 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11990 +
11991 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11992 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11993 +
11994 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11995
11996 /*
11997 @@ -139,7 +152,7 @@
11998 */
11999
12000 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12001 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12002 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12003
12004
12005 #else
12006 @@ -163,6 +176,8 @@
12007 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12008 #define __USER32_DS __USER_DS
12009
12010 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12011 +
12012 #define GDT_ENTRY_TSS 8 /* needs two entries */
12013 #define GDT_ENTRY_LDT 10 /* needs two entries */
12014 #define GDT_ENTRY_TLS_MIN 12
12015 @@ -183,6 +198,7 @@
12016 #endif
12017
12018 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12019 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12020 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12021 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12022 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12023 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12024 index 4c2f63c..5685db2 100644
12025 --- a/arch/x86/include/asm/smp.h
12026 +++ b/arch/x86/include/asm/smp.h
12027 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12028 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12029 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12030 DECLARE_PER_CPU(u16, cpu_llc_id);
12031 -DECLARE_PER_CPU(int, cpu_number);
12032 +DECLARE_PER_CPU(unsigned int, cpu_number);
12033
12034 static inline struct cpumask *cpu_sibling_mask(int cpu)
12035 {
12036 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12037 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12038
12039 /* Static state in head.S used to set up a CPU */
12040 -extern struct {
12041 - void *sp;
12042 - unsigned short ss;
12043 -} stack_start;
12044 +extern unsigned long stack_start; /* Initial stack pointer address */
12045
12046 struct smp_ops {
12047 void (*smp_prepare_boot_cpu)(void);
12048 @@ -60,7 +57,7 @@ struct smp_ops {
12049
12050 void (*send_call_func_ipi)(const struct cpumask *mask);
12051 void (*send_call_func_single_ipi)(int cpu);
12052 -};
12053 +} __no_const;
12054
12055 /* Globals due to paravirt */
12056 extern void set_cpu_sibling_map(int cpu);
12057 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12058 extern int safe_smp_processor_id(void);
12059
12060 #elif defined(CONFIG_X86_64_SMP)
12061 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12062 -
12063 -#define stack_smp_processor_id() \
12064 -({ \
12065 - struct thread_info *ti; \
12066 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12067 - ti->cpu; \
12068 -})
12069 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12070 +#define stack_smp_processor_id() raw_smp_processor_id()
12071 #define safe_smp_processor_id() smp_processor_id()
12072
12073 #endif
12074 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12075 index 4e77853..4359783 100644
12076 --- a/arch/x86/include/asm/spinlock.h
12077 +++ b/arch/x86/include/asm/spinlock.h
12078 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12079 static inline void __raw_read_lock(raw_rwlock_t *rw)
12080 {
12081 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12082 +
12083 +#ifdef CONFIG_PAX_REFCOUNT
12084 + "jno 0f\n"
12085 + LOCK_PREFIX " addl $1,(%0)\n"
12086 + "int $4\n0:\n"
12087 + _ASM_EXTABLE(0b, 0b)
12088 +#endif
12089 +
12090 "jns 1f\n"
12091 "call __read_lock_failed\n\t"
12092 "1:\n"
12093 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12094 static inline void __raw_write_lock(raw_rwlock_t *rw)
12095 {
12096 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12097 +
12098 +#ifdef CONFIG_PAX_REFCOUNT
12099 + "jno 0f\n"
12100 + LOCK_PREFIX " addl %1,(%0)\n"
12101 + "int $4\n0:\n"
12102 + _ASM_EXTABLE(0b, 0b)
12103 +#endif
12104 +
12105 "jz 1f\n"
12106 "call __write_lock_failed\n\t"
12107 "1:\n"
12108 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12109
12110 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12111 {
12112 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12113 + asm volatile(LOCK_PREFIX "incl %0\n"
12114 +
12115 +#ifdef CONFIG_PAX_REFCOUNT
12116 + "jno 0f\n"
12117 + LOCK_PREFIX "decl %0\n"
12118 + "int $4\n0:\n"
12119 + _ASM_EXTABLE(0b, 0b)
12120 +#endif
12121 +
12122 + :"+m" (rw->lock) : : "memory");
12123 }
12124
12125 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12126 {
12127 - asm volatile(LOCK_PREFIX "addl %1, %0"
12128 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
12129 +
12130 +#ifdef CONFIG_PAX_REFCOUNT
12131 + "jno 0f\n"
12132 + LOCK_PREFIX "subl %1, %0\n"
12133 + "int $4\n0:\n"
12134 + _ASM_EXTABLE(0b, 0b)
12135 +#endif
12136 +
12137 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12138 }
12139
12140 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12141 index 1575177..cb23f52 100644
12142 --- a/arch/x86/include/asm/stackprotector.h
12143 +++ b/arch/x86/include/asm/stackprotector.h
12144 @@ -48,7 +48,7 @@
12145 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12146 */
12147 #define GDT_STACK_CANARY_INIT \
12148 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12149 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12150
12151 /*
12152 * Initialize the stackprotector canary value.
12153 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12154
12155 static inline void load_stack_canary_segment(void)
12156 {
12157 -#ifdef CONFIG_X86_32
12158 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12159 asm volatile ("mov %0, %%gs" : : "r" (0));
12160 #endif
12161 }
12162 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12163 index e0fbf29..858ef4a 100644
12164 --- a/arch/x86/include/asm/system.h
12165 +++ b/arch/x86/include/asm/system.h
12166 @@ -132,7 +132,7 @@ do { \
12167 "thread_return:\n\t" \
12168 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12169 __switch_canary \
12170 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12171 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12172 "movq %%rax,%%rdi\n\t" \
12173 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12174 "jnz ret_from_fork\n\t" \
12175 @@ -143,7 +143,7 @@ do { \
12176 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12177 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12178 [_tif_fork] "i" (_TIF_FORK), \
12179 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12180 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
12181 [current_task] "m" (per_cpu_var(current_task)) \
12182 __switch_canary_iparam \
12183 : "memory", "cc" __EXTRA_CLOBBER)
12184 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12185 {
12186 unsigned long __limit;
12187 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12188 - return __limit + 1;
12189 + return __limit;
12190 }
12191
12192 static inline void native_clts(void)
12193 @@ -340,12 +340,12 @@ void enable_hlt(void);
12194
12195 void cpu_idle_wait(void);
12196
12197 -extern unsigned long arch_align_stack(unsigned long sp);
12198 +#define arch_align_stack(x) ((x) & ~0xfUL)
12199 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12200
12201 void default_idle(void);
12202
12203 -void stop_this_cpu(void *dummy);
12204 +void stop_this_cpu(void *dummy) __noreturn;
12205
12206 /*
12207 * Force strict CPU ordering.
12208 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12209 index 19c3ce4..8962535 100644
12210 --- a/arch/x86/include/asm/thread_info.h
12211 +++ b/arch/x86/include/asm/thread_info.h
12212 @@ -10,6 +10,7 @@
12213 #include <linux/compiler.h>
12214 #include <asm/page.h>
12215 #include <asm/types.h>
12216 +#include <asm/percpu.h>
12217
12218 /*
12219 * low level task data that entry.S needs immediate access to
12220 @@ -24,7 +25,6 @@ struct exec_domain;
12221 #include <asm/atomic.h>
12222
12223 struct thread_info {
12224 - struct task_struct *task; /* main task structure */
12225 struct exec_domain *exec_domain; /* execution domain */
12226 __u32 flags; /* low level flags */
12227 __u32 status; /* thread synchronous flags */
12228 @@ -34,18 +34,12 @@ struct thread_info {
12229 mm_segment_t addr_limit;
12230 struct restart_block restart_block;
12231 void __user *sysenter_return;
12232 -#ifdef CONFIG_X86_32
12233 - unsigned long previous_esp; /* ESP of the previous stack in
12234 - case of nested (IRQ) stacks
12235 - */
12236 - __u8 supervisor_stack[0];
12237 -#endif
12238 + unsigned long lowest_stack;
12239 int uaccess_err;
12240 };
12241
12242 -#define INIT_THREAD_INFO(tsk) \
12243 +#define INIT_THREAD_INFO \
12244 { \
12245 - .task = &tsk, \
12246 .exec_domain = &default_exec_domain, \
12247 .flags = 0, \
12248 .cpu = 0, \
12249 @@ -56,7 +50,7 @@ struct thread_info {
12250 }, \
12251 }
12252
12253 -#define init_thread_info (init_thread_union.thread_info)
12254 +#define init_thread_info (init_thread_union.stack)
12255 #define init_stack (init_thread_union.stack)
12256
12257 #else /* !__ASSEMBLY__ */
12258 @@ -163,45 +157,40 @@ struct thread_info {
12259 #define alloc_thread_info(tsk) \
12260 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12261
12262 -#ifdef CONFIG_X86_32
12263 -
12264 -#define STACK_WARN (THREAD_SIZE/8)
12265 -/*
12266 - * macros/functions for gaining access to the thread information structure
12267 - *
12268 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12269 - */
12270 -#ifndef __ASSEMBLY__
12271 -
12272 -
12273 -/* how to get the current stack pointer from C */
12274 -register unsigned long current_stack_pointer asm("esp") __used;
12275 -
12276 -/* how to get the thread information struct from C */
12277 -static inline struct thread_info *current_thread_info(void)
12278 -{
12279 - return (struct thread_info *)
12280 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12281 -}
12282 -
12283 -#else /* !__ASSEMBLY__ */
12284 -
12285 +#ifdef __ASSEMBLY__
12286 /* how to get the thread information struct from ASM */
12287 #define GET_THREAD_INFO(reg) \
12288 - movl $-THREAD_SIZE, reg; \
12289 - andl %esp, reg
12290 + mov PER_CPU_VAR(current_tinfo), reg
12291
12292 /* use this one if reg already contains %esp */
12293 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12294 - andl $-THREAD_SIZE, reg
12295 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12296 +#else
12297 +/* how to get the thread information struct from C */
12298 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12299 +
12300 +static __always_inline struct thread_info *current_thread_info(void)
12301 +{
12302 + return percpu_read_stable(current_tinfo);
12303 +}
12304 +#endif
12305 +
12306 +#ifdef CONFIG_X86_32
12307 +
12308 +#define STACK_WARN (THREAD_SIZE/8)
12309 +/*
12310 + * macros/functions for gaining access to the thread information structure
12311 + *
12312 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12313 + */
12314 +#ifndef __ASSEMBLY__
12315 +
12316 +/* how to get the current stack pointer from C */
12317 +register unsigned long current_stack_pointer asm("esp") __used;
12318
12319 #endif
12320
12321 #else /* X86_32 */
12322
12323 -#include <asm/percpu.h>
12324 -#define KERNEL_STACK_OFFSET (5*8)
12325 -
12326 /*
12327 * macros/functions for gaining access to the thread information structure
12328 * preempt_count needs to be 1 initially, until the scheduler is functional.
12329 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12330 #ifndef __ASSEMBLY__
12331 DECLARE_PER_CPU(unsigned long, kernel_stack);
12332
12333 -static inline struct thread_info *current_thread_info(void)
12334 -{
12335 - struct thread_info *ti;
12336 - ti = (void *)(percpu_read_stable(kernel_stack) +
12337 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12338 - return ti;
12339 -}
12340 -
12341 -#else /* !__ASSEMBLY__ */
12342 -
12343 -/* how to get the thread information struct from ASM */
12344 -#define GET_THREAD_INFO(reg) \
12345 - movq PER_CPU_VAR(kernel_stack),reg ; \
12346 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12347 -
12348 +/* how to get the current stack pointer from C */
12349 +register unsigned long current_stack_pointer asm("rsp") __used;
12350 #endif
12351
12352 #endif /* !X86_32 */
12353 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12354 extern void free_thread_info(struct thread_info *ti);
12355 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12356 #define arch_task_cache_init arch_task_cache_init
12357 +
12358 +#define __HAVE_THREAD_FUNCTIONS
12359 +#define task_thread_info(task) (&(task)->tinfo)
12360 +#define task_stack_page(task) ((task)->stack)
12361 +#define setup_thread_stack(p, org) do {} while (0)
12362 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12363 +
12364 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12365 +extern struct task_struct *alloc_task_struct(void);
12366 +extern void free_task_struct(struct task_struct *);
12367 +
12368 #endif
12369 #endif /* _ASM_X86_THREAD_INFO_H */
12370 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12371 index 61c5874..8a046e9 100644
12372 --- a/arch/x86/include/asm/uaccess.h
12373 +++ b/arch/x86/include/asm/uaccess.h
12374 @@ -8,12 +8,15 @@
12375 #include <linux/thread_info.h>
12376 #include <linux/prefetch.h>
12377 #include <linux/string.h>
12378 +#include <linux/sched.h>
12379 #include <asm/asm.h>
12380 #include <asm/page.h>
12381
12382 #define VERIFY_READ 0
12383 #define VERIFY_WRITE 1
12384
12385 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12386 +
12387 /*
12388 * The fs value determines whether argument validity checking should be
12389 * performed or not. If get_fs() == USER_DS, checking is performed, with
12390 @@ -29,7 +32,12 @@
12391
12392 #define get_ds() (KERNEL_DS)
12393 #define get_fs() (current_thread_info()->addr_limit)
12394 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12395 +void __set_fs(mm_segment_t x);
12396 +void set_fs(mm_segment_t x);
12397 +#else
12398 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12399 +#endif
12400
12401 #define segment_eq(a, b) ((a).seg == (b).seg)
12402
12403 @@ -77,7 +85,33 @@
12404 * checks that the pointer is in the user space range - after calling
12405 * this function, memory access functions may still return -EFAULT.
12406 */
12407 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12408 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12409 +#define access_ok(type, addr, size) \
12410 +({ \
12411 + long __size = size; \
12412 + unsigned long __addr = (unsigned long)addr; \
12413 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12414 + unsigned long __end_ao = __addr + __size - 1; \
12415 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12416 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12417 + while(__addr_ao <= __end_ao) { \
12418 + char __c_ao; \
12419 + __addr_ao += PAGE_SIZE; \
12420 + if (__size > PAGE_SIZE) \
12421 + cond_resched(); \
12422 + if (__get_user(__c_ao, (char __user *)__addr)) \
12423 + break; \
12424 + if (type != VERIFY_WRITE) { \
12425 + __addr = __addr_ao; \
12426 + continue; \
12427 + } \
12428 + if (__put_user(__c_ao, (char __user *)__addr)) \
12429 + break; \
12430 + __addr = __addr_ao; \
12431 + } \
12432 + } \
12433 + __ret_ao; \
12434 +})
12435
12436 /*
12437 * The exception table consists of pairs of addresses: the first is the
12438 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12439 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12440 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12441
12442 -
12443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12444 +#define __copyuser_seg "gs;"
12445 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12446 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12447 +#else
12448 +#define __copyuser_seg
12449 +#define __COPYUSER_SET_ES
12450 +#define __COPYUSER_RESTORE_ES
12451 +#endif
12452
12453 #ifdef CONFIG_X86_32
12454 #define __put_user_asm_u64(x, addr, err, errret) \
12455 - asm volatile("1: movl %%eax,0(%2)\n" \
12456 - "2: movl %%edx,4(%2)\n" \
12457 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12458 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12459 "3:\n" \
12460 ".section .fixup,\"ax\"\n" \
12461 "4: movl %3,%0\n" \
12462 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12463 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12464
12465 #define __put_user_asm_ex_u64(x, addr) \
12466 - asm volatile("1: movl %%eax,0(%1)\n" \
12467 - "2: movl %%edx,4(%1)\n" \
12468 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12469 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12470 "3:\n" \
12471 _ASM_EXTABLE(1b, 2b - 1b) \
12472 _ASM_EXTABLE(2b, 3b - 2b) \
12473 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12474 __typeof__(*(ptr)) __pu_val; \
12475 __chk_user_ptr(ptr); \
12476 might_fault(); \
12477 - __pu_val = x; \
12478 + __pu_val = (x); \
12479 switch (sizeof(*(ptr))) { \
12480 case 1: \
12481 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12482 @@ -374,7 +416,7 @@ do { \
12483 } while (0)
12484
12485 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12486 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12487 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12488 "2:\n" \
12489 ".section .fixup,\"ax\"\n" \
12490 "3: mov %3,%0\n" \
12491 @@ -382,7 +424,7 @@ do { \
12492 " jmp 2b\n" \
12493 ".previous\n" \
12494 _ASM_EXTABLE(1b, 3b) \
12495 - : "=r" (err), ltype(x) \
12496 + : "=r" (err), ltype (x) \
12497 : "m" (__m(addr)), "i" (errret), "0" (err))
12498
12499 #define __get_user_size_ex(x, ptr, size) \
12500 @@ -407,7 +449,7 @@ do { \
12501 } while (0)
12502
12503 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12504 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12505 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12506 "2:\n" \
12507 _ASM_EXTABLE(1b, 2b - 1b) \
12508 : ltype(x) : "m" (__m(addr)))
12509 @@ -424,13 +466,24 @@ do { \
12510 int __gu_err; \
12511 unsigned long __gu_val; \
12512 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12513 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12514 + (x) = (__typeof__(*(ptr)))__gu_val; \
12515 __gu_err; \
12516 })
12517
12518 /* FIXME: this hack is definitely wrong -AK */
12519 struct __large_struct { unsigned long buf[100]; };
12520 -#define __m(x) (*(struct __large_struct __user *)(x))
12521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12522 +#define ____m(x) \
12523 +({ \
12524 + unsigned long ____x = (unsigned long)(x); \
12525 + if (____x < PAX_USER_SHADOW_BASE) \
12526 + ____x += PAX_USER_SHADOW_BASE; \
12527 + (void __user *)____x; \
12528 +})
12529 +#else
12530 +#define ____m(x) (x)
12531 +#endif
12532 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12533
12534 /*
12535 * Tell gcc we read from memory instead of writing: this is because
12536 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12537 * aliasing issues.
12538 */
12539 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12540 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12541 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12542 "2:\n" \
12543 ".section .fixup,\"ax\"\n" \
12544 "3: mov %3,%0\n" \
12545 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12546 ".previous\n" \
12547 _ASM_EXTABLE(1b, 3b) \
12548 : "=r"(err) \
12549 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12550 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12551
12552 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12553 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12554 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12555 "2:\n" \
12556 _ASM_EXTABLE(1b, 2b - 1b) \
12557 : : ltype(x), "m" (__m(addr)))
12558 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12559 * On error, the variable @x is set to zero.
12560 */
12561
12562 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12563 +#define __get_user(x, ptr) get_user((x), (ptr))
12564 +#else
12565 #define __get_user(x, ptr) \
12566 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12567 +#endif
12568
12569 /**
12570 * __put_user: - Write a simple value into user space, with less checking.
12571 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12572 * Returns zero on success, or -EFAULT on error.
12573 */
12574
12575 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12576 +#define __put_user(x, ptr) put_user((x), (ptr))
12577 +#else
12578 #define __put_user(x, ptr) \
12579 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12580 +#endif
12581
12582 #define __get_user_unaligned __get_user
12583 #define __put_user_unaligned __put_user
12584 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12585 #define get_user_ex(x, ptr) do { \
12586 unsigned long __gue_val; \
12587 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12588 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12589 + (x) = (__typeof__(*(ptr)))__gue_val; \
12590 } while (0)
12591
12592 #ifdef CONFIG_X86_WP_WORKS_OK
12593 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12594
12595 #define ARCH_HAS_NOCACHE_UACCESS 1
12596
12597 +#define ARCH_HAS_SORT_EXTABLE
12598 #ifdef CONFIG_X86_32
12599 # include "uaccess_32.h"
12600 #else
12601 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12602 index 632fb44..e30e334 100644
12603 --- a/arch/x86/include/asm/uaccess_32.h
12604 +++ b/arch/x86/include/asm/uaccess_32.h
12605 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12606 static __always_inline unsigned long __must_check
12607 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12608 {
12609 + pax_track_stack();
12610 +
12611 + if ((long)n < 0)
12612 + return n;
12613 +
12614 if (__builtin_constant_p(n)) {
12615 unsigned long ret;
12616
12617 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12618 return ret;
12619 }
12620 }
12621 + if (!__builtin_constant_p(n))
12622 + check_object_size(from, n, true);
12623 return __copy_to_user_ll(to, from, n);
12624 }
12625
12626 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12627 __copy_to_user(void __user *to, const void *from, unsigned long n)
12628 {
12629 might_fault();
12630 +
12631 return __copy_to_user_inatomic(to, from, n);
12632 }
12633
12634 static __always_inline unsigned long
12635 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12636 {
12637 + if ((long)n < 0)
12638 + return n;
12639 +
12640 /* Avoid zeroing the tail if the copy fails..
12641 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12642 * but as the zeroing behaviour is only significant when n is not
12643 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12644 __copy_from_user(void *to, const void __user *from, unsigned long n)
12645 {
12646 might_fault();
12647 +
12648 + pax_track_stack();
12649 +
12650 + if ((long)n < 0)
12651 + return n;
12652 +
12653 if (__builtin_constant_p(n)) {
12654 unsigned long ret;
12655
12656 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12657 return ret;
12658 }
12659 }
12660 + if (!__builtin_constant_p(n))
12661 + check_object_size(to, n, false);
12662 return __copy_from_user_ll(to, from, n);
12663 }
12664
12665 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12666 const void __user *from, unsigned long n)
12667 {
12668 might_fault();
12669 +
12670 + if ((long)n < 0)
12671 + return n;
12672 +
12673 if (__builtin_constant_p(n)) {
12674 unsigned long ret;
12675
12676 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12677 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12678 unsigned long n)
12679 {
12680 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12681 + if ((long)n < 0)
12682 + return n;
12683 +
12684 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12685 +}
12686 +
12687 +/**
12688 + * copy_to_user: - Copy a block of data into user space.
12689 + * @to: Destination address, in user space.
12690 + * @from: Source address, in kernel space.
12691 + * @n: Number of bytes to copy.
12692 + *
12693 + * Context: User context only. This function may sleep.
12694 + *
12695 + * Copy data from kernel space to user space.
12696 + *
12697 + * Returns number of bytes that could not be copied.
12698 + * On success, this will be zero.
12699 + */
12700 +static __always_inline unsigned long __must_check
12701 +copy_to_user(void __user *to, const void *from, unsigned long n)
12702 +{
12703 + if (access_ok(VERIFY_WRITE, to, n))
12704 + n = __copy_to_user(to, from, n);
12705 + return n;
12706 +}
12707 +
12708 +/**
12709 + * copy_from_user: - Copy a block of data from user space.
12710 + * @to: Destination address, in kernel space.
12711 + * @from: Source address, in user space.
12712 + * @n: Number of bytes to copy.
12713 + *
12714 + * Context: User context only. This function may sleep.
12715 + *
12716 + * Copy data from user space to kernel space.
12717 + *
12718 + * Returns number of bytes that could not be copied.
12719 + * On success, this will be zero.
12720 + *
12721 + * If some data could not be copied, this function will pad the copied
12722 + * data to the requested size using zero bytes.
12723 + */
12724 +static __always_inline unsigned long __must_check
12725 +copy_from_user(void *to, const void __user *from, unsigned long n)
12726 +{
12727 + if (access_ok(VERIFY_READ, from, n))
12728 + n = __copy_from_user(to, from, n);
12729 + else if ((long)n > 0) {
12730 + if (!__builtin_constant_p(n))
12731 + check_object_size(to, n, false);
12732 + memset(to, 0, n);
12733 + }
12734 + return n;
12735 }
12736
12737 -unsigned long __must_check copy_to_user(void __user *to,
12738 - const void *from, unsigned long n);
12739 -unsigned long __must_check copy_from_user(void *to,
12740 - const void __user *from,
12741 - unsigned long n);
12742 long __must_check strncpy_from_user(char *dst, const char __user *src,
12743 long count);
12744 long __must_check __strncpy_from_user(char *dst,
12745 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12746 index db24b21..f595ae7 100644
12747 --- a/arch/x86/include/asm/uaccess_64.h
12748 +++ b/arch/x86/include/asm/uaccess_64.h
12749 @@ -9,6 +9,9 @@
12750 #include <linux/prefetch.h>
12751 #include <linux/lockdep.h>
12752 #include <asm/page.h>
12753 +#include <asm/pgtable.h>
12754 +
12755 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12756
12757 /*
12758 * Copy To/From Userspace
12759 @@ -16,116 +19,205 @@
12760
12761 /* Handles exceptions in both to and from, but doesn't do access_ok */
12762 __must_check unsigned long
12763 -copy_user_generic(void *to, const void *from, unsigned len);
12764 +copy_user_generic(void *to, const void *from, unsigned long len);
12765
12766 __must_check unsigned long
12767 -copy_to_user(void __user *to, const void *from, unsigned len);
12768 -__must_check unsigned long
12769 -copy_from_user(void *to, const void __user *from, unsigned len);
12770 -__must_check unsigned long
12771 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12772 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12773
12774 static __always_inline __must_check
12775 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12776 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12777 {
12778 - int ret = 0;
12779 + unsigned ret = 0;
12780
12781 might_fault();
12782 - if (!__builtin_constant_p(size))
12783 - return copy_user_generic(dst, (__force void *)src, size);
12784 +
12785 + if (size > INT_MAX)
12786 + return size;
12787 +
12788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12789 + if (!__access_ok(VERIFY_READ, src, size))
12790 + return size;
12791 +#endif
12792 +
12793 + if (!__builtin_constant_p(size)) {
12794 + check_object_size(dst, size, false);
12795 +
12796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12797 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12798 + src += PAX_USER_SHADOW_BASE;
12799 +#endif
12800 +
12801 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12802 + }
12803 switch (size) {
12804 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12805 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12806 ret, "b", "b", "=q", 1);
12807 return ret;
12808 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12809 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12810 ret, "w", "w", "=r", 2);
12811 return ret;
12812 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12813 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12814 ret, "l", "k", "=r", 4);
12815 return ret;
12816 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12817 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12818 ret, "q", "", "=r", 8);
12819 return ret;
12820 case 10:
12821 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12822 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12823 ret, "q", "", "=r", 10);
12824 if (unlikely(ret))
12825 return ret;
12826 __get_user_asm(*(u16 *)(8 + (char *)dst),
12827 - (u16 __user *)(8 + (char __user *)src),
12828 + (const u16 __user *)(8 + (const char __user *)src),
12829 ret, "w", "w", "=r", 2);
12830 return ret;
12831 case 16:
12832 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12833 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12834 ret, "q", "", "=r", 16);
12835 if (unlikely(ret))
12836 return ret;
12837 __get_user_asm(*(u64 *)(8 + (char *)dst),
12838 - (u64 __user *)(8 + (char __user *)src),
12839 + (const u64 __user *)(8 + (const char __user *)src),
12840 ret, "q", "", "=r", 8);
12841 return ret;
12842 default:
12843 - return copy_user_generic(dst, (__force void *)src, size);
12844 +
12845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12846 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12847 + src += PAX_USER_SHADOW_BASE;
12848 +#endif
12849 +
12850 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12851 }
12852 }
12853
12854 static __always_inline __must_check
12855 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12856 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12857 {
12858 - int ret = 0;
12859 + unsigned ret = 0;
12860
12861 might_fault();
12862 - if (!__builtin_constant_p(size))
12863 - return copy_user_generic((__force void *)dst, src, size);
12864 +
12865 + pax_track_stack();
12866 +
12867 + if (size > INT_MAX)
12868 + return size;
12869 +
12870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12871 + if (!__access_ok(VERIFY_WRITE, dst, size))
12872 + return size;
12873 +#endif
12874 +
12875 + if (!__builtin_constant_p(size)) {
12876 + check_object_size(src, size, true);
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12880 + dst += PAX_USER_SHADOW_BASE;
12881 +#endif
12882 +
12883 + return copy_user_generic((__force_kernel void *)dst, src, size);
12884 + }
12885 switch (size) {
12886 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12887 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12888 ret, "b", "b", "iq", 1);
12889 return ret;
12890 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12891 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12892 ret, "w", "w", "ir", 2);
12893 return ret;
12894 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12895 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12896 ret, "l", "k", "ir", 4);
12897 return ret;
12898 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12899 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12900 ret, "q", "", "er", 8);
12901 return ret;
12902 case 10:
12903 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12904 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12905 ret, "q", "", "er", 10);
12906 if (unlikely(ret))
12907 return ret;
12908 asm("":::"memory");
12909 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12910 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12911 ret, "w", "w", "ir", 2);
12912 return ret;
12913 case 16:
12914 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12915 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12916 ret, "q", "", "er", 16);
12917 if (unlikely(ret))
12918 return ret;
12919 asm("":::"memory");
12920 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12921 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12922 ret, "q", "", "er", 8);
12923 return ret;
12924 default:
12925 - return copy_user_generic((__force void *)dst, src, size);
12926 +
12927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12928 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12929 + dst += PAX_USER_SHADOW_BASE;
12930 +#endif
12931 +
12932 + return copy_user_generic((__force_kernel void *)dst, src, size);
12933 + }
12934 +}
12935 +
12936 +static __always_inline __must_check
12937 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12938 +{
12939 + if (access_ok(VERIFY_WRITE, to, len))
12940 + len = __copy_to_user(to, from, len);
12941 + return len;
12942 +}
12943 +
12944 +static __always_inline __must_check
12945 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12946 +{
12947 + might_fault();
12948 +
12949 + if (access_ok(VERIFY_READ, from, len))
12950 + len = __copy_from_user(to, from, len);
12951 + else if (len < INT_MAX) {
12952 + if (!__builtin_constant_p(len))
12953 + check_object_size(to, len, false);
12954 + memset(to, 0, len);
12955 }
12956 + return len;
12957 }
12958
12959 static __always_inline __must_check
12960 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12961 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12962 {
12963 - int ret = 0;
12964 + unsigned ret = 0;
12965
12966 might_fault();
12967 - if (!__builtin_constant_p(size))
12968 - return copy_user_generic((__force void *)dst,
12969 - (__force void *)src, size);
12970 +
12971 + pax_track_stack();
12972 +
12973 + if (size > INT_MAX)
12974 + return size;
12975 +
12976 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12977 + if (!__access_ok(VERIFY_READ, src, size))
12978 + return size;
12979 + if (!__access_ok(VERIFY_WRITE, dst, size))
12980 + return size;
12981 +#endif
12982 +
12983 + if (!__builtin_constant_p(size)) {
12984 +
12985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12986 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12987 + src += PAX_USER_SHADOW_BASE;
12988 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12989 + dst += PAX_USER_SHADOW_BASE;
12990 +#endif
12991 +
12992 + return copy_user_generic((__force_kernel void *)dst,
12993 + (__force_kernel const void *)src, size);
12994 + }
12995 switch (size) {
12996 case 1: {
12997 u8 tmp;
12998 - __get_user_asm(tmp, (u8 __user *)src,
12999 + __get_user_asm(tmp, (const u8 __user *)src,
13000 ret, "b", "b", "=q", 1);
13001 if (likely(!ret))
13002 __put_user_asm(tmp, (u8 __user *)dst,
13003 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13004 }
13005 case 2: {
13006 u16 tmp;
13007 - __get_user_asm(tmp, (u16 __user *)src,
13008 + __get_user_asm(tmp, (const u16 __user *)src,
13009 ret, "w", "w", "=r", 2);
13010 if (likely(!ret))
13011 __put_user_asm(tmp, (u16 __user *)dst,
13012 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13013
13014 case 4: {
13015 u32 tmp;
13016 - __get_user_asm(tmp, (u32 __user *)src,
13017 + __get_user_asm(tmp, (const u32 __user *)src,
13018 ret, "l", "k", "=r", 4);
13019 if (likely(!ret))
13020 __put_user_asm(tmp, (u32 __user *)dst,
13021 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13022 }
13023 case 8: {
13024 u64 tmp;
13025 - __get_user_asm(tmp, (u64 __user *)src,
13026 + __get_user_asm(tmp, (const u64 __user *)src,
13027 ret, "q", "", "=r", 8);
13028 if (likely(!ret))
13029 __put_user_asm(tmp, (u64 __user *)dst,
13030 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13031 return ret;
13032 }
13033 default:
13034 - return copy_user_generic((__force void *)dst,
13035 - (__force void *)src, size);
13036 +
13037 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13038 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13039 + src += PAX_USER_SHADOW_BASE;
13040 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13041 + dst += PAX_USER_SHADOW_BASE;
13042 +#endif
13043 +
13044 + return copy_user_generic((__force_kernel void *)dst,
13045 + (__force_kernel const void *)src, size);
13046 }
13047 }
13048
13049 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13050 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13051 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13052
13053 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13054 - unsigned size);
13055 +static __must_check __always_inline unsigned long
13056 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13057 +{
13058 + pax_track_stack();
13059 +
13060 + if (size > INT_MAX)
13061 + return size;
13062 +
13063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13064 + if (!__access_ok(VERIFY_READ, src, size))
13065 + return size;
13066
13067 -static __must_check __always_inline int
13068 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13069 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13070 + src += PAX_USER_SHADOW_BASE;
13071 +#endif
13072 +
13073 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13074 +}
13075 +
13076 +static __must_check __always_inline unsigned long
13077 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13078 {
13079 - return copy_user_generic((__force void *)dst, src, size);
13080 + if (size > INT_MAX)
13081 + return size;
13082 +
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + if (!__access_ok(VERIFY_WRITE, dst, size))
13085 + return size;
13086 +
13087 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13088 + dst += PAX_USER_SHADOW_BASE;
13089 +#endif
13090 +
13091 + return copy_user_generic((__force_kernel void *)dst, src, size);
13092 }
13093
13094 -extern long __copy_user_nocache(void *dst, const void __user *src,
13095 - unsigned size, int zerorest);
13096 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13097 + unsigned long size, int zerorest);
13098
13099 -static inline int
13100 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13101 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13102 {
13103 might_sleep();
13104 +
13105 + if (size > INT_MAX)
13106 + return size;
13107 +
13108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13109 + if (!__access_ok(VERIFY_READ, src, size))
13110 + return size;
13111 +#endif
13112 +
13113 return __copy_user_nocache(dst, src, size, 1);
13114 }
13115
13116 -static inline int
13117 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13118 - unsigned size)
13119 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13120 + unsigned long size)
13121 {
13122 + if (size > INT_MAX)
13123 + return size;
13124 +
13125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13126 + if (!__access_ok(VERIFY_READ, src, size))
13127 + return size;
13128 +#endif
13129 +
13130 return __copy_user_nocache(dst, src, size, 0);
13131 }
13132
13133 -unsigned long
13134 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13135 +extern unsigned long
13136 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13137
13138 #endif /* _ASM_X86_UACCESS_64_H */
13139 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13140 index 9064052..786cfbc 100644
13141 --- a/arch/x86/include/asm/vdso.h
13142 +++ b/arch/x86/include/asm/vdso.h
13143 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13144 #define VDSO32_SYMBOL(base, name) \
13145 ({ \
13146 extern const char VDSO32_##name[]; \
13147 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13148 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13149 })
13150 #endif
13151
13152 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13153 index 3d61e20..9507180 100644
13154 --- a/arch/x86/include/asm/vgtod.h
13155 +++ b/arch/x86/include/asm/vgtod.h
13156 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13157 int sysctl_enabled;
13158 struct timezone sys_tz;
13159 struct { /* extract of a clocksource struct */
13160 + char name[8];
13161 cycle_t (*vread)(void);
13162 cycle_t cycle_last;
13163 cycle_t mask;
13164 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13165 index 61e08c0..b0da582 100644
13166 --- a/arch/x86/include/asm/vmi.h
13167 +++ b/arch/x86/include/asm/vmi.h
13168 @@ -191,6 +191,7 @@ struct vrom_header {
13169 u8 reserved[96]; /* Reserved for headers */
13170 char vmi_init[8]; /* VMI_Init jump point */
13171 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13172 + char rom_data[8048]; /* rest of the option ROM */
13173 } __attribute__((packed));
13174
13175 struct pnp_header {
13176 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13177 index c6e0bee..fcb9f74 100644
13178 --- a/arch/x86/include/asm/vmi_time.h
13179 +++ b/arch/x86/include/asm/vmi_time.h
13180 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13181 int (*wallclock_updated)(void);
13182 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13183 void (*cancel_alarm)(u32 flags);
13184 -} vmi_timer_ops;
13185 +} __no_const vmi_timer_ops;
13186
13187 /* Prototypes */
13188 extern void __init vmi_time_init(void);
13189 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13190 index d0983d2..1f7c9e9 100644
13191 --- a/arch/x86/include/asm/vsyscall.h
13192 +++ b/arch/x86/include/asm/vsyscall.h
13193 @@ -15,9 +15,10 @@ enum vsyscall_num {
13194
13195 #ifdef __KERNEL__
13196 #include <linux/seqlock.h>
13197 +#include <linux/getcpu.h>
13198 +#include <linux/time.h>
13199
13200 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13201 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13202
13203 /* Definitions for CONFIG_GENERIC_TIME definitions */
13204 #define __section_vsyscall_gtod_data __attribute__ \
13205 @@ -31,7 +32,6 @@ enum vsyscall_num {
13206 #define VGETCPU_LSL 2
13207
13208 extern int __vgetcpu_mode;
13209 -extern volatile unsigned long __jiffies;
13210
13211 /* kernel space (writeable) */
13212 extern int vgetcpu_mode;
13213 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13214
13215 extern void map_vsyscall(void);
13216
13217 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13218 +extern time_t vtime(time_t *t);
13219 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13220 #endif /* __KERNEL__ */
13221
13222 #endif /* _ASM_X86_VSYSCALL_H */
13223 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13224 index 2c756fd..3377e37 100644
13225 --- a/arch/x86/include/asm/x86_init.h
13226 +++ b/arch/x86/include/asm/x86_init.h
13227 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13228 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13229 void (*find_smp_config)(unsigned int reserve);
13230 void (*get_smp_config)(unsigned int early);
13231 -};
13232 +} __no_const;
13233
13234 /**
13235 * struct x86_init_resources - platform specific resource related ops
13236 @@ -42,7 +42,7 @@ struct x86_init_resources {
13237 void (*probe_roms)(void);
13238 void (*reserve_resources)(void);
13239 char *(*memory_setup)(void);
13240 -};
13241 +} __no_const;
13242
13243 /**
13244 * struct x86_init_irqs - platform specific interrupt setup
13245 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13246 void (*pre_vector_init)(void);
13247 void (*intr_init)(void);
13248 void (*trap_init)(void);
13249 -};
13250 +} __no_const;
13251
13252 /**
13253 * struct x86_init_oem - oem platform specific customizing functions
13254 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13255 struct x86_init_oem {
13256 void (*arch_setup)(void);
13257 void (*banner)(void);
13258 -};
13259 +} __no_const;
13260
13261 /**
13262 * struct x86_init_paging - platform specific paging functions
13263 @@ -75,7 +75,7 @@ struct x86_init_oem {
13264 struct x86_init_paging {
13265 void (*pagetable_setup_start)(pgd_t *base);
13266 void (*pagetable_setup_done)(pgd_t *base);
13267 -};
13268 +} __no_const;
13269
13270 /**
13271 * struct x86_init_timers - platform specific timer setup
13272 @@ -88,7 +88,7 @@ struct x86_init_timers {
13273 void (*setup_percpu_clockev)(void);
13274 void (*tsc_pre_init)(void);
13275 void (*timer_init)(void);
13276 -};
13277 +} __no_const;
13278
13279 /**
13280 * struct x86_init_ops - functions for platform specific setup
13281 @@ -101,7 +101,7 @@ struct x86_init_ops {
13282 struct x86_init_oem oem;
13283 struct x86_init_paging paging;
13284 struct x86_init_timers timers;
13285 -};
13286 +} __no_const;
13287
13288 /**
13289 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13290 @@ -109,7 +109,7 @@ struct x86_init_ops {
13291 */
13292 struct x86_cpuinit_ops {
13293 void (*setup_percpu_clockev)(void);
13294 -};
13295 +} __no_const;
13296
13297 /**
13298 * struct x86_platform_ops - platform specific runtime functions
13299 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13300 unsigned long (*calibrate_tsc)(void);
13301 unsigned long (*get_wallclock)(void);
13302 int (*set_wallclock)(unsigned long nowtime);
13303 -};
13304 +} __no_const;
13305
13306 extern struct x86_init_ops x86_init;
13307 extern struct x86_cpuinit_ops x86_cpuinit;
13308 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13309 index 727acc1..554f3eb 100644
13310 --- a/arch/x86/include/asm/xsave.h
13311 +++ b/arch/x86/include/asm/xsave.h
13312 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13313 static inline int xsave_user(struct xsave_struct __user *buf)
13314 {
13315 int err;
13316 +
13317 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13318 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13319 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13320 +#endif
13321 +
13322 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13323 "2:\n"
13324 ".section .fixup,\"ax\"\n"
13325 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13326 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13327 {
13328 int err;
13329 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13330 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13331 u32 lmask = mask;
13332 u32 hmask = mask >> 32;
13333
13334 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13335 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13336 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13337 +#endif
13338 +
13339 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13340 "2:\n"
13341 ".section .fixup,\"ax\"\n"
13342 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13343 index 6a564ac..9b1340c 100644
13344 --- a/arch/x86/kernel/acpi/realmode/Makefile
13345 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13346 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13347 $(call cc-option, -fno-stack-protector) \
13348 $(call cc-option, -mpreferred-stack-boundary=2)
13349 KBUILD_CFLAGS += $(call cc-option, -m32)
13350 +ifdef CONSTIFY_PLUGIN
13351 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13352 +endif
13353 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13354 GCOV_PROFILE := n
13355
13356 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13357 index 580b4e2..d4129e4 100644
13358 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13359 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13360 @@ -91,6 +91,9 @@ _start:
13361 /* Do any other stuff... */
13362
13363 #ifndef CONFIG_64BIT
13364 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13365 + call verify_cpu
13366 +
13367 /* This could also be done in C code... */
13368 movl pmode_cr3, %eax
13369 movl %eax, %cr3
13370 @@ -104,7 +107,7 @@ _start:
13371 movl %eax, %ecx
13372 orl %edx, %ecx
13373 jz 1f
13374 - movl $0xc0000080, %ecx
13375 + mov $MSR_EFER, %ecx
13376 wrmsr
13377 1:
13378
13379 @@ -114,6 +117,7 @@ _start:
13380 movl pmode_cr0, %eax
13381 movl %eax, %cr0
13382 jmp pmode_return
13383 +# include "../../verify_cpu.S"
13384 #else
13385 pushw $0
13386 pushw trampoline_segment
13387 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13388 index ca93638..7042f24 100644
13389 --- a/arch/x86/kernel/acpi/sleep.c
13390 +++ b/arch/x86/kernel/acpi/sleep.c
13391 @@ -11,11 +11,12 @@
13392 #include <linux/cpumask.h>
13393 #include <asm/segment.h>
13394 #include <asm/desc.h>
13395 +#include <asm/e820.h>
13396
13397 #include "realmode/wakeup.h"
13398 #include "sleep.h"
13399
13400 -unsigned long acpi_wakeup_address;
13401 +unsigned long acpi_wakeup_address = 0x2000;
13402 unsigned long acpi_realmode_flags;
13403
13404 /* address in low memory of the wakeup routine. */
13405 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13406 #else /* CONFIG_64BIT */
13407 header->trampoline_segment = setup_trampoline() >> 4;
13408 #ifdef CONFIG_SMP
13409 - stack_start.sp = temp_stack + sizeof(temp_stack);
13410 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13411 +
13412 + pax_open_kernel();
13413 early_gdt_descr.address =
13414 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13415 + pax_close_kernel();
13416 +
13417 initial_gs = per_cpu_offset(smp_processor_id());
13418 #endif
13419 initial_code = (unsigned long)wakeup_long64;
13420 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13421 return;
13422 }
13423
13424 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13425 -
13426 - if (!acpi_realmode) {
13427 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13428 - return;
13429 - }
13430 -
13431 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13432 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13433 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13434 }
13435
13436
13437 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13438 index 8ded418..079961e 100644
13439 --- a/arch/x86/kernel/acpi/wakeup_32.S
13440 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13441 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13442 # and restore the stack ... but you need gdt for this to work
13443 movl saved_context_esp, %esp
13444
13445 - movl %cs:saved_magic, %eax
13446 - cmpl $0x12345678, %eax
13447 + cmpl $0x12345678, saved_magic
13448 jne bogus_magic
13449
13450 # jump to place where we left off
13451 - movl saved_eip, %eax
13452 - jmp *%eax
13453 + jmp *(saved_eip)
13454
13455 bogus_magic:
13456 jmp bogus_magic
13457 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13458 index de7353c..075da5f 100644
13459 --- a/arch/x86/kernel/alternative.c
13460 +++ b/arch/x86/kernel/alternative.c
13461 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13462
13463 BUG_ON(p->len > MAX_PATCH_LEN);
13464 /* prep the buffer with the original instructions */
13465 - memcpy(insnbuf, p->instr, p->len);
13466 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13467 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13468 (unsigned long)p->instr, p->len);
13469
13470 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13471 if (smp_alt_once)
13472 free_init_pages("SMP alternatives",
13473 (unsigned long)__smp_locks,
13474 - (unsigned long)__smp_locks_end);
13475 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13476
13477 restart_nmi();
13478 }
13479 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13480 * instructions. And on the local CPU you need to be protected again NMI or MCE
13481 * handlers seeing an inconsistent instruction while you patch.
13482 */
13483 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13484 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13485 size_t len)
13486 {
13487 unsigned long flags;
13488 local_irq_save(flags);
13489 - memcpy(addr, opcode, len);
13490 +
13491 + pax_open_kernel();
13492 + memcpy(ktla_ktva(addr), opcode, len);
13493 sync_core();
13494 + pax_close_kernel();
13495 +
13496 local_irq_restore(flags);
13497 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13498 that causes hangs on some VIA CPUs. */
13499 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13500 */
13501 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13502 {
13503 - unsigned long flags;
13504 - char *vaddr;
13505 + unsigned char *vaddr = ktla_ktva(addr);
13506 struct page *pages[2];
13507 - int i;
13508 + size_t i;
13509
13510 if (!core_kernel_text((unsigned long)addr)) {
13511 - pages[0] = vmalloc_to_page(addr);
13512 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13513 + pages[0] = vmalloc_to_page(vaddr);
13514 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13515 } else {
13516 - pages[0] = virt_to_page(addr);
13517 + pages[0] = virt_to_page(vaddr);
13518 WARN_ON(!PageReserved(pages[0]));
13519 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13520 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13521 }
13522 BUG_ON(!pages[0]);
13523 - local_irq_save(flags);
13524 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13525 - if (pages[1])
13526 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13527 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13528 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13529 - clear_fixmap(FIX_TEXT_POKE0);
13530 - if (pages[1])
13531 - clear_fixmap(FIX_TEXT_POKE1);
13532 - local_flush_tlb();
13533 - sync_core();
13534 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13535 - that causes hangs on some VIA CPUs. */
13536 + text_poke_early(addr, opcode, len);
13537 for (i = 0; i < len; i++)
13538 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13539 - local_irq_restore(flags);
13540 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13541 return addr;
13542 }
13543 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13544 index 3a44b75..1601800 100644
13545 --- a/arch/x86/kernel/amd_iommu.c
13546 +++ b/arch/x86/kernel/amd_iommu.c
13547 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13548 }
13549 }
13550
13551 -static struct dma_map_ops amd_iommu_dma_ops = {
13552 +static const struct dma_map_ops amd_iommu_dma_ops = {
13553 .alloc_coherent = alloc_coherent,
13554 .free_coherent = free_coherent,
13555 .map_page = map_page,
13556 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13557 index 1d2d670..8e3f477 100644
13558 --- a/arch/x86/kernel/apic/apic.c
13559 +++ b/arch/x86/kernel/apic/apic.c
13560 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13561 /*
13562 * Debug level, exported for io_apic.c
13563 */
13564 -unsigned int apic_verbosity;
13565 +int apic_verbosity;
13566
13567 int pic_mode;
13568
13569 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13570 apic_write(APIC_ESR, 0);
13571 v1 = apic_read(APIC_ESR);
13572 ack_APIC_irq();
13573 - atomic_inc(&irq_err_count);
13574 + atomic_inc_unchecked(&irq_err_count);
13575
13576 /*
13577 * Here is what the APIC error bits mean:
13578 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13579 u16 *bios_cpu_apicid;
13580 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13581
13582 + pax_track_stack();
13583 +
13584 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13585 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13586
13587 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13588 index 8928d97..f799cea 100644
13589 --- a/arch/x86/kernel/apic/io_apic.c
13590 +++ b/arch/x86/kernel/apic/io_apic.c
13591 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13592 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13593 GFP_ATOMIC);
13594 if (!ioapic_entries)
13595 - return 0;
13596 + return NULL;
13597
13598 for (apic = 0; apic < nr_ioapics; apic++) {
13599 ioapic_entries[apic] =
13600 @@ -733,7 +733,7 @@ nomem:
13601 kfree(ioapic_entries[apic]);
13602 kfree(ioapic_entries);
13603
13604 - return 0;
13605 + return NULL;
13606 }
13607
13608 /*
13609 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13610 }
13611 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13612
13613 -void lock_vector_lock(void)
13614 +void lock_vector_lock(void) __acquires(vector_lock)
13615 {
13616 /* Used to the online set of cpus does not change
13617 * during assign_irq_vector.
13618 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13619 spin_lock(&vector_lock);
13620 }
13621
13622 -void unlock_vector_lock(void)
13623 +void unlock_vector_lock(void) __releases(vector_lock)
13624 {
13625 spin_unlock(&vector_lock);
13626 }
13627 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13628 ack_APIC_irq();
13629 }
13630
13631 -atomic_t irq_mis_count;
13632 +atomic_unchecked_t irq_mis_count;
13633
13634 static void ack_apic_level(unsigned int irq)
13635 {
13636 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13637
13638 /* Tail end of version 0x11 I/O APIC bug workaround */
13639 if (!(v & (1 << (i & 0x1f)))) {
13640 - atomic_inc(&irq_mis_count);
13641 + atomic_inc_unchecked(&irq_mis_count);
13642 spin_lock(&ioapic_lock);
13643 __mask_and_edge_IO_APIC_irq(cfg);
13644 __unmask_and_level_IO_APIC_irq(cfg);
13645 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13646 index 151ace6..f317474 100644
13647 --- a/arch/x86/kernel/apm_32.c
13648 +++ b/arch/x86/kernel/apm_32.c
13649 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13650 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13651 * even though they are called in protected mode.
13652 */
13653 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13654 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13655 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13656
13657 static const char driver_version[] = "1.16ac"; /* no spaces */
13658 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13659 BUG_ON(cpu != 0);
13660 gdt = get_cpu_gdt_table(cpu);
13661 save_desc_40 = gdt[0x40 / 8];
13662 +
13663 + pax_open_kernel();
13664 gdt[0x40 / 8] = bad_bios_desc;
13665 + pax_close_kernel();
13666
13667 apm_irq_save(flags);
13668 APM_DO_SAVE_SEGS;
13669 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13670 &call->esi);
13671 APM_DO_RESTORE_SEGS;
13672 apm_irq_restore(flags);
13673 +
13674 + pax_open_kernel();
13675 gdt[0x40 / 8] = save_desc_40;
13676 + pax_close_kernel();
13677 +
13678 put_cpu();
13679
13680 return call->eax & 0xff;
13681 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13682 BUG_ON(cpu != 0);
13683 gdt = get_cpu_gdt_table(cpu);
13684 save_desc_40 = gdt[0x40 / 8];
13685 +
13686 + pax_open_kernel();
13687 gdt[0x40 / 8] = bad_bios_desc;
13688 + pax_close_kernel();
13689
13690 apm_irq_save(flags);
13691 APM_DO_SAVE_SEGS;
13692 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13693 &call->eax);
13694 APM_DO_RESTORE_SEGS;
13695 apm_irq_restore(flags);
13696 +
13697 + pax_open_kernel();
13698 gdt[0x40 / 8] = save_desc_40;
13699 + pax_close_kernel();
13700 +
13701 put_cpu();
13702 return error;
13703 }
13704 @@ -975,7 +989,7 @@ recalc:
13705
13706 static void apm_power_off(void)
13707 {
13708 - unsigned char po_bios_call[] = {
13709 + const unsigned char po_bios_call[] = {
13710 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13711 0x8e, 0xd0, /* movw ax,ss */
13712 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13713 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13714 * code to that CPU.
13715 */
13716 gdt = get_cpu_gdt_table(0);
13717 +
13718 + pax_open_kernel();
13719 set_desc_base(&gdt[APM_CS >> 3],
13720 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13721 set_desc_base(&gdt[APM_CS_16 >> 3],
13722 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13723 set_desc_base(&gdt[APM_DS >> 3],
13724 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13725 + pax_close_kernel();
13726
13727 proc_create("apm", 0, NULL, &apm_file_ops);
13728
13729 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13730 index dfdbf64..9b2b6ce 100644
13731 --- a/arch/x86/kernel/asm-offsets_32.c
13732 +++ b/arch/x86/kernel/asm-offsets_32.c
13733 @@ -51,7 +51,6 @@ void foo(void)
13734 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13735 BLANK();
13736
13737 - OFFSET(TI_task, thread_info, task);
13738 OFFSET(TI_exec_domain, thread_info, exec_domain);
13739 OFFSET(TI_flags, thread_info, flags);
13740 OFFSET(TI_status, thread_info, status);
13741 @@ -60,6 +59,8 @@ void foo(void)
13742 OFFSET(TI_restart_block, thread_info, restart_block);
13743 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13744 OFFSET(TI_cpu, thread_info, cpu);
13745 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13746 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13747 BLANK();
13748
13749 OFFSET(GDS_size, desc_ptr, size);
13750 @@ -99,6 +100,7 @@ void foo(void)
13751
13752 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13753 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13754 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13755 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13756 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13757 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13758 @@ -115,6 +117,11 @@ void foo(void)
13759 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13760 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13761 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13762 +
13763 +#ifdef CONFIG_PAX_KERNEXEC
13764 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13765 +#endif
13766 +
13767 #endif
13768
13769 #ifdef CONFIG_XEN
13770 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13771 index 4a6aeed..371de20 100644
13772 --- a/arch/x86/kernel/asm-offsets_64.c
13773 +++ b/arch/x86/kernel/asm-offsets_64.c
13774 @@ -44,6 +44,8 @@ int main(void)
13775 ENTRY(addr_limit);
13776 ENTRY(preempt_count);
13777 ENTRY(status);
13778 + ENTRY(lowest_stack);
13779 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13780 #ifdef CONFIG_IA32_EMULATION
13781 ENTRY(sysenter_return);
13782 #endif
13783 @@ -63,6 +65,18 @@ int main(void)
13784 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13785 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13786 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13787 +
13788 +#ifdef CONFIG_PAX_KERNEXEC
13789 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13790 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13791 +#endif
13792 +
13793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13794 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13795 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13796 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13797 +#endif
13798 +
13799 #endif
13800
13801
13802 @@ -115,6 +129,7 @@ int main(void)
13803 ENTRY(cr8);
13804 BLANK();
13805 #undef ENTRY
13806 + DEFINE(TSS_size, sizeof(struct tss_struct));
13807 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13808 BLANK();
13809 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13810 @@ -130,6 +145,7 @@ int main(void)
13811
13812 BLANK();
13813 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13814 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13815 #ifdef CONFIG_XEN
13816 BLANK();
13817 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13818 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13819 index ff502cc..dc5133e 100644
13820 --- a/arch/x86/kernel/cpu/Makefile
13821 +++ b/arch/x86/kernel/cpu/Makefile
13822 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13823 CFLAGS_REMOVE_common.o = -pg
13824 endif
13825
13826 -# Make sure load_percpu_segment has no stackprotector
13827 -nostackp := $(call cc-option, -fno-stack-protector)
13828 -CFLAGS_common.o := $(nostackp)
13829 -
13830 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13831 obj-y += proc.o capflags.o powerflags.o common.o
13832 obj-y += vmware.o hypervisor.o sched.o
13833 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13834 index 6e082dc..a0b5f36 100644
13835 --- a/arch/x86/kernel/cpu/amd.c
13836 +++ b/arch/x86/kernel/cpu/amd.c
13837 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13838 unsigned int size)
13839 {
13840 /* AMD errata T13 (order #21922) */
13841 - if ((c->x86 == 6)) {
13842 + if (c->x86 == 6) {
13843 /* Duron Rev A0 */
13844 if (c->x86_model == 3 && c->x86_mask == 0)
13845 size = 64;
13846 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13847 index 4e34d10..ba6bc97 100644
13848 --- a/arch/x86/kernel/cpu/common.c
13849 +++ b/arch/x86/kernel/cpu/common.c
13850 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13851
13852 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13853
13854 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13855 -#ifdef CONFIG_X86_64
13856 - /*
13857 - * We need valid kernel segments for data and code in long mode too
13858 - * IRET will check the segment types kkeil 2000/10/28
13859 - * Also sysret mandates a special GDT layout
13860 - *
13861 - * TLS descriptors are currently at a different place compared to i386.
13862 - * Hopefully nobody expects them at a fixed place (Wine?)
13863 - */
13864 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13865 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13866 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13867 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13868 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13869 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13870 -#else
13871 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13872 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13873 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13874 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13875 - /*
13876 - * Segments used for calling PnP BIOS have byte granularity.
13877 - * They code segments and data segments have fixed 64k limits,
13878 - * the transfer segment sizes are set at run time.
13879 - */
13880 - /* 32-bit code */
13881 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13882 - /* 16-bit code */
13883 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13884 - /* 16-bit data */
13885 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13886 - /* 16-bit data */
13887 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13888 - /* 16-bit data */
13889 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13890 - /*
13891 - * The APM segments have byte granularity and their bases
13892 - * are set at run time. All have 64k limits.
13893 - */
13894 - /* 32-bit code */
13895 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13896 - /* 16-bit code */
13897 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13898 - /* data */
13899 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13900 -
13901 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13902 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13903 - GDT_STACK_CANARY_INIT
13904 -#endif
13905 -} };
13906 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13907 -
13908 static int __init x86_xsave_setup(char *s)
13909 {
13910 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13911 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13912 {
13913 struct desc_ptr gdt_descr;
13914
13915 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13916 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13917 gdt_descr.size = GDT_SIZE - 1;
13918 load_gdt(&gdt_descr);
13919 /* Reload the per-cpu base */
13920 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13921 /* Filter out anything that depends on CPUID levels we don't have */
13922 filter_cpuid_features(c, true);
13923
13924 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13925 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13926 +#endif
13927 +
13928 /* If the model name is still unset, do table lookup. */
13929 if (!c->x86_model_id[0]) {
13930 const char *p;
13931 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13932 }
13933 __setup("clearcpuid=", setup_disablecpuid);
13934
13935 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13936 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13937 +
13938 #ifdef CONFIG_X86_64
13939 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13940
13941 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13942 EXPORT_PER_CPU_SYMBOL(current_task);
13943
13944 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13945 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13946 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13947 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13948
13949 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13950 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13951 {
13952 memset(regs, 0, sizeof(struct pt_regs));
13953 regs->fs = __KERNEL_PERCPU;
13954 - regs->gs = __KERNEL_STACK_CANARY;
13955 + savesegment(gs, regs->gs);
13956
13957 return regs;
13958 }
13959 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13960 int i;
13961
13962 cpu = stack_smp_processor_id();
13963 - t = &per_cpu(init_tss, cpu);
13964 + t = init_tss + cpu;
13965 orig_ist = &per_cpu(orig_ist, cpu);
13966
13967 #ifdef CONFIG_NUMA
13968 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13969 switch_to_new_gdt(cpu);
13970 loadsegment(fs, 0);
13971
13972 - load_idt((const struct desc_ptr *)&idt_descr);
13973 + load_idt(&idt_descr);
13974
13975 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13976 syscall_init();
13977 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13978 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13979 barrier();
13980
13981 - check_efer();
13982 if (cpu != 0)
13983 enable_x2apic();
13984
13985 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13986 {
13987 int cpu = smp_processor_id();
13988 struct task_struct *curr = current;
13989 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13990 + struct tss_struct *t = init_tss + cpu;
13991 struct thread_struct *thread = &curr->thread;
13992
13993 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13994 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13995 index 6a77cca..4f4fca0 100644
13996 --- a/arch/x86/kernel/cpu/intel.c
13997 +++ b/arch/x86/kernel/cpu/intel.c
13998 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13999 * Update the IDT descriptor and reload the IDT so that
14000 * it uses the read-only mapped virtual address.
14001 */
14002 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14003 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14004 load_idt(&idt_descr);
14005 }
14006 #endif
14007 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14008 index 417990f..96dc36b 100644
14009 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14010 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14011 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14012 return ret;
14013 }
14014
14015 -static struct sysfs_ops sysfs_ops = {
14016 +static const struct sysfs_ops sysfs_ops = {
14017 .show = show,
14018 .store = store,
14019 };
14020 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14021 index 472763d..9831e11 100644
14022 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14023 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14024 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14025 static int inject_init(void)
14026 {
14027 printk(KERN_INFO "Machine check injector initialized\n");
14028 - mce_chrdev_ops.write = mce_write;
14029 + pax_open_kernel();
14030 + *(void **)&mce_chrdev_ops.write = mce_write;
14031 + pax_close_kernel();
14032 register_die_notifier(&mce_raise_nb);
14033 return 0;
14034 }
14035 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14036 index 0f16a2b..21740f5 100644
14037 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14038 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14039 @@ -43,6 +43,7 @@
14040 #include <asm/ipi.h>
14041 #include <asm/mce.h>
14042 #include <asm/msr.h>
14043 +#include <asm/local.h>
14044
14045 #include "mce-internal.h"
14046
14047 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14048 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14049 m->cs, m->ip);
14050
14051 - if (m->cs == __KERNEL_CS)
14052 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14053 print_symbol("{%s}", m->ip);
14054 pr_cont("\n");
14055 }
14056 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
14057
14058 #define PANIC_TIMEOUT 5 /* 5 seconds */
14059
14060 -static atomic_t mce_paniced;
14061 +static atomic_unchecked_t mce_paniced;
14062
14063 static int fake_panic;
14064 -static atomic_t mce_fake_paniced;
14065 +static atomic_unchecked_t mce_fake_paniced;
14066
14067 /* Panic in progress. Enable interrupts and wait for final IPI */
14068 static void wait_for_panic(void)
14069 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14070 /*
14071 * Make sure only one CPU runs in machine check panic
14072 */
14073 - if (atomic_inc_return(&mce_paniced) > 1)
14074 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14075 wait_for_panic();
14076 barrier();
14077
14078 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14079 console_verbose();
14080 } else {
14081 /* Don't log too much for fake panic */
14082 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14083 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14084 return;
14085 }
14086 print_mce_head();
14087 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14088 * might have been modified by someone else.
14089 */
14090 rmb();
14091 - if (atomic_read(&mce_paniced))
14092 + if (atomic_read_unchecked(&mce_paniced))
14093 wait_for_panic();
14094 if (!monarch_timeout)
14095 goto out;
14096 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14097 }
14098
14099 /* Call the installed machine check handler for this CPU setup. */
14100 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14101 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14102 unexpected_machine_check;
14103
14104 /*
14105 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14106 return;
14107 }
14108
14109 + pax_open_kernel();
14110 machine_check_vector = do_machine_check;
14111 + pax_close_kernel();
14112
14113 mce_init();
14114 mce_cpu_features(c);
14115 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14116 */
14117
14118 static DEFINE_SPINLOCK(mce_state_lock);
14119 -static int open_count; /* #times opened */
14120 +static local_t open_count; /* #times opened */
14121 static int open_exclu; /* already open exclusive? */
14122
14123 static int mce_open(struct inode *inode, struct file *file)
14124 {
14125 spin_lock(&mce_state_lock);
14126
14127 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14128 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14129 spin_unlock(&mce_state_lock);
14130
14131 return -EBUSY;
14132 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14133
14134 if (file->f_flags & O_EXCL)
14135 open_exclu = 1;
14136 - open_count++;
14137 + local_inc(&open_count);
14138
14139 spin_unlock(&mce_state_lock);
14140
14141 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14142 {
14143 spin_lock(&mce_state_lock);
14144
14145 - open_count--;
14146 + local_dec(&open_count);
14147 open_exclu = 0;
14148
14149 spin_unlock(&mce_state_lock);
14150 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14151 static void mce_reset(void)
14152 {
14153 cpu_missing = 0;
14154 - atomic_set(&mce_fake_paniced, 0);
14155 + atomic_set_unchecked(&mce_fake_paniced, 0);
14156 atomic_set(&mce_executing, 0);
14157 atomic_set(&mce_callin, 0);
14158 atomic_set(&global_nwo, 0);
14159 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14160 index ef3cd31..9d2f6ab 100644
14161 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14162 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14163 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14164 return ret;
14165 }
14166
14167 -static struct sysfs_ops threshold_ops = {
14168 +static const struct sysfs_ops threshold_ops = {
14169 .show = show,
14170 .store = store,
14171 };
14172 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14173 index 5c0e653..0882b0a 100644
14174 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14175 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14176 @@ -12,6 +12,7 @@
14177 #include <asm/system.h>
14178 #include <asm/mce.h>
14179 #include <asm/msr.h>
14180 +#include <asm/pgtable.h>
14181
14182 /* By default disabled */
14183 int mce_p5_enabled __read_mostly;
14184 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14185 if (!cpu_has(c, X86_FEATURE_MCE))
14186 return;
14187
14188 + pax_open_kernel();
14189 machine_check_vector = pentium_machine_check;
14190 + pax_close_kernel();
14191 /* Make sure the vector pointer is visible before we enable MCEs: */
14192 wmb();
14193
14194 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14195 index 54060f5..c1a7577 100644
14196 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14197 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14198 @@ -11,6 +11,7 @@
14199 #include <asm/system.h>
14200 #include <asm/mce.h>
14201 #include <asm/msr.h>
14202 +#include <asm/pgtable.h>
14203
14204 /* Machine check handler for WinChip C6: */
14205 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14206 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14207 {
14208 u32 lo, hi;
14209
14210 + pax_open_kernel();
14211 machine_check_vector = winchip_machine_check;
14212 + pax_close_kernel();
14213 /* Make sure the vector pointer is visible before we enable MCEs: */
14214 wmb();
14215
14216 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14217 index 33af141..92ba9cd 100644
14218 --- a/arch/x86/kernel/cpu/mtrr/amd.c
14219 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
14220 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14221 return 0;
14222 }
14223
14224 -static struct mtrr_ops amd_mtrr_ops = {
14225 +static const struct mtrr_ops amd_mtrr_ops = {
14226 .vendor = X86_VENDOR_AMD,
14227 .set = amd_set_mtrr,
14228 .get = amd_get_mtrr,
14229 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14230 index de89f14..316fe3e 100644
14231 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
14232 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14233 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14234 return 0;
14235 }
14236
14237 -static struct mtrr_ops centaur_mtrr_ops = {
14238 +static const struct mtrr_ops centaur_mtrr_ops = {
14239 .vendor = X86_VENDOR_CENTAUR,
14240 .set = centaur_set_mcr,
14241 .get = centaur_get_mcr,
14242 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14243 index 228d982..68a3343 100644
14244 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14245 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14246 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14247 post_set();
14248 }
14249
14250 -static struct mtrr_ops cyrix_mtrr_ops = {
14251 +static const struct mtrr_ops cyrix_mtrr_ops = {
14252 .vendor = X86_VENDOR_CYRIX,
14253 .set_all = cyrix_set_all,
14254 .set = cyrix_set_arr,
14255 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14256 index 55da0c5..4d75584 100644
14257 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14258 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14259 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14260 /*
14261 * Generic structure...
14262 */
14263 -struct mtrr_ops generic_mtrr_ops = {
14264 +const struct mtrr_ops generic_mtrr_ops = {
14265 .use_intel_if = 1,
14266 .set_all = generic_set_all,
14267 .get = generic_get_mtrr,
14268 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14269 index fd60f09..c94ef52 100644
14270 --- a/arch/x86/kernel/cpu/mtrr/main.c
14271 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14272 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14273 u64 size_or_mask, size_and_mask;
14274 static bool mtrr_aps_delayed_init;
14275
14276 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14277 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14278
14279 -struct mtrr_ops *mtrr_if;
14280 +const struct mtrr_ops *mtrr_if;
14281
14282 static void set_mtrr(unsigned int reg, unsigned long base,
14283 unsigned long size, mtrr_type type);
14284
14285 -void set_mtrr_ops(struct mtrr_ops *ops)
14286 +void set_mtrr_ops(const struct mtrr_ops *ops)
14287 {
14288 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14289 mtrr_ops[ops->vendor] = ops;
14290 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14291 index a501dee..816c719 100644
14292 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14293 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14294 @@ -25,14 +25,14 @@ struct mtrr_ops {
14295 int (*validate_add_page)(unsigned long base, unsigned long size,
14296 unsigned int type);
14297 int (*have_wrcomb)(void);
14298 -};
14299 +} __do_const;
14300
14301 extern int generic_get_free_region(unsigned long base, unsigned long size,
14302 int replace_reg);
14303 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14304 unsigned int type);
14305
14306 -extern struct mtrr_ops generic_mtrr_ops;
14307 +extern const struct mtrr_ops generic_mtrr_ops;
14308
14309 extern int positive_have_wrcomb(void);
14310
14311 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14312 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14313 void get_mtrr_state(void);
14314
14315 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14316 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14317
14318 extern u64 size_or_mask, size_and_mask;
14319 -extern struct mtrr_ops *mtrr_if;
14320 +extern const struct mtrr_ops *mtrr_if;
14321
14322 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14323 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14324 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14325 index 0ff02ca..fc49a60 100644
14326 --- a/arch/x86/kernel/cpu/perf_event.c
14327 +++ b/arch/x86/kernel/cpu/perf_event.c
14328 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14329 * count to the generic event atomically:
14330 */
14331 again:
14332 - prev_raw_count = atomic64_read(&hwc->prev_count);
14333 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14334 rdmsrl(hwc->event_base + idx, new_raw_count);
14335
14336 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14337 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14338 new_raw_count) != prev_raw_count)
14339 goto again;
14340
14341 @@ -741,7 +741,7 @@ again:
14342 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14343 delta >>= shift;
14344
14345 - atomic64_add(delta, &event->count);
14346 + atomic64_add_unchecked(delta, &event->count);
14347 atomic64_sub(delta, &hwc->period_left);
14348
14349 return new_raw_count;
14350 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14351 * The hw event starts counting from this event offset,
14352 * mark it to be able to extra future deltas:
14353 */
14354 - atomic64_set(&hwc->prev_count, (u64)-left);
14355 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14356
14357 err = checking_wrmsrl(hwc->event_base + idx,
14358 (u64)(-left) & x86_pmu.event_mask);
14359 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14360 break;
14361
14362 callchain_store(entry, frame.return_address);
14363 - fp = frame.next_frame;
14364 + fp = (__force const void __user *)frame.next_frame;
14365 }
14366 }
14367
14368 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14369 index 898df97..9e82503 100644
14370 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14371 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14372 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14373
14374 /* Interface defining a CPU specific perfctr watchdog */
14375 struct wd_ops {
14376 - int (*reserve)(void);
14377 - void (*unreserve)(void);
14378 - int (*setup)(unsigned nmi_hz);
14379 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14380 - void (*stop)(void);
14381 + int (* const reserve)(void);
14382 + void (* const unreserve)(void);
14383 + int (* const setup)(unsigned nmi_hz);
14384 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14385 + void (* const stop)(void);
14386 unsigned perfctr;
14387 unsigned evntsel;
14388 u64 checkbit;
14389 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14390 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14391 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14392
14393 +/* cannot be const */
14394 static struct wd_ops intel_arch_wd_ops;
14395
14396 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14397 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14398 return 1;
14399 }
14400
14401 +/* cannot be const */
14402 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14403 .reserve = single_msr_reserve,
14404 .unreserve = single_msr_unreserve,
14405 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14406 index ff95824..2ffdcb5 100644
14407 --- a/arch/x86/kernel/crash.c
14408 +++ b/arch/x86/kernel/crash.c
14409 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14410 regs = args->regs;
14411
14412 #ifdef CONFIG_X86_32
14413 - if (!user_mode_vm(regs)) {
14414 + if (!user_mode(regs)) {
14415 crash_fixup_ss_esp(&fixed_regs, regs);
14416 regs = &fixed_regs;
14417 }
14418 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14419 index 37250fe..bf2ec74 100644
14420 --- a/arch/x86/kernel/doublefault_32.c
14421 +++ b/arch/x86/kernel/doublefault_32.c
14422 @@ -11,7 +11,7 @@
14423
14424 #define DOUBLEFAULT_STACKSIZE (1024)
14425 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14426 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14427 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14428
14429 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14430
14431 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14432 unsigned long gdt, tss;
14433
14434 store_gdt(&gdt_desc);
14435 - gdt = gdt_desc.address;
14436 + gdt = (unsigned long)gdt_desc.address;
14437
14438 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14439
14440 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14441 /* 0x2 bit is always set */
14442 .flags = X86_EFLAGS_SF | 0x2,
14443 .sp = STACK_START,
14444 - .es = __USER_DS,
14445 + .es = __KERNEL_DS,
14446 .cs = __KERNEL_CS,
14447 .ss = __KERNEL_DS,
14448 - .ds = __USER_DS,
14449 + .ds = __KERNEL_DS,
14450 .fs = __KERNEL_PERCPU,
14451
14452 .__cr3 = __pa_nodebug(swapper_pg_dir),
14453 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14454 index 2d8a371..4fa6ae6 100644
14455 --- a/arch/x86/kernel/dumpstack.c
14456 +++ b/arch/x86/kernel/dumpstack.c
14457 @@ -2,6 +2,9 @@
14458 * Copyright (C) 1991, 1992 Linus Torvalds
14459 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14460 */
14461 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14462 +#define __INCLUDED_BY_HIDESYM 1
14463 +#endif
14464 #include <linux/kallsyms.h>
14465 #include <linux/kprobes.h>
14466 #include <linux/uaccess.h>
14467 @@ -28,7 +31,7 @@ static int die_counter;
14468
14469 void printk_address(unsigned long address, int reliable)
14470 {
14471 - printk(" [<%p>] %s%pS\n", (void *) address,
14472 + printk(" [<%p>] %s%pA\n", (void *) address,
14473 reliable ? "" : "? ", (void *) address);
14474 }
14475
14476 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14477 static void
14478 print_ftrace_graph_addr(unsigned long addr, void *data,
14479 const struct stacktrace_ops *ops,
14480 - struct thread_info *tinfo, int *graph)
14481 + struct task_struct *task, int *graph)
14482 {
14483 - struct task_struct *task = tinfo->task;
14484 unsigned long ret_addr;
14485 int index = task->curr_ret_stack;
14486
14487 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14488 static inline void
14489 print_ftrace_graph_addr(unsigned long addr, void *data,
14490 const struct stacktrace_ops *ops,
14491 - struct thread_info *tinfo, int *graph)
14492 + struct task_struct *task, int *graph)
14493 { }
14494 #endif
14495
14496 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14497 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14498 */
14499
14500 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14501 - void *p, unsigned int size, void *end)
14502 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14503 {
14504 - void *t = tinfo;
14505 if (end) {
14506 if (p < end && p >= (end-THREAD_SIZE))
14507 return 1;
14508 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14509 }
14510
14511 unsigned long
14512 -print_context_stack(struct thread_info *tinfo,
14513 +print_context_stack(struct task_struct *task, void *stack_start,
14514 unsigned long *stack, unsigned long bp,
14515 const struct stacktrace_ops *ops, void *data,
14516 unsigned long *end, int *graph)
14517 {
14518 struct stack_frame *frame = (struct stack_frame *)bp;
14519
14520 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14521 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14522 unsigned long addr;
14523
14524 addr = *stack;
14525 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14526 } else {
14527 ops->address(data, addr, 0);
14528 }
14529 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14530 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14531 }
14532 stack++;
14533 }
14534 @@ -180,7 +180,7 @@ void dump_stack(void)
14535 #endif
14536
14537 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14538 - current->pid, current->comm, print_tainted(),
14539 + task_pid_nr(current), current->comm, print_tainted(),
14540 init_utsname()->release,
14541 (int)strcspn(init_utsname()->version, " "),
14542 init_utsname()->version);
14543 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14544 return flags;
14545 }
14546
14547 +extern void gr_handle_kernel_exploit(void);
14548 +
14549 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14550 {
14551 if (regs && kexec_should_crash(current))
14552 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14553 panic("Fatal exception in interrupt");
14554 if (panic_on_oops)
14555 panic("Fatal exception");
14556 - do_exit(signr);
14557 +
14558 + gr_handle_kernel_exploit();
14559 +
14560 + do_group_exit(signr);
14561 }
14562
14563 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14564 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14565 unsigned long flags = oops_begin();
14566 int sig = SIGSEGV;
14567
14568 - if (!user_mode_vm(regs))
14569 + if (!user_mode(regs))
14570 report_bug(regs->ip, regs);
14571
14572 if (__die(str, regs, err))
14573 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14574 index 81086c2..13e8b17 100644
14575 --- a/arch/x86/kernel/dumpstack.h
14576 +++ b/arch/x86/kernel/dumpstack.h
14577 @@ -15,7 +15,7 @@
14578 #endif
14579
14580 extern unsigned long
14581 -print_context_stack(struct thread_info *tinfo,
14582 +print_context_stack(struct task_struct *task, void *stack_start,
14583 unsigned long *stack, unsigned long bp,
14584 const struct stacktrace_ops *ops, void *data,
14585 unsigned long *end, int *graph);
14586 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14587 index f7dd2a7..504f53b 100644
14588 --- a/arch/x86/kernel/dumpstack_32.c
14589 +++ b/arch/x86/kernel/dumpstack_32.c
14590 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14591 #endif
14592
14593 for (;;) {
14594 - struct thread_info *context;
14595 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14596 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14597
14598 - context = (struct thread_info *)
14599 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14600 - bp = print_context_stack(context, stack, bp, ops,
14601 - data, NULL, &graph);
14602 -
14603 - stack = (unsigned long *)context->previous_esp;
14604 - if (!stack)
14605 + if (stack_start == task_stack_page(task))
14606 break;
14607 + stack = *(unsigned long **)stack_start;
14608 if (ops->stack(data, "IRQ") < 0)
14609 break;
14610 touch_nmi_watchdog();
14611 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14612 * When in-kernel, we also print out the stack and code at the
14613 * time of the fault..
14614 */
14615 - if (!user_mode_vm(regs)) {
14616 + if (!user_mode(regs)) {
14617 unsigned int code_prologue = code_bytes * 43 / 64;
14618 unsigned int code_len = code_bytes;
14619 unsigned char c;
14620 u8 *ip;
14621 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14622
14623 printk(KERN_EMERG "Stack:\n");
14624 show_stack_log_lvl(NULL, regs, &regs->sp,
14625 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14626
14627 printk(KERN_EMERG "Code: ");
14628
14629 - ip = (u8 *)regs->ip - code_prologue;
14630 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14631 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14632 /* try starting at IP */
14633 - ip = (u8 *)regs->ip;
14634 + ip = (u8 *)regs->ip + cs_base;
14635 code_len = code_len - code_prologue + 1;
14636 }
14637 for (i = 0; i < code_len; i++, ip++) {
14638 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14639 printk(" Bad EIP value.");
14640 break;
14641 }
14642 - if (ip == (u8 *)regs->ip)
14643 + if (ip == (u8 *)regs->ip + cs_base)
14644 printk("<%02x> ", c);
14645 else
14646 printk("%02x ", c);
14647 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14648 printk("\n");
14649 }
14650
14651 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14652 +void pax_check_alloca(unsigned long size)
14653 +{
14654 + unsigned long sp = (unsigned long)&sp, stack_left;
14655 +
14656 + /* all kernel stacks are of the same size */
14657 + stack_left = sp & (THREAD_SIZE - 1);
14658 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14659 +}
14660 +EXPORT_SYMBOL(pax_check_alloca);
14661 +#endif
14662 +
14663 int is_valid_bugaddr(unsigned long ip)
14664 {
14665 unsigned short ud2;
14666
14667 + ip = ktla_ktva(ip);
14668 if (ip < PAGE_OFFSET)
14669 return 0;
14670 if (probe_kernel_address((unsigned short *)ip, ud2))
14671 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14672 index a071e6b..36cd585 100644
14673 --- a/arch/x86/kernel/dumpstack_64.c
14674 +++ b/arch/x86/kernel/dumpstack_64.c
14675 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14676 unsigned long *irq_stack_end =
14677 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14678 unsigned used = 0;
14679 - struct thread_info *tinfo;
14680 int graph = 0;
14681 + void *stack_start;
14682
14683 if (!task)
14684 task = current;
14685 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14686 * current stack address. If the stacks consist of nested
14687 * exceptions
14688 */
14689 - tinfo = task_thread_info(task);
14690 for (;;) {
14691 char *id;
14692 unsigned long *estack_end;
14693 +
14694 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14695 &used, &id);
14696
14697 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14698 if (ops->stack(data, id) < 0)
14699 break;
14700
14701 - bp = print_context_stack(tinfo, stack, bp, ops,
14702 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14703 data, estack_end, &graph);
14704 ops->stack(data, "<EOE>");
14705 /*
14706 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14707 if (stack >= irq_stack && stack < irq_stack_end) {
14708 if (ops->stack(data, "IRQ") < 0)
14709 break;
14710 - bp = print_context_stack(tinfo, stack, bp,
14711 + bp = print_context_stack(task, irq_stack, stack, bp,
14712 ops, data, irq_stack_end, &graph);
14713 /*
14714 * We link to the next stack (which would be
14715 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 /*
14717 * This handles the process stack:
14718 */
14719 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14720 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14721 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14722 put_cpu();
14723 }
14724 EXPORT_SYMBOL(dump_trace);
14725 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14726 return ud2 == 0x0b0f;
14727 }
14728
14729 +
14730 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14731 +void pax_check_alloca(unsigned long size)
14732 +{
14733 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14734 + unsigned cpu, used;
14735 + char *id;
14736 +
14737 + /* check the process stack first */
14738 + stack_start = (unsigned long)task_stack_page(current);
14739 + stack_end = stack_start + THREAD_SIZE;
14740 + if (likely(stack_start <= sp && sp < stack_end)) {
14741 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14742 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14743 + return;
14744 + }
14745 +
14746 + cpu = get_cpu();
14747 +
14748 + /* check the irq stacks */
14749 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14750 + stack_start = stack_end - IRQ_STACK_SIZE;
14751 + if (stack_start <= sp && sp < stack_end) {
14752 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14753 + put_cpu();
14754 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14755 + return;
14756 + }
14757 +
14758 + /* check the exception stacks */
14759 + used = 0;
14760 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14761 + stack_start = stack_end - EXCEPTION_STKSZ;
14762 + if (stack_end && stack_start <= sp && sp < stack_end) {
14763 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14764 + put_cpu();
14765 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14766 + return;
14767 + }
14768 +
14769 + put_cpu();
14770 +
14771 + /* unknown stack */
14772 + BUG();
14773 +}
14774 +EXPORT_SYMBOL(pax_check_alloca);
14775 +#endif
14776 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14777 index a89739a..95e0c48 100644
14778 --- a/arch/x86/kernel/e820.c
14779 +++ b/arch/x86/kernel/e820.c
14780 @@ -733,7 +733,7 @@ struct early_res {
14781 };
14782 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14783 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14784 - {}
14785 + { 0, 0, {0}, 0 }
14786 };
14787
14788 static int __init find_overlapped_early(u64 start, u64 end)
14789 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14790 index b9c830c..1e41a96 100644
14791 --- a/arch/x86/kernel/early_printk.c
14792 +++ b/arch/x86/kernel/early_printk.c
14793 @@ -7,6 +7,7 @@
14794 #include <linux/pci_regs.h>
14795 #include <linux/pci_ids.h>
14796 #include <linux/errno.h>
14797 +#include <linux/sched.h>
14798 #include <asm/io.h>
14799 #include <asm/processor.h>
14800 #include <asm/fcntl.h>
14801 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14802 int n;
14803 va_list ap;
14804
14805 + pax_track_stack();
14806 +
14807 va_start(ap, fmt);
14808 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14809 early_console->write(early_console, buf, n);
14810 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14811 index 5cab48e..b025f9b 100644
14812 --- a/arch/x86/kernel/efi_32.c
14813 +++ b/arch/x86/kernel/efi_32.c
14814 @@ -38,70 +38,56 @@
14815 */
14816
14817 static unsigned long efi_rt_eflags;
14818 -static pgd_t efi_bak_pg_dir_pointer[2];
14819 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14820
14821 -void efi_call_phys_prelog(void)
14822 +void __init efi_call_phys_prelog(void)
14823 {
14824 - unsigned long cr4;
14825 - unsigned long temp;
14826 struct desc_ptr gdt_descr;
14827
14828 +#ifdef CONFIG_PAX_KERNEXEC
14829 + struct desc_struct d;
14830 +#endif
14831 +
14832 local_irq_save(efi_rt_eflags);
14833
14834 - /*
14835 - * If I don't have PAE, I should just duplicate two entries in page
14836 - * directory. If I have PAE, I just need to duplicate one entry in
14837 - * page directory.
14838 - */
14839 - cr4 = read_cr4_safe();
14840 -
14841 - if (cr4 & X86_CR4_PAE) {
14842 - efi_bak_pg_dir_pointer[0].pgd =
14843 - swapper_pg_dir[pgd_index(0)].pgd;
14844 - swapper_pg_dir[0].pgd =
14845 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14846 - } else {
14847 - efi_bak_pg_dir_pointer[0].pgd =
14848 - swapper_pg_dir[pgd_index(0)].pgd;
14849 - efi_bak_pg_dir_pointer[1].pgd =
14850 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14851 - swapper_pg_dir[pgd_index(0)].pgd =
14852 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14853 - temp = PAGE_OFFSET + 0x400000;
14854 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14855 - swapper_pg_dir[pgd_index(temp)].pgd;
14856 - }
14857 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14858 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14859 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14860
14861 /*
14862 * After the lock is released, the original page table is restored.
14863 */
14864 __flush_tlb_all();
14865
14866 +#ifdef CONFIG_PAX_KERNEXEC
14867 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14868 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14869 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14870 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14871 +#endif
14872 +
14873 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14874 gdt_descr.size = GDT_SIZE - 1;
14875 load_gdt(&gdt_descr);
14876 }
14877
14878 -void efi_call_phys_epilog(void)
14879 +void __init efi_call_phys_epilog(void)
14880 {
14881 - unsigned long cr4;
14882 struct desc_ptr gdt_descr;
14883
14884 +#ifdef CONFIG_PAX_KERNEXEC
14885 + struct desc_struct d;
14886 +
14887 + memset(&d, 0, sizeof d);
14888 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14889 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14890 +#endif
14891 +
14892 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14893 gdt_descr.size = GDT_SIZE - 1;
14894 load_gdt(&gdt_descr);
14895
14896 - cr4 = read_cr4_safe();
14897 -
14898 - if (cr4 & X86_CR4_PAE) {
14899 - swapper_pg_dir[pgd_index(0)].pgd =
14900 - efi_bak_pg_dir_pointer[0].pgd;
14901 - } else {
14902 - swapper_pg_dir[pgd_index(0)].pgd =
14903 - efi_bak_pg_dir_pointer[0].pgd;
14904 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14905 - efi_bak_pg_dir_pointer[1].pgd;
14906 - }
14907 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14908
14909 /*
14910 * After the lock is released, the original page table is restored.
14911 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14912 index fbe66e6..c5c0dd2 100644
14913 --- a/arch/x86/kernel/efi_stub_32.S
14914 +++ b/arch/x86/kernel/efi_stub_32.S
14915 @@ -6,7 +6,9 @@
14916 */
14917
14918 #include <linux/linkage.h>
14919 +#include <linux/init.h>
14920 #include <asm/page_types.h>
14921 +#include <asm/segment.h>
14922
14923 /*
14924 * efi_call_phys(void *, ...) is a function with variable parameters.
14925 @@ -20,7 +22,7 @@
14926 * service functions will comply with gcc calling convention, too.
14927 */
14928
14929 -.text
14930 +__INIT
14931 ENTRY(efi_call_phys)
14932 /*
14933 * 0. The function can only be called in Linux kernel. So CS has been
14934 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14935 * The mapping of lower virtual memory has been created in prelog and
14936 * epilog.
14937 */
14938 - movl $1f, %edx
14939 - subl $__PAGE_OFFSET, %edx
14940 - jmp *%edx
14941 + movl $(__KERNEXEC_EFI_DS), %edx
14942 + mov %edx, %ds
14943 + mov %edx, %es
14944 + mov %edx, %ss
14945 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14946 1:
14947
14948 /*
14949 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14950 * parameter 2, ..., param n. To make things easy, we save the return
14951 * address of efi_call_phys in a global variable.
14952 */
14953 - popl %edx
14954 - movl %edx, saved_return_addr
14955 - /* get the function pointer into ECX*/
14956 - popl %ecx
14957 - movl %ecx, efi_rt_function_ptr
14958 - movl $2f, %edx
14959 - subl $__PAGE_OFFSET, %edx
14960 - pushl %edx
14961 + popl (saved_return_addr)
14962 + popl (efi_rt_function_ptr)
14963
14964 /*
14965 * 3. Clear PG bit in %CR0.
14966 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14967 /*
14968 * 5. Call the physical function.
14969 */
14970 - jmp *%ecx
14971 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14972
14973 -2:
14974 /*
14975 * 6. After EFI runtime service returns, control will return to
14976 * following instruction. We'd better readjust stack pointer first.
14977 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14978 movl %cr0, %edx
14979 orl $0x80000000, %edx
14980 movl %edx, %cr0
14981 - jmp 1f
14982 -1:
14983 +
14984 /*
14985 * 8. Now restore the virtual mode from flat mode by
14986 * adding EIP with PAGE_OFFSET.
14987 */
14988 - movl $1f, %edx
14989 - jmp *%edx
14990 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14991 1:
14992 + movl $(__KERNEL_DS), %edx
14993 + mov %edx, %ds
14994 + mov %edx, %es
14995 + mov %edx, %ss
14996
14997 /*
14998 * 9. Balance the stack. And because EAX contain the return value,
14999 * we'd better not clobber it.
15000 */
15001 - leal efi_rt_function_ptr, %edx
15002 - movl (%edx), %ecx
15003 - pushl %ecx
15004 + pushl (efi_rt_function_ptr)
15005
15006 /*
15007 - * 10. Push the saved return address onto the stack and return.
15008 + * 10. Return to the saved return address.
15009 */
15010 - leal saved_return_addr, %edx
15011 - movl (%edx), %ecx
15012 - pushl %ecx
15013 - ret
15014 + jmpl *(saved_return_addr)
15015 ENDPROC(efi_call_phys)
15016 .previous
15017
15018 -.data
15019 +__INITDATA
15020 saved_return_addr:
15021 .long 0
15022 efi_rt_function_ptr:
15023 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15024 index 4c07cca..2c8427d 100644
15025 --- a/arch/x86/kernel/efi_stub_64.S
15026 +++ b/arch/x86/kernel/efi_stub_64.S
15027 @@ -7,6 +7,7 @@
15028 */
15029
15030 #include <linux/linkage.h>
15031 +#include <asm/alternative-asm.h>
15032
15033 #define SAVE_XMM \
15034 mov %rsp, %rax; \
15035 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
15036 call *%rdi
15037 addq $32, %rsp
15038 RESTORE_XMM
15039 + pax_force_retaddr 0, 1
15040 ret
15041 ENDPROC(efi_call0)
15042
15043 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
15044 call *%rdi
15045 addq $32, %rsp
15046 RESTORE_XMM
15047 + pax_force_retaddr 0, 1
15048 ret
15049 ENDPROC(efi_call1)
15050
15051 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
15052 call *%rdi
15053 addq $32, %rsp
15054 RESTORE_XMM
15055 + pax_force_retaddr 0, 1
15056 ret
15057 ENDPROC(efi_call2)
15058
15059 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
15060 call *%rdi
15061 addq $32, %rsp
15062 RESTORE_XMM
15063 + pax_force_retaddr 0, 1
15064 ret
15065 ENDPROC(efi_call3)
15066
15067 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
15068 call *%rdi
15069 addq $32, %rsp
15070 RESTORE_XMM
15071 + pax_force_retaddr 0, 1
15072 ret
15073 ENDPROC(efi_call4)
15074
15075 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
15076 call *%rdi
15077 addq $48, %rsp
15078 RESTORE_XMM
15079 + pax_force_retaddr 0, 1
15080 ret
15081 ENDPROC(efi_call5)
15082
15083 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
15084 call *%rdi
15085 addq $48, %rsp
15086 RESTORE_XMM
15087 + pax_force_retaddr 0, 1
15088 ret
15089 ENDPROC(efi_call6)
15090 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15091 index c097e7d..c689cf4 100644
15092 --- a/arch/x86/kernel/entry_32.S
15093 +++ b/arch/x86/kernel/entry_32.S
15094 @@ -185,13 +185,146 @@
15095 /*CFI_REL_OFFSET gs, PT_GS*/
15096 .endm
15097 .macro SET_KERNEL_GS reg
15098 +
15099 +#ifdef CONFIG_CC_STACKPROTECTOR
15100 movl $(__KERNEL_STACK_CANARY), \reg
15101 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15102 + movl $(__USER_DS), \reg
15103 +#else
15104 + xorl \reg, \reg
15105 +#endif
15106 +
15107 movl \reg, %gs
15108 .endm
15109
15110 #endif /* CONFIG_X86_32_LAZY_GS */
15111
15112 -.macro SAVE_ALL
15113 +.macro pax_enter_kernel
15114 +#ifdef CONFIG_PAX_KERNEXEC
15115 + call pax_enter_kernel
15116 +#endif
15117 +.endm
15118 +
15119 +.macro pax_exit_kernel
15120 +#ifdef CONFIG_PAX_KERNEXEC
15121 + call pax_exit_kernel
15122 +#endif
15123 +.endm
15124 +
15125 +#ifdef CONFIG_PAX_KERNEXEC
15126 +ENTRY(pax_enter_kernel)
15127 +#ifdef CONFIG_PARAVIRT
15128 + pushl %eax
15129 + pushl %ecx
15130 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15131 + mov %eax, %esi
15132 +#else
15133 + mov %cr0, %esi
15134 +#endif
15135 + bts $16, %esi
15136 + jnc 1f
15137 + mov %cs, %esi
15138 + cmp $__KERNEL_CS, %esi
15139 + jz 3f
15140 + ljmp $__KERNEL_CS, $3f
15141 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15142 +2:
15143 +#ifdef CONFIG_PARAVIRT
15144 + mov %esi, %eax
15145 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15146 +#else
15147 + mov %esi, %cr0
15148 +#endif
15149 +3:
15150 +#ifdef CONFIG_PARAVIRT
15151 + popl %ecx
15152 + popl %eax
15153 +#endif
15154 + ret
15155 +ENDPROC(pax_enter_kernel)
15156 +
15157 +ENTRY(pax_exit_kernel)
15158 +#ifdef CONFIG_PARAVIRT
15159 + pushl %eax
15160 + pushl %ecx
15161 +#endif
15162 + mov %cs, %esi
15163 + cmp $__KERNEXEC_KERNEL_CS, %esi
15164 + jnz 2f
15165 +#ifdef CONFIG_PARAVIRT
15166 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15167 + mov %eax, %esi
15168 +#else
15169 + mov %cr0, %esi
15170 +#endif
15171 + btr $16, %esi
15172 + ljmp $__KERNEL_CS, $1f
15173 +1:
15174 +#ifdef CONFIG_PARAVIRT
15175 + mov %esi, %eax
15176 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15177 +#else
15178 + mov %esi, %cr0
15179 +#endif
15180 +2:
15181 +#ifdef CONFIG_PARAVIRT
15182 + popl %ecx
15183 + popl %eax
15184 +#endif
15185 + ret
15186 +ENDPROC(pax_exit_kernel)
15187 +#endif
15188 +
15189 +.macro pax_erase_kstack
15190 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15191 + call pax_erase_kstack
15192 +#endif
15193 +.endm
15194 +
15195 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15196 +/*
15197 + * ebp: thread_info
15198 + * ecx, edx: can be clobbered
15199 + */
15200 +ENTRY(pax_erase_kstack)
15201 + pushl %edi
15202 + pushl %eax
15203 +
15204 + mov TI_lowest_stack(%ebp), %edi
15205 + mov $-0xBEEF, %eax
15206 + std
15207 +
15208 +1: mov %edi, %ecx
15209 + and $THREAD_SIZE_asm - 1, %ecx
15210 + shr $2, %ecx
15211 + repne scasl
15212 + jecxz 2f
15213 +
15214 + cmp $2*16, %ecx
15215 + jc 2f
15216 +
15217 + mov $2*16, %ecx
15218 + repe scasl
15219 + jecxz 2f
15220 + jne 1b
15221 +
15222 +2: cld
15223 + mov %esp, %ecx
15224 + sub %edi, %ecx
15225 + shr $2, %ecx
15226 + rep stosl
15227 +
15228 + mov TI_task_thread_sp0(%ebp), %edi
15229 + sub $128, %edi
15230 + mov %edi, TI_lowest_stack(%ebp)
15231 +
15232 + popl %eax
15233 + popl %edi
15234 + ret
15235 +ENDPROC(pax_erase_kstack)
15236 +#endif
15237 +
15238 +.macro __SAVE_ALL _DS
15239 cld
15240 PUSH_GS
15241 pushl %fs
15242 @@ -224,7 +357,7 @@
15243 pushl %ebx
15244 CFI_ADJUST_CFA_OFFSET 4
15245 CFI_REL_OFFSET ebx, 0
15246 - movl $(__USER_DS), %edx
15247 + movl $\_DS, %edx
15248 movl %edx, %ds
15249 movl %edx, %es
15250 movl $(__KERNEL_PERCPU), %edx
15251 @@ -232,6 +365,15 @@
15252 SET_KERNEL_GS %edx
15253 .endm
15254
15255 +.macro SAVE_ALL
15256 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15257 + __SAVE_ALL __KERNEL_DS
15258 + pax_enter_kernel
15259 +#else
15260 + __SAVE_ALL __USER_DS
15261 +#endif
15262 +.endm
15263 +
15264 .macro RESTORE_INT_REGS
15265 popl %ebx
15266 CFI_ADJUST_CFA_OFFSET -4
15267 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15268 CFI_ADJUST_CFA_OFFSET -4
15269 jmp syscall_exit
15270 CFI_ENDPROC
15271 -END(ret_from_fork)
15272 +ENDPROC(ret_from_fork)
15273
15274 /*
15275 * Return to user mode is not as complex as all this looks,
15276 @@ -352,7 +494,15 @@ check_userspace:
15277 movb PT_CS(%esp), %al
15278 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15279 cmpl $USER_RPL, %eax
15280 +
15281 +#ifdef CONFIG_PAX_KERNEXEC
15282 + jae resume_userspace
15283 +
15284 + PAX_EXIT_KERNEL
15285 + jmp resume_kernel
15286 +#else
15287 jb resume_kernel # not returning to v8086 or userspace
15288 +#endif
15289
15290 ENTRY(resume_userspace)
15291 LOCKDEP_SYS_EXIT
15292 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15293 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15294 # int/exception return?
15295 jne work_pending
15296 - jmp restore_all
15297 -END(ret_from_exception)
15298 + jmp restore_all_pax
15299 +ENDPROC(ret_from_exception)
15300
15301 #ifdef CONFIG_PREEMPT
15302 ENTRY(resume_kernel)
15303 @@ -380,7 +530,7 @@ need_resched:
15304 jz restore_all
15305 call preempt_schedule_irq
15306 jmp need_resched
15307 -END(resume_kernel)
15308 +ENDPROC(resume_kernel)
15309 #endif
15310 CFI_ENDPROC
15311
15312 @@ -414,25 +564,36 @@ sysenter_past_esp:
15313 /*CFI_REL_OFFSET cs, 0*/
15314 /*
15315 * Push current_thread_info()->sysenter_return to the stack.
15316 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15317 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15318 */
15319 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15320 + pushl $0
15321 CFI_ADJUST_CFA_OFFSET 4
15322 CFI_REL_OFFSET eip, 0
15323
15324 pushl %eax
15325 CFI_ADJUST_CFA_OFFSET 4
15326 SAVE_ALL
15327 + GET_THREAD_INFO(%ebp)
15328 + movl TI_sysenter_return(%ebp),%ebp
15329 + movl %ebp,PT_EIP(%esp)
15330 ENABLE_INTERRUPTS(CLBR_NONE)
15331
15332 /*
15333 * Load the potential sixth argument from user stack.
15334 * Careful about security.
15335 */
15336 + movl PT_OLDESP(%esp),%ebp
15337 +
15338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15339 + mov PT_OLDSS(%esp),%ds
15340 +1: movl %ds:(%ebp),%ebp
15341 + push %ss
15342 + pop %ds
15343 +#else
15344 cmpl $__PAGE_OFFSET-3,%ebp
15345 jae syscall_fault
15346 1: movl (%ebp),%ebp
15347 +#endif
15348 +
15349 movl %ebp,PT_EBP(%esp)
15350 .section __ex_table,"a"
15351 .align 4
15352 @@ -455,12 +616,24 @@ sysenter_do_call:
15353 testl $_TIF_ALLWORK_MASK, %ecx
15354 jne sysexit_audit
15355 sysenter_exit:
15356 +
15357 +#ifdef CONFIG_PAX_RANDKSTACK
15358 + pushl_cfi %eax
15359 + movl %esp, %eax
15360 + call pax_randomize_kstack
15361 + popl_cfi %eax
15362 +#endif
15363 +
15364 + pax_erase_kstack
15365 +
15366 /* if something modifies registers it must also disable sysexit */
15367 movl PT_EIP(%esp), %edx
15368 movl PT_OLDESP(%esp), %ecx
15369 xorl %ebp,%ebp
15370 TRACE_IRQS_ON
15371 1: mov PT_FS(%esp), %fs
15372 +2: mov PT_DS(%esp), %ds
15373 +3: mov PT_ES(%esp), %es
15374 PTGS_TO_GS
15375 ENABLE_INTERRUPTS_SYSEXIT
15376
15377 @@ -477,6 +650,9 @@ sysenter_audit:
15378 movl %eax,%edx /* 2nd arg: syscall number */
15379 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15380 call audit_syscall_entry
15381 +
15382 + pax_erase_kstack
15383 +
15384 pushl %ebx
15385 CFI_ADJUST_CFA_OFFSET 4
15386 movl PT_EAX(%esp),%eax /* reload syscall number */
15387 @@ -504,11 +680,17 @@ sysexit_audit:
15388
15389 CFI_ENDPROC
15390 .pushsection .fixup,"ax"
15391 -2: movl $0,PT_FS(%esp)
15392 +4: movl $0,PT_FS(%esp)
15393 + jmp 1b
15394 +5: movl $0,PT_DS(%esp)
15395 + jmp 1b
15396 +6: movl $0,PT_ES(%esp)
15397 jmp 1b
15398 .section __ex_table,"a"
15399 .align 4
15400 - .long 1b,2b
15401 + .long 1b,4b
15402 + .long 2b,5b
15403 + .long 3b,6b
15404 .popsection
15405 PTGS_TO_GS_EX
15406 ENDPROC(ia32_sysenter_target)
15407 @@ -538,6 +720,15 @@ syscall_exit:
15408 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15409 jne syscall_exit_work
15410
15411 +restore_all_pax:
15412 +
15413 +#ifdef CONFIG_PAX_RANDKSTACK
15414 + movl %esp, %eax
15415 + call pax_randomize_kstack
15416 +#endif
15417 +
15418 + pax_erase_kstack
15419 +
15420 restore_all:
15421 TRACE_IRQS_IRET
15422 restore_all_notrace:
15423 @@ -602,10 +793,29 @@ ldt_ss:
15424 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15425 mov %dx, %ax /* eax: new kernel esp */
15426 sub %eax, %edx /* offset (low word is 0) */
15427 - PER_CPU(gdt_page, %ebx)
15428 +#ifdef CONFIG_SMP
15429 + movl PER_CPU_VAR(cpu_number), %ebx
15430 + shll $PAGE_SHIFT_asm, %ebx
15431 + addl $cpu_gdt_table, %ebx
15432 +#else
15433 + movl $cpu_gdt_table, %ebx
15434 +#endif
15435 shr $16, %edx
15436 +
15437 +#ifdef CONFIG_PAX_KERNEXEC
15438 + mov %cr0, %esi
15439 + btr $16, %esi
15440 + mov %esi, %cr0
15441 +#endif
15442 +
15443 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15444 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15445 +
15446 +#ifdef CONFIG_PAX_KERNEXEC
15447 + bts $16, %esi
15448 + mov %esi, %cr0
15449 +#endif
15450 +
15451 pushl $__ESPFIX_SS
15452 CFI_ADJUST_CFA_OFFSET 4
15453 push %eax /* new kernel esp */
15454 @@ -636,36 +846,30 @@ work_resched:
15455 movl TI_flags(%ebp), %ecx
15456 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15457 # than syscall tracing?
15458 - jz restore_all
15459 + jz restore_all_pax
15460 testb $_TIF_NEED_RESCHED, %cl
15461 jnz work_resched
15462
15463 work_notifysig: # deal with pending signals and
15464 # notify-resume requests
15465 + movl %esp, %eax
15466 #ifdef CONFIG_VM86
15467 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15468 - movl %esp, %eax
15469 - jne work_notifysig_v86 # returning to kernel-space or
15470 + jz 1f # returning to kernel-space or
15471 # vm86-space
15472 - xorl %edx, %edx
15473 - call do_notify_resume
15474 - jmp resume_userspace_sig
15475
15476 - ALIGN
15477 -work_notifysig_v86:
15478 pushl %ecx # save ti_flags for do_notify_resume
15479 CFI_ADJUST_CFA_OFFSET 4
15480 call save_v86_state # %eax contains pt_regs pointer
15481 popl %ecx
15482 CFI_ADJUST_CFA_OFFSET -4
15483 movl %eax, %esp
15484 -#else
15485 - movl %esp, %eax
15486 +1:
15487 #endif
15488 xorl %edx, %edx
15489 call do_notify_resume
15490 jmp resume_userspace_sig
15491 -END(work_pending)
15492 +ENDPROC(work_pending)
15493
15494 # perform syscall exit tracing
15495 ALIGN
15496 @@ -673,11 +877,14 @@ syscall_trace_entry:
15497 movl $-ENOSYS,PT_EAX(%esp)
15498 movl %esp, %eax
15499 call syscall_trace_enter
15500 +
15501 + pax_erase_kstack
15502 +
15503 /* What it returned is what we'll actually use. */
15504 cmpl $(nr_syscalls), %eax
15505 jnae syscall_call
15506 jmp syscall_exit
15507 -END(syscall_trace_entry)
15508 +ENDPROC(syscall_trace_entry)
15509
15510 # perform syscall exit tracing
15511 ALIGN
15512 @@ -690,20 +897,24 @@ syscall_exit_work:
15513 movl %esp, %eax
15514 call syscall_trace_leave
15515 jmp resume_userspace
15516 -END(syscall_exit_work)
15517 +ENDPROC(syscall_exit_work)
15518 CFI_ENDPROC
15519
15520 RING0_INT_FRAME # can't unwind into user space anyway
15521 syscall_fault:
15522 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15523 + push %ss
15524 + pop %ds
15525 +#endif
15526 GET_THREAD_INFO(%ebp)
15527 movl $-EFAULT,PT_EAX(%esp)
15528 jmp resume_userspace
15529 -END(syscall_fault)
15530 +ENDPROC(syscall_fault)
15531
15532 syscall_badsys:
15533 movl $-ENOSYS,PT_EAX(%esp)
15534 jmp resume_userspace
15535 -END(syscall_badsys)
15536 +ENDPROC(syscall_badsys)
15537 CFI_ENDPROC
15538
15539 /*
15540 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15541 PTREGSCALL(vm86)
15542 PTREGSCALL(vm86old)
15543
15544 + ALIGN;
15545 +ENTRY(kernel_execve)
15546 + push %ebp
15547 + sub $PT_OLDSS+4,%esp
15548 + push %edi
15549 + push %ecx
15550 + push %eax
15551 + lea 3*4(%esp),%edi
15552 + mov $PT_OLDSS/4+1,%ecx
15553 + xorl %eax,%eax
15554 + rep stosl
15555 + pop %eax
15556 + pop %ecx
15557 + pop %edi
15558 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15559 + mov %eax,PT_EBX(%esp)
15560 + mov %edx,PT_ECX(%esp)
15561 + mov %ecx,PT_EDX(%esp)
15562 + mov %esp,%eax
15563 + call sys_execve
15564 + GET_THREAD_INFO(%ebp)
15565 + test %eax,%eax
15566 + jz syscall_exit
15567 + add $PT_OLDSS+4,%esp
15568 + pop %ebp
15569 + ret
15570 +
15571 .macro FIXUP_ESPFIX_STACK
15572 /*
15573 * Switch back for ESPFIX stack to the normal zerobased stack
15574 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15575 * normal stack and adjusts ESP with the matching offset.
15576 */
15577 /* fixup the stack */
15578 - PER_CPU(gdt_page, %ebx)
15579 +#ifdef CONFIG_SMP
15580 + movl PER_CPU_VAR(cpu_number), %ebx
15581 + shll $PAGE_SHIFT_asm, %ebx
15582 + addl $cpu_gdt_table, %ebx
15583 +#else
15584 + movl $cpu_gdt_table, %ebx
15585 +#endif
15586 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15587 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15588 shl $16, %eax
15589 @@ -793,7 +1037,7 @@ vector=vector+1
15590 .endr
15591 2: jmp common_interrupt
15592 .endr
15593 -END(irq_entries_start)
15594 +ENDPROC(irq_entries_start)
15595
15596 .previous
15597 END(interrupt)
15598 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15599 CFI_ADJUST_CFA_OFFSET 4
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(coprocessor_error)
15603 +ENDPROC(coprocessor_error)
15604
15605 ENTRY(simd_coprocessor_error)
15606 RING0_INT_FRAME
15607 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15608 CFI_ADJUST_CFA_OFFSET 4
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(simd_coprocessor_error)
15612 +ENDPROC(simd_coprocessor_error)
15613
15614 ENTRY(device_not_available)
15615 RING0_INT_FRAME
15616 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15617 CFI_ADJUST_CFA_OFFSET 4
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(device_not_available)
15621 +ENDPROC(device_not_available)
15622
15623 #ifdef CONFIG_PARAVIRT
15624 ENTRY(native_iret)
15625 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15626 .align 4
15627 .long native_iret, iret_exc
15628 .previous
15629 -END(native_iret)
15630 +ENDPROC(native_iret)
15631
15632 ENTRY(native_irq_enable_sysexit)
15633 sti
15634 sysexit
15635 -END(native_irq_enable_sysexit)
15636 +ENDPROC(native_irq_enable_sysexit)
15637 #endif
15638
15639 ENTRY(overflow)
15640 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15641 CFI_ADJUST_CFA_OFFSET 4
15642 jmp error_code
15643 CFI_ENDPROC
15644 -END(overflow)
15645 +ENDPROC(overflow)
15646
15647 ENTRY(bounds)
15648 RING0_INT_FRAME
15649 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15650 CFI_ADJUST_CFA_OFFSET 4
15651 jmp error_code
15652 CFI_ENDPROC
15653 -END(bounds)
15654 +ENDPROC(bounds)
15655
15656 ENTRY(invalid_op)
15657 RING0_INT_FRAME
15658 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15659 CFI_ADJUST_CFA_OFFSET 4
15660 jmp error_code
15661 CFI_ENDPROC
15662 -END(invalid_op)
15663 +ENDPROC(invalid_op)
15664
15665 ENTRY(coprocessor_segment_overrun)
15666 RING0_INT_FRAME
15667 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15668 CFI_ADJUST_CFA_OFFSET 4
15669 jmp error_code
15670 CFI_ENDPROC
15671 -END(coprocessor_segment_overrun)
15672 +ENDPROC(coprocessor_segment_overrun)
15673
15674 ENTRY(invalid_TSS)
15675 RING0_EC_FRAME
15676 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15677 CFI_ADJUST_CFA_OFFSET 4
15678 jmp error_code
15679 CFI_ENDPROC
15680 -END(invalid_TSS)
15681 +ENDPROC(invalid_TSS)
15682
15683 ENTRY(segment_not_present)
15684 RING0_EC_FRAME
15685 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15686 CFI_ADJUST_CFA_OFFSET 4
15687 jmp error_code
15688 CFI_ENDPROC
15689 -END(segment_not_present)
15690 +ENDPROC(segment_not_present)
15691
15692 ENTRY(stack_segment)
15693 RING0_EC_FRAME
15694 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15695 CFI_ADJUST_CFA_OFFSET 4
15696 jmp error_code
15697 CFI_ENDPROC
15698 -END(stack_segment)
15699 +ENDPROC(stack_segment)
15700
15701 ENTRY(alignment_check)
15702 RING0_EC_FRAME
15703 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15704 CFI_ADJUST_CFA_OFFSET 4
15705 jmp error_code
15706 CFI_ENDPROC
15707 -END(alignment_check)
15708 +ENDPROC(alignment_check)
15709
15710 ENTRY(divide_error)
15711 RING0_INT_FRAME
15712 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15713 CFI_ADJUST_CFA_OFFSET 4
15714 jmp error_code
15715 CFI_ENDPROC
15716 -END(divide_error)
15717 +ENDPROC(divide_error)
15718
15719 #ifdef CONFIG_X86_MCE
15720 ENTRY(machine_check)
15721 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15722 CFI_ADJUST_CFA_OFFSET 4
15723 jmp error_code
15724 CFI_ENDPROC
15725 -END(machine_check)
15726 +ENDPROC(machine_check)
15727 #endif
15728
15729 ENTRY(spurious_interrupt_bug)
15730 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15731 CFI_ADJUST_CFA_OFFSET 4
15732 jmp error_code
15733 CFI_ENDPROC
15734 -END(spurious_interrupt_bug)
15735 +ENDPROC(spurious_interrupt_bug)
15736
15737 ENTRY(kernel_thread_helper)
15738 pushl $0 # fake return address for unwinder
15739 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15740
15741 ENTRY(mcount)
15742 ret
15743 -END(mcount)
15744 +ENDPROC(mcount)
15745
15746 ENTRY(ftrace_caller)
15747 cmpl $0, function_trace_stop
15748 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15749 .globl ftrace_stub
15750 ftrace_stub:
15751 ret
15752 -END(ftrace_caller)
15753 +ENDPROC(ftrace_caller)
15754
15755 #else /* ! CONFIG_DYNAMIC_FTRACE */
15756
15757 @@ -1160,7 +1404,7 @@ trace:
15758 popl %ecx
15759 popl %eax
15760 jmp ftrace_stub
15761 -END(mcount)
15762 +ENDPROC(mcount)
15763 #endif /* CONFIG_DYNAMIC_FTRACE */
15764 #endif /* CONFIG_FUNCTION_TRACER */
15765
15766 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15767 popl %ecx
15768 popl %eax
15769 ret
15770 -END(ftrace_graph_caller)
15771 +ENDPROC(ftrace_graph_caller)
15772
15773 .globl return_to_handler
15774 return_to_handler:
15775 @@ -1198,7 +1442,6 @@ return_to_handler:
15776 ret
15777 #endif
15778
15779 -.section .rodata,"a"
15780 #include "syscall_table_32.S"
15781
15782 syscall_table_size=(.-sys_call_table)
15783 @@ -1255,15 +1498,18 @@ error_code:
15784 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15785 REG_TO_PTGS %ecx
15786 SET_KERNEL_GS %ecx
15787 - movl $(__USER_DS), %ecx
15788 + movl $(__KERNEL_DS), %ecx
15789 movl %ecx, %ds
15790 movl %ecx, %es
15791 +
15792 + pax_enter_kernel
15793 +
15794 TRACE_IRQS_OFF
15795 movl %esp,%eax # pt_regs pointer
15796 call *%edi
15797 jmp ret_from_exception
15798 CFI_ENDPROC
15799 -END(page_fault)
15800 +ENDPROC(page_fault)
15801
15802 /*
15803 * Debug traps and NMI can happen at the one SYSENTER instruction
15804 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15805 call do_debug
15806 jmp ret_from_exception
15807 CFI_ENDPROC
15808 -END(debug)
15809 +ENDPROC(debug)
15810
15811 /*
15812 * NMI is doubly nasty. It can happen _while_ we're handling
15813 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15814 xorl %edx,%edx # zero error code
15815 movl %esp,%eax # pt_regs pointer
15816 call do_nmi
15817 +
15818 + pax_exit_kernel
15819 +
15820 jmp restore_all_notrace
15821 CFI_ENDPROC
15822
15823 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15824 FIXUP_ESPFIX_STACK # %eax == %esp
15825 xorl %edx,%edx # zero error code
15826 call do_nmi
15827 +
15828 + pax_exit_kernel
15829 +
15830 RESTORE_REGS
15831 lss 12+4(%esp), %esp # back to espfix stack
15832 CFI_ADJUST_CFA_OFFSET -24
15833 jmp irq_return
15834 CFI_ENDPROC
15835 -END(nmi)
15836 +ENDPROC(nmi)
15837
15838 ENTRY(int3)
15839 RING0_INT_FRAME
15840 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15841 call do_int3
15842 jmp ret_from_exception
15843 CFI_ENDPROC
15844 -END(int3)
15845 +ENDPROC(int3)
15846
15847 ENTRY(general_protection)
15848 RING0_EC_FRAME
15849 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15850 CFI_ADJUST_CFA_OFFSET 4
15851 jmp error_code
15852 CFI_ENDPROC
15853 -END(general_protection)
15854 +ENDPROC(general_protection)
15855
15856 /*
15857 * End of kprobes section
15858 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15859 index 34a56a9..87790b4 100644
15860 --- a/arch/x86/kernel/entry_64.S
15861 +++ b/arch/x86/kernel/entry_64.S
15862 @@ -53,6 +53,8 @@
15863 #include <asm/paravirt.h>
15864 #include <asm/ftrace.h>
15865 #include <asm/percpu.h>
15866 +#include <asm/pgtable.h>
15867 +#include <asm/alternative-asm.h>
15868
15869 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15870 #include <linux/elf-em.h>
15871 @@ -64,8 +66,9 @@
15872 #ifdef CONFIG_FUNCTION_TRACER
15873 #ifdef CONFIG_DYNAMIC_FTRACE
15874 ENTRY(mcount)
15875 + pax_force_retaddr
15876 retq
15877 -END(mcount)
15878 +ENDPROC(mcount)
15879
15880 ENTRY(ftrace_caller)
15881 cmpl $0, function_trace_stop
15882 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15883 #endif
15884
15885 GLOBAL(ftrace_stub)
15886 + pax_force_retaddr
15887 retq
15888 -END(ftrace_caller)
15889 +ENDPROC(ftrace_caller)
15890
15891 #else /* ! CONFIG_DYNAMIC_FTRACE */
15892 ENTRY(mcount)
15893 @@ -108,6 +112,7 @@ ENTRY(mcount)
15894 #endif
15895
15896 GLOBAL(ftrace_stub)
15897 + pax_force_retaddr
15898 retq
15899
15900 trace:
15901 @@ -117,12 +122,13 @@ trace:
15902 movq 8(%rbp), %rsi
15903 subq $MCOUNT_INSN_SIZE, %rdi
15904
15905 + pax_force_fptr ftrace_trace_function
15906 call *ftrace_trace_function
15907
15908 MCOUNT_RESTORE_FRAME
15909
15910 jmp ftrace_stub
15911 -END(mcount)
15912 +ENDPROC(mcount)
15913 #endif /* CONFIG_DYNAMIC_FTRACE */
15914 #endif /* CONFIG_FUNCTION_TRACER */
15915
15916 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15917
15918 MCOUNT_RESTORE_FRAME
15919
15920 + pax_force_retaddr
15921 retq
15922 -END(ftrace_graph_caller)
15923 +ENDPROC(ftrace_graph_caller)
15924
15925 GLOBAL(return_to_handler)
15926 subq $24, %rsp
15927 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15928 movq 8(%rsp), %rdx
15929 movq (%rsp), %rax
15930 addq $16, %rsp
15931 + pax_force_retaddr
15932 retq
15933 #endif
15934
15935 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15936 ENDPROC(native_usergs_sysret64)
15937 #endif /* CONFIG_PARAVIRT */
15938
15939 + .macro ljmpq sel, off
15940 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15941 + .byte 0x48; ljmp *1234f(%rip)
15942 + .pushsection .rodata
15943 + .align 16
15944 + 1234: .quad \off; .word \sel
15945 + .popsection
15946 +#else
15947 + pushq $\sel
15948 + pushq $\off
15949 + lretq
15950 +#endif
15951 + .endm
15952 +
15953 + .macro pax_enter_kernel
15954 + pax_set_fptr_mask
15955 +#ifdef CONFIG_PAX_KERNEXEC
15956 + call pax_enter_kernel
15957 +#endif
15958 + .endm
15959 +
15960 + .macro pax_exit_kernel
15961 +#ifdef CONFIG_PAX_KERNEXEC
15962 + call pax_exit_kernel
15963 +#endif
15964 + .endm
15965 +
15966 +#ifdef CONFIG_PAX_KERNEXEC
15967 +ENTRY(pax_enter_kernel)
15968 + pushq %rdi
15969 +
15970 +#ifdef CONFIG_PARAVIRT
15971 + PV_SAVE_REGS(CLBR_RDI)
15972 +#endif
15973 +
15974 + GET_CR0_INTO_RDI
15975 + bts $16,%rdi
15976 + jnc 3f
15977 + mov %cs,%edi
15978 + cmp $__KERNEL_CS,%edi
15979 + jnz 2f
15980 +1:
15981 +
15982 +#ifdef CONFIG_PARAVIRT
15983 + PV_RESTORE_REGS(CLBR_RDI)
15984 +#endif
15985 +
15986 + popq %rdi
15987 + pax_force_retaddr
15988 + retq
15989 +
15990 +2: ljmpq __KERNEL_CS,1f
15991 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15992 +4: SET_RDI_INTO_CR0
15993 + jmp 1b
15994 +ENDPROC(pax_enter_kernel)
15995 +
15996 +ENTRY(pax_exit_kernel)
15997 + pushq %rdi
15998 +
15999 +#ifdef CONFIG_PARAVIRT
16000 + PV_SAVE_REGS(CLBR_RDI)
16001 +#endif
16002 +
16003 + mov %cs,%rdi
16004 + cmp $__KERNEXEC_KERNEL_CS,%edi
16005 + jz 2f
16006 +1:
16007 +
16008 +#ifdef CONFIG_PARAVIRT
16009 + PV_RESTORE_REGS(CLBR_RDI);
16010 +#endif
16011 +
16012 + popq %rdi
16013 + pax_force_retaddr
16014 + retq
16015 +
16016 +2: GET_CR0_INTO_RDI
16017 + btr $16,%rdi
16018 + ljmpq __KERNEL_CS,3f
16019 +3: SET_RDI_INTO_CR0
16020 + jmp 1b
16021 +#ifdef CONFIG_PARAVIRT
16022 + PV_RESTORE_REGS(CLBR_RDI);
16023 +#endif
16024 +
16025 + popq %rdi
16026 + pax_force_retaddr
16027 + retq
16028 +ENDPROC(pax_exit_kernel)
16029 +#endif
16030 +
16031 + .macro pax_enter_kernel_user
16032 + pax_set_fptr_mask
16033 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16034 + call pax_enter_kernel_user
16035 +#endif
16036 + .endm
16037 +
16038 + .macro pax_exit_kernel_user
16039 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16040 + call pax_exit_kernel_user
16041 +#endif
16042 +#ifdef CONFIG_PAX_RANDKSTACK
16043 + pushq %rax
16044 + call pax_randomize_kstack
16045 + popq %rax
16046 +#endif
16047 + .endm
16048 +
16049 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16050 +ENTRY(pax_enter_kernel_user)
16051 + pushq %rdi
16052 + pushq %rbx
16053 +
16054 +#ifdef CONFIG_PARAVIRT
16055 + PV_SAVE_REGS(CLBR_RDI)
16056 +#endif
16057 +
16058 + GET_CR3_INTO_RDI
16059 + mov %rdi,%rbx
16060 + add $__START_KERNEL_map,%rbx
16061 + sub phys_base(%rip),%rbx
16062 +
16063 +#ifdef CONFIG_PARAVIRT
16064 + pushq %rdi
16065 + cmpl $0, pv_info+PARAVIRT_enabled
16066 + jz 1f
16067 + i = 0
16068 + .rept USER_PGD_PTRS
16069 + mov i*8(%rbx),%rsi
16070 + mov $0,%sil
16071 + lea i*8(%rbx),%rdi
16072 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16073 + i = i + 1
16074 + .endr
16075 + jmp 2f
16076 +1:
16077 +#endif
16078 +
16079 + i = 0
16080 + .rept USER_PGD_PTRS
16081 + movb $0,i*8(%rbx)
16082 + i = i + 1
16083 + .endr
16084 +
16085 +#ifdef CONFIG_PARAVIRT
16086 +2: popq %rdi
16087 +#endif
16088 + SET_RDI_INTO_CR3
16089 +
16090 +#ifdef CONFIG_PAX_KERNEXEC
16091 + GET_CR0_INTO_RDI
16092 + bts $16,%rdi
16093 + SET_RDI_INTO_CR0
16094 +#endif
16095 +
16096 +#ifdef CONFIG_PARAVIRT
16097 + PV_RESTORE_REGS(CLBR_RDI)
16098 +#endif
16099 +
16100 + popq %rbx
16101 + popq %rdi
16102 + pax_force_retaddr
16103 + retq
16104 +ENDPROC(pax_enter_kernel_user)
16105 +
16106 +ENTRY(pax_exit_kernel_user)
16107 + push %rdi
16108 +
16109 +#ifdef CONFIG_PARAVIRT
16110 + pushq %rbx
16111 + PV_SAVE_REGS(CLBR_RDI)
16112 +#endif
16113 +
16114 +#ifdef CONFIG_PAX_KERNEXEC
16115 + GET_CR0_INTO_RDI
16116 + btr $16,%rdi
16117 + SET_RDI_INTO_CR0
16118 +#endif
16119 +
16120 + GET_CR3_INTO_RDI
16121 + add $__START_KERNEL_map,%rdi
16122 + sub phys_base(%rip),%rdi
16123 +
16124 +#ifdef CONFIG_PARAVIRT
16125 + cmpl $0, pv_info+PARAVIRT_enabled
16126 + jz 1f
16127 + mov %rdi,%rbx
16128 + i = 0
16129 + .rept USER_PGD_PTRS
16130 + mov i*8(%rbx),%rsi
16131 + mov $0x67,%sil
16132 + lea i*8(%rbx),%rdi
16133 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16134 + i = i + 1
16135 + .endr
16136 + jmp 2f
16137 +1:
16138 +#endif
16139 +
16140 + i = 0
16141 + .rept USER_PGD_PTRS
16142 + movb $0x67,i*8(%rdi)
16143 + i = i + 1
16144 + .endr
16145 +
16146 +#ifdef CONFIG_PARAVIRT
16147 +2: PV_RESTORE_REGS(CLBR_RDI)
16148 + popq %rbx
16149 +#endif
16150 +
16151 + popq %rdi
16152 + pax_force_retaddr
16153 + retq
16154 +ENDPROC(pax_exit_kernel_user)
16155 +#endif
16156 +
16157 +.macro pax_erase_kstack
16158 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16159 + call pax_erase_kstack
16160 +#endif
16161 +.endm
16162 +
16163 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164 +/*
16165 + * r11: thread_info
16166 + * rcx, rdx: can be clobbered
16167 + */
16168 +ENTRY(pax_erase_kstack)
16169 + pushq %rdi
16170 + pushq %rax
16171 + pushq %r11
16172 +
16173 + GET_THREAD_INFO(%r11)
16174 + mov TI_lowest_stack(%r11), %rdi
16175 + mov $-0xBEEF, %rax
16176 + std
16177 +
16178 +1: mov %edi, %ecx
16179 + and $THREAD_SIZE_asm - 1, %ecx
16180 + shr $3, %ecx
16181 + repne scasq
16182 + jecxz 2f
16183 +
16184 + cmp $2*8, %ecx
16185 + jc 2f
16186 +
16187 + mov $2*8, %ecx
16188 + repe scasq
16189 + jecxz 2f
16190 + jne 1b
16191 +
16192 +2: cld
16193 + mov %esp, %ecx
16194 + sub %edi, %ecx
16195 +
16196 + cmp $THREAD_SIZE_asm, %rcx
16197 + jb 3f
16198 + ud2
16199 +3:
16200 +
16201 + shr $3, %ecx
16202 + rep stosq
16203 +
16204 + mov TI_task_thread_sp0(%r11), %rdi
16205 + sub $256, %rdi
16206 + mov %rdi, TI_lowest_stack(%r11)
16207 +
16208 + popq %r11
16209 + popq %rax
16210 + popq %rdi
16211 + pax_force_retaddr
16212 + ret
16213 +ENDPROC(pax_erase_kstack)
16214 +#endif
16215
16216 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16217 #ifdef CONFIG_TRACE_IRQFLAGS
16218 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16219 .endm
16220
16221 .macro UNFAKE_STACK_FRAME
16222 - addq $8*6, %rsp
16223 - CFI_ADJUST_CFA_OFFSET -(6*8)
16224 + addq $8*6 + ARG_SKIP, %rsp
16225 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16226 .endm
16227
16228 /*
16229 @@ -317,7 +601,7 @@ ENTRY(save_args)
16230 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16231 movq_cfi rbp, 8 /* push %rbp */
16232 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16233 - testl $3, CS(%rdi)
16234 + testb $3, CS(%rdi)
16235 je 1f
16236 SWAPGS
16237 /*
16238 @@ -337,9 +621,10 @@ ENTRY(save_args)
16239 * We entered an interrupt context - irqs are off:
16240 */
16241 2: TRACE_IRQS_OFF
16242 + pax_force_retaddr
16243 ret
16244 CFI_ENDPROC
16245 -END(save_args)
16246 +ENDPROC(save_args)
16247
16248 ENTRY(save_rest)
16249 PARTIAL_FRAME 1 REST_SKIP+8
16250 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16251 movq_cfi r15, R15+16
16252 movq %r11, 8(%rsp) /* return address */
16253 FIXUP_TOP_OF_STACK %r11, 16
16254 + pax_force_retaddr
16255 ret
16256 CFI_ENDPROC
16257 -END(save_rest)
16258 +ENDPROC(save_rest)
16259
16260 /* save complete stack frame */
16261 .pushsection .kprobes.text, "ax"
16262 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16263 js 1f /* negative -> in kernel */
16264 SWAPGS
16265 xorl %ebx,%ebx
16266 -1: ret
16267 +1: pax_force_retaddr_bts
16268 + ret
16269 CFI_ENDPROC
16270 -END(save_paranoid)
16271 +ENDPROC(save_paranoid)
16272 .popsection
16273
16274 /*
16275 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16276
16277 RESTORE_REST
16278
16279 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16280 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16281 je int_ret_from_sys_call
16282
16283 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16284 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16285 jmp ret_from_sys_call # go to the SYSRET fastpath
16286
16287 CFI_ENDPROC
16288 -END(ret_from_fork)
16289 +ENDPROC(ret_from_fork)
16290
16291 /*
16292 * System call entry. Upto 6 arguments in registers are supported.
16293 @@ -455,7 +742,7 @@ END(ret_from_fork)
16294 ENTRY(system_call)
16295 CFI_STARTPROC simple
16296 CFI_SIGNAL_FRAME
16297 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16298 + CFI_DEF_CFA rsp,0
16299 CFI_REGISTER rip,rcx
16300 /*CFI_REGISTER rflags,r11*/
16301 SWAPGS_UNSAFE_STACK
16302 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16303
16304 movq %rsp,PER_CPU_VAR(old_rsp)
16305 movq PER_CPU_VAR(kernel_stack),%rsp
16306 + SAVE_ARGS 8*6,1
16307 + pax_enter_kernel_user
16308 /*
16309 * No need to follow this irqs off/on section - it's straight
16310 * and short:
16311 */
16312 ENABLE_INTERRUPTS(CLBR_NONE)
16313 - SAVE_ARGS 8,1
16314 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16315 movq %rcx,RIP-ARGOFFSET(%rsp)
16316 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16317 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16318 system_call_fastpath:
16319 cmpq $__NR_syscall_max,%rax
16320 ja badsys
16321 - movq %r10,%rcx
16322 + movq R10-ARGOFFSET(%rsp),%rcx
16323 call *sys_call_table(,%rax,8) # XXX: rip relative
16324 movq %rax,RAX-ARGOFFSET(%rsp)
16325 /*
16326 @@ -502,6 +790,8 @@ sysret_check:
16327 andl %edi,%edx
16328 jnz sysret_careful
16329 CFI_REMEMBER_STATE
16330 + pax_exit_kernel_user
16331 + pax_erase_kstack
16332 /*
16333 * sysretq will re-enable interrupts:
16334 */
16335 @@ -555,14 +845,18 @@ badsys:
16336 * jump back to the normal fast path.
16337 */
16338 auditsys:
16339 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16340 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16341 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16342 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16343 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16344 movq %rax,%rsi /* 2nd arg: syscall number */
16345 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16346 call audit_syscall_entry
16347 +
16348 + pax_erase_kstack
16349 +
16350 LOAD_ARGS 0 /* reload call-clobbered registers */
16351 + pax_set_fptr_mask
16352 jmp system_call_fastpath
16353
16354 /*
16355 @@ -592,16 +886,20 @@ tracesys:
16356 FIXUP_TOP_OF_STACK %rdi
16357 movq %rsp,%rdi
16358 call syscall_trace_enter
16359 +
16360 + pax_erase_kstack
16361 +
16362 /*
16363 * Reload arg registers from stack in case ptrace changed them.
16364 * We don't reload %rax because syscall_trace_enter() returned
16365 * the value it wants us to use in the table lookup.
16366 */
16367 LOAD_ARGS ARGOFFSET, 1
16368 + pax_set_fptr_mask
16369 RESTORE_REST
16370 cmpq $__NR_syscall_max,%rax
16371 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16372 - movq %r10,%rcx /* fixup for C */
16373 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16374 call *sys_call_table(,%rax,8)
16375 movq %rax,RAX-ARGOFFSET(%rsp)
16376 /* Use IRET because user could have changed frame */
16377 @@ -613,7 +911,7 @@ tracesys:
16378 GLOBAL(int_ret_from_sys_call)
16379 DISABLE_INTERRUPTS(CLBR_NONE)
16380 TRACE_IRQS_OFF
16381 - testl $3,CS-ARGOFFSET(%rsp)
16382 + testb $3,CS-ARGOFFSET(%rsp)
16383 je retint_restore_args
16384 movl $_TIF_ALLWORK_MASK,%edi
16385 /* edi: mask to check */
16386 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16387 andl %edi,%edx
16388 jnz int_careful
16389 andl $~TS_COMPAT,TI_status(%rcx)
16390 + pax_erase_kstack
16391 jmp retint_swapgs
16392
16393 /* Either reschedule or signal or syscall exit tracking needed. */
16394 @@ -674,7 +973,7 @@ int_restore_rest:
16395 TRACE_IRQS_OFF
16396 jmp int_with_check
16397 CFI_ENDPROC
16398 -END(system_call)
16399 +ENDPROC(system_call)
16400
16401 /*
16402 * Certain special system calls that need to save a complete full stack frame.
16403 @@ -690,7 +989,7 @@ ENTRY(\label)
16404 call \func
16405 jmp ptregscall_common
16406 CFI_ENDPROC
16407 -END(\label)
16408 +ENDPROC(\label)
16409 .endm
16410
16411 PTREGSCALL stub_clone, sys_clone, %r8
16412 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16413 movq_cfi_restore R12+8, r12
16414 movq_cfi_restore RBP+8, rbp
16415 movq_cfi_restore RBX+8, rbx
16416 + pax_force_retaddr
16417 ret $REST_SKIP /* pop extended registers */
16418 CFI_ENDPROC
16419 -END(ptregscall_common)
16420 +ENDPROC(ptregscall_common)
16421
16422 ENTRY(stub_execve)
16423 CFI_STARTPROC
16424 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16425 RESTORE_REST
16426 jmp int_ret_from_sys_call
16427 CFI_ENDPROC
16428 -END(stub_execve)
16429 +ENDPROC(stub_execve)
16430
16431 /*
16432 * sigreturn is special because it needs to restore all registers on return.
16433 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16434 RESTORE_REST
16435 jmp int_ret_from_sys_call
16436 CFI_ENDPROC
16437 -END(stub_rt_sigreturn)
16438 +ENDPROC(stub_rt_sigreturn)
16439
16440 /*
16441 * Build the entry stubs and pointer table with some assembler magic.
16442 @@ -780,7 +1080,7 @@ vector=vector+1
16443 2: jmp common_interrupt
16444 .endr
16445 CFI_ENDPROC
16446 -END(irq_entries_start)
16447 +ENDPROC(irq_entries_start)
16448
16449 .previous
16450 END(interrupt)
16451 @@ -800,6 +1100,16 @@ END(interrupt)
16452 CFI_ADJUST_CFA_OFFSET 10*8
16453 call save_args
16454 PARTIAL_FRAME 0
16455 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16456 + testb $3, CS(%rdi)
16457 + jnz 1f
16458 + pax_enter_kernel
16459 + jmp 2f
16460 +1: pax_enter_kernel_user
16461 +2:
16462 +#else
16463 + pax_enter_kernel
16464 +#endif
16465 call \func
16466 .endm
16467
16468 @@ -822,7 +1132,7 @@ ret_from_intr:
16469 CFI_ADJUST_CFA_OFFSET -8
16470 exit_intr:
16471 GET_THREAD_INFO(%rcx)
16472 - testl $3,CS-ARGOFFSET(%rsp)
16473 + testb $3,CS-ARGOFFSET(%rsp)
16474 je retint_kernel
16475
16476 /* Interrupt came from user space */
16477 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16478 * The iretq could re-enable interrupts:
16479 */
16480 DISABLE_INTERRUPTS(CLBR_ANY)
16481 + pax_exit_kernel_user
16482 TRACE_IRQS_IRETQ
16483 SWAPGS
16484 jmp restore_args
16485
16486 retint_restore_args: /* return to kernel space */
16487 DISABLE_INTERRUPTS(CLBR_ANY)
16488 + pax_exit_kernel
16489 + pax_force_retaddr RIP-ARGOFFSET
16490 /*
16491 * The iretq could re-enable interrupts:
16492 */
16493 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16494 #endif
16495
16496 CFI_ENDPROC
16497 -END(common_interrupt)
16498 +ENDPROC(common_interrupt)
16499
16500 /*
16501 * APIC interrupts.
16502 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16503 interrupt \do_sym
16504 jmp ret_from_intr
16505 CFI_ENDPROC
16506 -END(\sym)
16507 +ENDPROC(\sym)
16508 .endm
16509
16510 #ifdef CONFIG_SMP
16511 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16512 CFI_ADJUST_CFA_OFFSET 15*8
16513 call error_entry
16514 DEFAULT_FRAME 0
16515 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16516 + testb $3, CS(%rsp)
16517 + jnz 1f
16518 + pax_enter_kernel
16519 + jmp 2f
16520 +1: pax_enter_kernel_user
16521 +2:
16522 +#else
16523 + pax_enter_kernel
16524 +#endif
16525 movq %rsp,%rdi /* pt_regs pointer */
16526 xorl %esi,%esi /* no error code */
16527 call \do_sym
16528 jmp error_exit /* %ebx: no swapgs flag */
16529 CFI_ENDPROC
16530 -END(\sym)
16531 +ENDPROC(\sym)
16532 .endm
16533
16534 .macro paranoidzeroentry sym do_sym
16535 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16536 subq $15*8, %rsp
16537 call save_paranoid
16538 TRACE_IRQS_OFF
16539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16540 + testb $3, CS(%rsp)
16541 + jnz 1f
16542 + pax_enter_kernel
16543 + jmp 2f
16544 +1: pax_enter_kernel_user
16545 +2:
16546 +#else
16547 + pax_enter_kernel
16548 +#endif
16549 movq %rsp,%rdi /* pt_regs pointer */
16550 xorl %esi,%esi /* no error code */
16551 call \do_sym
16552 jmp paranoid_exit /* %ebx: no swapgs flag */
16553 CFI_ENDPROC
16554 -END(\sym)
16555 +ENDPROC(\sym)
16556 .endm
16557
16558 .macro paranoidzeroentry_ist sym do_sym ist
16559 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16560 subq $15*8, %rsp
16561 call save_paranoid
16562 TRACE_IRQS_OFF
16563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16564 + testb $3, CS(%rsp)
16565 + jnz 1f
16566 + pax_enter_kernel
16567 + jmp 2f
16568 +1: pax_enter_kernel_user
16569 +2:
16570 +#else
16571 + pax_enter_kernel
16572 +#endif
16573 movq %rsp,%rdi /* pt_regs pointer */
16574 xorl %esi,%esi /* no error code */
16575 - PER_CPU(init_tss, %rbp)
16576 +#ifdef CONFIG_SMP
16577 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16578 + lea init_tss(%rbp), %rbp
16579 +#else
16580 + lea init_tss(%rip), %rbp
16581 +#endif
16582 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16583 call \do_sym
16584 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16585 jmp paranoid_exit /* %ebx: no swapgs flag */
16586 CFI_ENDPROC
16587 -END(\sym)
16588 +ENDPROC(\sym)
16589 .endm
16590
16591 .macro errorentry sym do_sym
16592 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16593 CFI_ADJUST_CFA_OFFSET 15*8
16594 call error_entry
16595 DEFAULT_FRAME 0
16596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16597 + testb $3, CS(%rsp)
16598 + jnz 1f
16599 + pax_enter_kernel
16600 + jmp 2f
16601 +1: pax_enter_kernel_user
16602 +2:
16603 +#else
16604 + pax_enter_kernel
16605 +#endif
16606 movq %rsp,%rdi /* pt_regs pointer */
16607 movq ORIG_RAX(%rsp),%rsi /* get error code */
16608 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16609 call \do_sym
16610 jmp error_exit /* %ebx: no swapgs flag */
16611 CFI_ENDPROC
16612 -END(\sym)
16613 +ENDPROC(\sym)
16614 .endm
16615
16616 /* error code is on the stack already */
16617 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16618 call save_paranoid
16619 DEFAULT_FRAME 0
16620 TRACE_IRQS_OFF
16621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16622 + testb $3, CS(%rsp)
16623 + jnz 1f
16624 + pax_enter_kernel
16625 + jmp 2f
16626 +1: pax_enter_kernel_user
16627 +2:
16628 +#else
16629 + pax_enter_kernel
16630 +#endif
16631 movq %rsp,%rdi /* pt_regs pointer */
16632 movq ORIG_RAX(%rsp),%rsi /* get error code */
16633 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16634 call \do_sym
16635 jmp paranoid_exit /* %ebx: no swapgs flag */
16636 CFI_ENDPROC
16637 -END(\sym)
16638 +ENDPROC(\sym)
16639 .endm
16640
16641 zeroentry divide_error do_divide_error
16642 @@ -1141,9 +1509,10 @@ gs_change:
16643 SWAPGS
16644 popf
16645 CFI_ADJUST_CFA_OFFSET -8
16646 + pax_force_retaddr
16647 ret
16648 CFI_ENDPROC
16649 -END(native_load_gs_index)
16650 +ENDPROC(native_load_gs_index)
16651
16652 .section __ex_table,"a"
16653 .align 8
16654 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16655 * of hacks for example to fork off the per-CPU idle tasks.
16656 * [Hopefully no generic code relies on the reschedule -AK]
16657 */
16658 - RESTORE_ALL
16659 + RESTORE_REST
16660 UNFAKE_STACK_FRAME
16661 + pax_force_retaddr
16662 ret
16663 CFI_ENDPROC
16664 -END(kernel_thread)
16665 +ENDPROC(kernel_thread)
16666
16667 ENTRY(child_rip)
16668 pushq $0 # fake return address
16669 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16670 */
16671 movq %rdi, %rax
16672 movq %rsi, %rdi
16673 + pax_force_fptr %rax
16674 call *%rax
16675 # exit
16676 mov %eax, %edi
16677 call do_exit
16678 ud2 # padding for call trace
16679 CFI_ENDPROC
16680 -END(child_rip)
16681 +ENDPROC(child_rip)
16682
16683 /*
16684 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16685 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16686 RESTORE_REST
16687 testq %rax,%rax
16688 je int_ret_from_sys_call
16689 - RESTORE_ARGS
16690 UNFAKE_STACK_FRAME
16691 + pax_force_retaddr
16692 ret
16693 CFI_ENDPROC
16694 -END(kernel_execve)
16695 +ENDPROC(kernel_execve)
16696
16697 /* Call softirq on interrupt stack. Interrupts are off. */
16698 ENTRY(call_softirq)
16699 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16700 CFI_DEF_CFA_REGISTER rsp
16701 CFI_ADJUST_CFA_OFFSET -8
16702 decl PER_CPU_VAR(irq_count)
16703 + pax_force_retaddr
16704 ret
16705 CFI_ENDPROC
16706 -END(call_softirq)
16707 +ENDPROC(call_softirq)
16708
16709 #ifdef CONFIG_XEN
16710 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16711 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16712 decl PER_CPU_VAR(irq_count)
16713 jmp error_exit
16714 CFI_ENDPROC
16715 -END(xen_do_hypervisor_callback)
16716 +ENDPROC(xen_do_hypervisor_callback)
16717
16718 /*
16719 * Hypervisor uses this for application faults while it executes.
16720 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16721 SAVE_ALL
16722 jmp error_exit
16723 CFI_ENDPROC
16724 -END(xen_failsafe_callback)
16725 +ENDPROC(xen_failsafe_callback)
16726
16727 #endif /* CONFIG_XEN */
16728
16729 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16730 TRACE_IRQS_OFF
16731 testl %ebx,%ebx /* swapgs needed? */
16732 jnz paranoid_restore
16733 - testl $3,CS(%rsp)
16734 + testb $3,CS(%rsp)
16735 jnz paranoid_userspace
16736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16737 + pax_exit_kernel
16738 + TRACE_IRQS_IRETQ 0
16739 + SWAPGS_UNSAFE_STACK
16740 + RESTORE_ALL 8
16741 + pax_force_retaddr_bts
16742 + jmp irq_return
16743 +#endif
16744 paranoid_swapgs:
16745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16746 + pax_exit_kernel_user
16747 +#else
16748 + pax_exit_kernel
16749 +#endif
16750 TRACE_IRQS_IRETQ 0
16751 SWAPGS_UNSAFE_STACK
16752 RESTORE_ALL 8
16753 jmp irq_return
16754 paranoid_restore:
16755 + pax_exit_kernel
16756 TRACE_IRQS_IRETQ 0
16757 RESTORE_ALL 8
16758 + pax_force_retaddr_bts
16759 jmp irq_return
16760 paranoid_userspace:
16761 GET_THREAD_INFO(%rcx)
16762 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16763 TRACE_IRQS_OFF
16764 jmp paranoid_userspace
16765 CFI_ENDPROC
16766 -END(paranoid_exit)
16767 +ENDPROC(paranoid_exit)
16768
16769 /*
16770 * Exception entry point. This expects an error code/orig_rax on the stack.
16771 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16772 movq_cfi r14, R14+8
16773 movq_cfi r15, R15+8
16774 xorl %ebx,%ebx
16775 - testl $3,CS+8(%rsp)
16776 + testb $3,CS+8(%rsp)
16777 je error_kernelspace
16778 error_swapgs:
16779 SWAPGS
16780 error_sti:
16781 TRACE_IRQS_OFF
16782 + pax_force_retaddr_bts
16783 ret
16784 CFI_ENDPROC
16785
16786 @@ -1497,7 +1885,7 @@ error_kernelspace:
16787 cmpq $gs_change,RIP+8(%rsp)
16788 je error_swapgs
16789 jmp error_sti
16790 -END(error_entry)
16791 +ENDPROC(error_entry)
16792
16793
16794 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16795 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16796 jnz retint_careful
16797 jmp retint_swapgs
16798 CFI_ENDPROC
16799 -END(error_exit)
16800 +ENDPROC(error_exit)
16801
16802
16803 /* runs on exception stack */
16804 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16805 CFI_ADJUST_CFA_OFFSET 15*8
16806 call save_paranoid
16807 DEFAULT_FRAME 0
16808 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16809 + testb $3, CS(%rsp)
16810 + jnz 1f
16811 + pax_enter_kernel
16812 + jmp 2f
16813 +1: pax_enter_kernel_user
16814 +2:
16815 +#else
16816 + pax_enter_kernel
16817 +#endif
16818 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16819 movq %rsp,%rdi
16820 movq $-1,%rsi
16821 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16822 DISABLE_INTERRUPTS(CLBR_NONE)
16823 testl %ebx,%ebx /* swapgs needed? */
16824 jnz nmi_restore
16825 - testl $3,CS(%rsp)
16826 + testb $3,CS(%rsp)
16827 jnz nmi_userspace
16828 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16829 + pax_exit_kernel
16830 + SWAPGS_UNSAFE_STACK
16831 + RESTORE_ALL 8
16832 + pax_force_retaddr_bts
16833 + jmp irq_return
16834 +#endif
16835 nmi_swapgs:
16836 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16837 + pax_exit_kernel_user
16838 +#else
16839 + pax_exit_kernel
16840 +#endif
16841 SWAPGS_UNSAFE_STACK
16842 + RESTORE_ALL 8
16843 + jmp irq_return
16844 nmi_restore:
16845 + pax_exit_kernel
16846 RESTORE_ALL 8
16847 + pax_force_retaddr_bts
16848 jmp irq_return
16849 nmi_userspace:
16850 GET_THREAD_INFO(%rcx)
16851 @@ -1573,14 +1987,14 @@ nmi_schedule:
16852 jmp paranoid_exit
16853 CFI_ENDPROC
16854 #endif
16855 -END(nmi)
16856 +ENDPROC(nmi)
16857
16858 ENTRY(ignore_sysret)
16859 CFI_STARTPROC
16860 mov $-ENOSYS,%eax
16861 sysret
16862 CFI_ENDPROC
16863 -END(ignore_sysret)
16864 +ENDPROC(ignore_sysret)
16865
16866 /*
16867 * End of kprobes section
16868 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16869 index 9dbb527..7b3615a 100644
16870 --- a/arch/x86/kernel/ftrace.c
16871 +++ b/arch/x86/kernel/ftrace.c
16872 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16873 static void *mod_code_newcode; /* holds the text to write to the IP */
16874
16875 static unsigned nmi_wait_count;
16876 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16877 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16878
16879 int ftrace_arch_read_dyn_info(char *buf, int size)
16880 {
16881 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16882
16883 r = snprintf(buf, size, "%u %u",
16884 nmi_wait_count,
16885 - atomic_read(&nmi_update_count));
16886 + atomic_read_unchecked(&nmi_update_count));
16887 return r;
16888 }
16889
16890 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16891 {
16892 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16893 smp_rmb();
16894 + pax_open_kernel();
16895 ftrace_mod_code();
16896 - atomic_inc(&nmi_update_count);
16897 + pax_close_kernel();
16898 + atomic_inc_unchecked(&nmi_update_count);
16899 }
16900 /* Must have previous changes seen before executions */
16901 smp_mb();
16902 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16903
16904
16905
16906 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16907 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16908
16909 static unsigned char *ftrace_nop_replace(void)
16910 {
16911 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16912 {
16913 unsigned char replaced[MCOUNT_INSN_SIZE];
16914
16915 + ip = ktla_ktva(ip);
16916 +
16917 /*
16918 * Note: Due to modules and __init, code can
16919 * disappear and change, we need to protect against faulting
16920 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16921 unsigned char old[MCOUNT_INSN_SIZE], *new;
16922 int ret;
16923
16924 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16925 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16926 new = ftrace_call_replace(ip, (unsigned long)func);
16927 ret = ftrace_modify_code(ip, old, new);
16928
16929 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16930 switch (faulted) {
16931 case 0:
16932 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16933 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16934 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16935 break;
16936 case 1:
16937 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16938 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16939 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16940 break;
16941 case 2:
16942 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16943 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16944 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16945 break;
16946 }
16947
16948 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16949 {
16950 unsigned char code[MCOUNT_INSN_SIZE];
16951
16952 + ip = ktla_ktva(ip);
16953 +
16954 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16955 return -EFAULT;
16956
16957 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16958 index 4f8e250..df24706 100644
16959 --- a/arch/x86/kernel/head32.c
16960 +++ b/arch/x86/kernel/head32.c
16961 @@ -16,6 +16,7 @@
16962 #include <asm/apic.h>
16963 #include <asm/io_apic.h>
16964 #include <asm/bios_ebda.h>
16965 +#include <asm/boot.h>
16966
16967 static void __init i386_default_early_setup(void)
16968 {
16969 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16970 {
16971 reserve_trampoline_memory();
16972
16973 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16974 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16975
16976 #ifdef CONFIG_BLK_DEV_INITRD
16977 /* Reserve INITRD */
16978 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16979 index 34c3308..6fc4e76 100644
16980 --- a/arch/x86/kernel/head_32.S
16981 +++ b/arch/x86/kernel/head_32.S
16982 @@ -19,10 +19,17 @@
16983 #include <asm/setup.h>
16984 #include <asm/processor-flags.h>
16985 #include <asm/percpu.h>
16986 +#include <asm/msr-index.h>
16987
16988 /* Physical address */
16989 #define pa(X) ((X) - __PAGE_OFFSET)
16990
16991 +#ifdef CONFIG_PAX_KERNEXEC
16992 +#define ta(X) (X)
16993 +#else
16994 +#define ta(X) ((X) - __PAGE_OFFSET)
16995 +#endif
16996 +
16997 /*
16998 * References to members of the new_cpu_data structure.
16999 */
17000 @@ -52,11 +59,7 @@
17001 * and small than max_low_pfn, otherwise will waste some page table entries
17002 */
17003
17004 -#if PTRS_PER_PMD > 1
17005 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17006 -#else
17007 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17008 -#endif
17009 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17010
17011 /* Enough space to fit pagetables for the low memory linear map */
17012 MAPPING_BEYOND_END = \
17013 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17014 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17015
17016 /*
17017 + * Real beginning of normal "text" segment
17018 + */
17019 +ENTRY(stext)
17020 +ENTRY(_stext)
17021 +
17022 +/*
17023 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17024 * %esi points to the real-mode code as a 32-bit pointer.
17025 * CS and DS must be 4 GB flat segments, but we don't depend on
17026 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17027 * can.
17028 */
17029 __HEAD
17030 +
17031 +#ifdef CONFIG_PAX_KERNEXEC
17032 + jmp startup_32
17033 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17034 +.fill PAGE_SIZE-5,1,0xcc
17035 +#endif
17036 +
17037 ENTRY(startup_32)
17038 + movl pa(stack_start),%ecx
17039 +
17040 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17041 us to not reload segments */
17042 testb $(1<<6), BP_loadflags(%esi)
17043 @@ -95,7 +113,60 @@ ENTRY(startup_32)
17044 movl %eax,%es
17045 movl %eax,%fs
17046 movl %eax,%gs
17047 + movl %eax,%ss
17048 2:
17049 + leal -__PAGE_OFFSET(%ecx),%esp
17050 +
17051 +#ifdef CONFIG_SMP
17052 + movl $pa(cpu_gdt_table),%edi
17053 + movl $__per_cpu_load,%eax
17054 + movw %ax,__KERNEL_PERCPU + 2(%edi)
17055 + rorl $16,%eax
17056 + movb %al,__KERNEL_PERCPU + 4(%edi)
17057 + movb %ah,__KERNEL_PERCPU + 7(%edi)
17058 + movl $__per_cpu_end - 1,%eax
17059 + subl $__per_cpu_start,%eax
17060 + movw %ax,__KERNEL_PERCPU + 0(%edi)
17061 +#endif
17062 +
17063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17064 + movl $NR_CPUS,%ecx
17065 + movl $pa(cpu_gdt_table),%edi
17066 +1:
17067 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17068 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17069 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17070 + addl $PAGE_SIZE_asm,%edi
17071 + loop 1b
17072 +#endif
17073 +
17074 +#ifdef CONFIG_PAX_KERNEXEC
17075 + movl $pa(boot_gdt),%edi
17076 + movl $__LOAD_PHYSICAL_ADDR,%eax
17077 + movw %ax,__BOOT_CS + 2(%edi)
17078 + rorl $16,%eax
17079 + movb %al,__BOOT_CS + 4(%edi)
17080 + movb %ah,__BOOT_CS + 7(%edi)
17081 + rorl $16,%eax
17082 +
17083 + ljmp $(__BOOT_CS),$1f
17084 +1:
17085 +
17086 + movl $NR_CPUS,%ecx
17087 + movl $pa(cpu_gdt_table),%edi
17088 + addl $__PAGE_OFFSET,%eax
17089 +1:
17090 + movw %ax,__KERNEL_CS + 2(%edi)
17091 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17092 + rorl $16,%eax
17093 + movb %al,__KERNEL_CS + 4(%edi)
17094 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17095 + movb %ah,__KERNEL_CS + 7(%edi)
17096 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17097 + rorl $16,%eax
17098 + addl $PAGE_SIZE_asm,%edi
17099 + loop 1b
17100 +#endif
17101
17102 /*
17103 * Clear BSS first so that there are no surprises...
17104 @@ -140,9 +211,7 @@ ENTRY(startup_32)
17105 cmpl $num_subarch_entries, %eax
17106 jae bad_subarch
17107
17108 - movl pa(subarch_entries)(,%eax,4), %eax
17109 - subl $__PAGE_OFFSET, %eax
17110 - jmp *%eax
17111 + jmp *pa(subarch_entries)(,%eax,4)
17112
17113 bad_subarch:
17114 WEAK(lguest_entry)
17115 @@ -154,10 +223,10 @@ WEAK(xen_entry)
17116 __INITDATA
17117
17118 subarch_entries:
17119 - .long default_entry /* normal x86/PC */
17120 - .long lguest_entry /* lguest hypervisor */
17121 - .long xen_entry /* Xen hypervisor */
17122 - .long default_entry /* Moorestown MID */
17123 + .long ta(default_entry) /* normal x86/PC */
17124 + .long ta(lguest_entry) /* lguest hypervisor */
17125 + .long ta(xen_entry) /* Xen hypervisor */
17126 + .long ta(default_entry) /* Moorestown MID */
17127 num_subarch_entries = (. - subarch_entries) / 4
17128 .previous
17129 #endif /* CONFIG_PARAVIRT */
17130 @@ -218,8 +287,11 @@ default_entry:
17131 movl %eax, pa(max_pfn_mapped)
17132
17133 /* Do early initialization of the fixmap area */
17134 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17135 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17136 +#ifdef CONFIG_COMPAT_VDSO
17137 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17138 +#else
17139 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17140 +#endif
17141 #else /* Not PAE */
17142
17143 page_pde_offset = (__PAGE_OFFSET >> 20);
17144 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17145 movl %eax, pa(max_pfn_mapped)
17146
17147 /* Do early initialization of the fixmap area */
17148 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17149 - movl %eax,pa(swapper_pg_dir+0xffc)
17150 +#ifdef CONFIG_COMPAT_VDSO
17151 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17152 +#else
17153 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17154 +#endif
17155 #endif
17156 jmp 3f
17157 /*
17158 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17159 movl %eax,%es
17160 movl %eax,%fs
17161 movl %eax,%gs
17162 + movl pa(stack_start),%ecx
17163 + movl %eax,%ss
17164 + leal -__PAGE_OFFSET(%ecx),%esp
17165 #endif /* CONFIG_SMP */
17166 3:
17167
17168 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17169 orl %edx,%eax
17170 movl %eax,%cr4
17171
17172 +#ifdef CONFIG_X86_PAE
17173 btl $5, %eax # check if PAE is enabled
17174 jnc 6f
17175
17176 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17177 cpuid
17178 cmpl $0x80000000, %eax
17179 jbe 6f
17180 +
17181 + /* Clear bogus XD_DISABLE bits */
17182 + call verify_cpu
17183 +
17184 mov $0x80000001, %eax
17185 cpuid
17186 /* Execute Disable bit supported? */
17187 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17188 jnc 6f
17189
17190 /* Setup EFER (Extended Feature Enable Register) */
17191 - movl $0xc0000080, %ecx
17192 + movl $MSR_EFER, %ecx
17193 rdmsr
17194
17195 btsl $11, %eax
17196 /* Make changes effective */
17197 wrmsr
17198
17199 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17200 + movl $1,pa(nx_enabled)
17201 +#endif
17202 +
17203 6:
17204
17205 /*
17206 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17207 movl %eax,%cr0 /* ..and set paging (PG) bit */
17208 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17209 1:
17210 - /* Set up the stack pointer */
17211 - lss stack_start,%esp
17212 + /* Shift the stack pointer to a virtual address */
17213 + addl $__PAGE_OFFSET, %esp
17214
17215 /*
17216 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17217 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17218
17219 #ifdef CONFIG_SMP
17220 cmpb $0, ready
17221 - jz 1f /* Initial CPU cleans BSS */
17222 - jmp checkCPUtype
17223 -1:
17224 + jnz checkCPUtype
17225 #endif /* CONFIG_SMP */
17226
17227 /*
17228 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17229 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17230 movl %eax,%ss # after changing gdt.
17231
17232 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17233 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17234 movl %eax,%ds
17235 movl %eax,%es
17236
17237 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17238 */
17239 cmpb $0,ready
17240 jne 1f
17241 - movl $per_cpu__gdt_page,%eax
17242 + movl $cpu_gdt_table,%eax
17243 movl $per_cpu__stack_canary,%ecx
17244 +#ifdef CONFIG_SMP
17245 + addl $__per_cpu_load,%ecx
17246 +#endif
17247 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17248 shrl $16, %ecx
17249 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17250 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17251 1:
17252 -#endif
17253 movl $(__KERNEL_STACK_CANARY),%eax
17254 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17255 + movl $(__USER_DS),%eax
17256 +#else
17257 + xorl %eax,%eax
17258 +#endif
17259 movl %eax,%gs
17260
17261 xorl %eax,%eax # Clear LDT
17262 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17263
17264 cld # gcc2 wants the direction flag cleared at all times
17265 pushl $0 # fake return address for unwinder
17266 -#ifdef CONFIG_SMP
17267 - movb ready, %cl
17268 movb $1, ready
17269 - cmpb $0,%cl # the first CPU calls start_kernel
17270 - je 1f
17271 - movl (stack_start), %esp
17272 -1:
17273 -#endif /* CONFIG_SMP */
17274 jmp *(initial_code)
17275
17276 /*
17277 @@ -546,22 +631,22 @@ early_page_fault:
17278 jmp early_fault
17279
17280 early_fault:
17281 - cld
17282 #ifdef CONFIG_PRINTK
17283 + cmpl $1,%ss:early_recursion_flag
17284 + je hlt_loop
17285 + incl %ss:early_recursion_flag
17286 + cld
17287 pusha
17288 movl $(__KERNEL_DS),%eax
17289 movl %eax,%ds
17290 movl %eax,%es
17291 - cmpl $2,early_recursion_flag
17292 - je hlt_loop
17293 - incl early_recursion_flag
17294 movl %cr2,%eax
17295 pushl %eax
17296 pushl %edx /* trapno */
17297 pushl $fault_msg
17298 call printk
17299 +; call dump_stack
17300 #endif
17301 - call dump_stack
17302 hlt_loop:
17303 hlt
17304 jmp hlt_loop
17305 @@ -569,8 +654,11 @@ hlt_loop:
17306 /* This is the default interrupt "handler" :-) */
17307 ALIGN
17308 ignore_int:
17309 - cld
17310 #ifdef CONFIG_PRINTK
17311 + cmpl $2,%ss:early_recursion_flag
17312 + je hlt_loop
17313 + incl %ss:early_recursion_flag
17314 + cld
17315 pushl %eax
17316 pushl %ecx
17317 pushl %edx
17318 @@ -579,9 +667,6 @@ ignore_int:
17319 movl $(__KERNEL_DS),%eax
17320 movl %eax,%ds
17321 movl %eax,%es
17322 - cmpl $2,early_recursion_flag
17323 - je hlt_loop
17324 - incl early_recursion_flag
17325 pushl 16(%esp)
17326 pushl 24(%esp)
17327 pushl 32(%esp)
17328 @@ -600,6 +685,8 @@ ignore_int:
17329 #endif
17330 iret
17331
17332 +#include "verify_cpu.S"
17333 +
17334 __REFDATA
17335 .align 4
17336 ENTRY(initial_code)
17337 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17338 /*
17339 * BSS section
17340 */
17341 -__PAGE_ALIGNED_BSS
17342 - .align PAGE_SIZE_asm
17343 #ifdef CONFIG_X86_PAE
17344 +.section .swapper_pg_pmd,"a",@progbits
17345 swapper_pg_pmd:
17346 .fill 1024*KPMDS,4,0
17347 #else
17348 +.section .swapper_pg_dir,"a",@progbits
17349 ENTRY(swapper_pg_dir)
17350 .fill 1024,4,0
17351 #endif
17352 +.section .swapper_pg_fixmap,"a",@progbits
17353 swapper_pg_fixmap:
17354 .fill 1024,4,0
17355 #ifdef CONFIG_X86_TRAMPOLINE
17356 +.section .trampoline_pg_dir,"a",@progbits
17357 ENTRY(trampoline_pg_dir)
17358 +#ifdef CONFIG_X86_PAE
17359 + .fill 4,8,0
17360 +#else
17361 .fill 1024,4,0
17362 #endif
17363 +#endif
17364 +
17365 +.section .empty_zero_page,"a",@progbits
17366 ENTRY(empty_zero_page)
17367 .fill 4096,1,0
17368
17369 /*
17370 + * The IDT has to be page-aligned to simplify the Pentium
17371 + * F0 0F bug workaround.. We have a special link segment
17372 + * for this.
17373 + */
17374 +.section .idt,"a",@progbits
17375 +ENTRY(idt_table)
17376 + .fill 256,8,0
17377 +
17378 +/*
17379 * This starts the data section.
17380 */
17381 #ifdef CONFIG_X86_PAE
17382 -__PAGE_ALIGNED_DATA
17383 - /* Page-aligned for the benefit of paravirt? */
17384 - .align PAGE_SIZE_asm
17385 +.section .swapper_pg_dir,"a",@progbits
17386 +
17387 ENTRY(swapper_pg_dir)
17388 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17389 # if KPMDS == 3
17390 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17391 # error "Kernel PMDs should be 1, 2 or 3"
17392 # endif
17393 .align PAGE_SIZE_asm /* needs to be page-sized too */
17394 +
17395 +#ifdef CONFIG_PAX_PER_CPU_PGD
17396 +ENTRY(cpu_pgd)
17397 + .rept NR_CPUS
17398 + .fill 4,8,0
17399 + .endr
17400 +#endif
17401 +
17402 #endif
17403
17404 .data
17405 +.balign 4
17406 ENTRY(stack_start)
17407 - .long init_thread_union+THREAD_SIZE
17408 - .long __BOOT_DS
17409 + .long init_thread_union+THREAD_SIZE-8
17410
17411 ready: .byte 0
17412
17413 +.section .rodata,"a",@progbits
17414 early_recursion_flag:
17415 .long 0
17416
17417 @@ -697,7 +809,7 @@ fault_msg:
17418 .word 0 # 32 bit align gdt_desc.address
17419 boot_gdt_descr:
17420 .word __BOOT_DS+7
17421 - .long boot_gdt - __PAGE_OFFSET
17422 + .long pa(boot_gdt)
17423
17424 .word 0 # 32-bit align idt_desc.address
17425 idt_descr:
17426 @@ -708,7 +820,7 @@ idt_descr:
17427 .word 0 # 32 bit align gdt_desc.address
17428 ENTRY(early_gdt_descr)
17429 .word GDT_ENTRIES*8-1
17430 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17431 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17432
17433 /*
17434 * The boot_gdt must mirror the equivalent in setup.S and is
17435 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17436 .align L1_CACHE_BYTES
17437 ENTRY(boot_gdt)
17438 .fill GDT_ENTRY_BOOT_CS,8,0
17439 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17440 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17441 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17442 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17443 +
17444 + .align PAGE_SIZE_asm
17445 +ENTRY(cpu_gdt_table)
17446 + .rept NR_CPUS
17447 + .quad 0x0000000000000000 /* NULL descriptor */
17448 + .quad 0x0000000000000000 /* 0x0b reserved */
17449 + .quad 0x0000000000000000 /* 0x13 reserved */
17450 + .quad 0x0000000000000000 /* 0x1b reserved */
17451 +
17452 +#ifdef CONFIG_PAX_KERNEXEC
17453 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17454 +#else
17455 + .quad 0x0000000000000000 /* 0x20 unused */
17456 +#endif
17457 +
17458 + .quad 0x0000000000000000 /* 0x28 unused */
17459 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17460 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17461 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17462 + .quad 0x0000000000000000 /* 0x4b reserved */
17463 + .quad 0x0000000000000000 /* 0x53 reserved */
17464 + .quad 0x0000000000000000 /* 0x5b reserved */
17465 +
17466 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17467 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17468 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17469 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17470 +
17471 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17472 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17473 +
17474 + /*
17475 + * Segments used for calling PnP BIOS have byte granularity.
17476 + * The code segments and data segments have fixed 64k limits,
17477 + * the transfer segment sizes are set at run time.
17478 + */
17479 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17480 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17481 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17482 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17483 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17484 +
17485 + /*
17486 + * The APM segments have byte granularity and their bases
17487 + * are set at run time. All have 64k limits.
17488 + */
17489 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17490 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17491 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17492 +
17493 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17494 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17495 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17496 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17497 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17498 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17499 +
17500 + /* Be sure this is zeroed to avoid false validations in Xen */
17501 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17502 + .endr
17503 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17504 index 780cd92..758b2a6 100644
17505 --- a/arch/x86/kernel/head_64.S
17506 +++ b/arch/x86/kernel/head_64.S
17507 @@ -19,6 +19,8 @@
17508 #include <asm/cache.h>
17509 #include <asm/processor-flags.h>
17510 #include <asm/percpu.h>
17511 +#include <asm/cpufeature.h>
17512 +#include <asm/alternative-asm.h>
17513
17514 #ifdef CONFIG_PARAVIRT
17515 #include <asm/asm-offsets.h>
17516 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17517 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17518 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17519 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17520 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17521 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17522 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17523 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17524 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17525 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17526
17527 .text
17528 __HEAD
17529 @@ -85,35 +93,23 @@ startup_64:
17530 */
17531 addq %rbp, init_level4_pgt + 0(%rip)
17532 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17533 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17534 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17535 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17536 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17537
17538 addq %rbp, level3_ident_pgt + 0(%rip)
17539 +#ifndef CONFIG_XEN
17540 + addq %rbp, level3_ident_pgt + 8(%rip)
17541 +#endif
17542
17543 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17544 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17545 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17546 +
17547 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17548 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17549
17550 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17551 -
17552 - /* Add an Identity mapping if I am above 1G */
17553 - leaq _text(%rip), %rdi
17554 - andq $PMD_PAGE_MASK, %rdi
17555 -
17556 - movq %rdi, %rax
17557 - shrq $PUD_SHIFT, %rax
17558 - andq $(PTRS_PER_PUD - 1), %rax
17559 - jz ident_complete
17560 -
17561 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17562 - leaq level3_ident_pgt(%rip), %rbx
17563 - movq %rdx, 0(%rbx, %rax, 8)
17564 -
17565 - movq %rdi, %rax
17566 - shrq $PMD_SHIFT, %rax
17567 - andq $(PTRS_PER_PMD - 1), %rax
17568 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17569 - leaq level2_spare_pgt(%rip), %rbx
17570 - movq %rdx, 0(%rbx, %rax, 8)
17571 -ident_complete:
17572 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17573
17574 /*
17575 * Fixup the kernel text+data virtual addresses. Note that
17576 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17577 * after the boot processor executes this code.
17578 */
17579
17580 - /* Enable PAE mode and PGE */
17581 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17582 + /* Enable PAE mode and PSE/PGE */
17583 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17584 movq %rax, %cr4
17585
17586 /* Setup early boot stage 4 level pagetables. */
17587 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17588 movl $MSR_EFER, %ecx
17589 rdmsr
17590 btsl $_EFER_SCE, %eax /* Enable System Call */
17591 - btl $20,%edi /* No Execute supported? */
17592 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17593 jnc 1f
17594 btsl $_EFER_NX, %eax
17595 + leaq init_level4_pgt(%rip), %rdi
17596 +#ifndef CONFIG_EFI
17597 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17598 +#endif
17599 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17600 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17601 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17602 1: wrmsr /* Make changes effective */
17603
17604 /* Setup cr0 */
17605 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17606 * jump. In addition we need to ensure %cs is set so we make this
17607 * a far return.
17608 */
17609 + pax_set_fptr_mask
17610 movq initial_code(%rip),%rax
17611 pushq $0 # fake return address to stop unwinder
17612 pushq $__KERNEL_CS # set correct cs
17613 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17614 .quad x86_64_start_kernel
17615 ENTRY(initial_gs)
17616 .quad INIT_PER_CPU_VAR(irq_stack_union)
17617 - __FINITDATA
17618
17619 ENTRY(stack_start)
17620 .quad init_thread_union+THREAD_SIZE-8
17621 .word 0
17622 + __FINITDATA
17623
17624 bad_address:
17625 jmp bad_address
17626
17627 - .section ".init.text","ax"
17628 + __INIT
17629 #ifdef CONFIG_EARLY_PRINTK
17630 .globl early_idt_handlers
17631 early_idt_handlers:
17632 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17633 #endif /* EARLY_PRINTK */
17634 1: hlt
17635 jmp 1b
17636 + .previous
17637
17638 #ifdef CONFIG_EARLY_PRINTK
17639 + __INITDATA
17640 early_recursion_flag:
17641 .long 0
17642 + .previous
17643
17644 + .section .rodata,"a",@progbits
17645 early_idt_msg:
17646 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17647 early_idt_ripmsg:
17648 .asciz "RIP %s\n"
17649 + .previous
17650 #endif /* CONFIG_EARLY_PRINTK */
17651 - .previous
17652
17653 + .section .rodata,"a",@progbits
17654 #define NEXT_PAGE(name) \
17655 .balign PAGE_SIZE; \
17656 ENTRY(name)
17657 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17658 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17659 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17660 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17661 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17662 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17663 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17664 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17665 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17666 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17667 .org init_level4_pgt + L4_START_KERNEL*8, 0
17668 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17669 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17670
17671 +#ifdef CONFIG_PAX_PER_CPU_PGD
17672 +NEXT_PAGE(cpu_pgd)
17673 + .rept NR_CPUS
17674 + .fill 512,8,0
17675 + .endr
17676 +#endif
17677 +
17678 NEXT_PAGE(level3_ident_pgt)
17679 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17680 +#ifdef CONFIG_XEN
17681 .fill 511,8,0
17682 +#else
17683 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17684 + .fill 510,8,0
17685 +#endif
17686 +
17687 +NEXT_PAGE(level3_vmalloc_start_pgt)
17688 + .fill 512,8,0
17689 +
17690 +NEXT_PAGE(level3_vmalloc_end_pgt)
17691 + .fill 512,8,0
17692 +
17693 +NEXT_PAGE(level3_vmemmap_pgt)
17694 + .fill L3_VMEMMAP_START,8,0
17695 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17696
17697 NEXT_PAGE(level3_kernel_pgt)
17698 .fill L3_START_KERNEL,8,0
17699 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17700 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17701 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17702
17703 +NEXT_PAGE(level2_vmemmap_pgt)
17704 + .fill 512,8,0
17705 +
17706 NEXT_PAGE(level2_fixmap_pgt)
17707 - .fill 506,8,0
17708 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17709 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17710 - .fill 5,8,0
17711 + .fill 507,8,0
17712 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17713 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17714 + .fill 4,8,0
17715
17716 -NEXT_PAGE(level1_fixmap_pgt)
17717 +NEXT_PAGE(level1_vsyscall_pgt)
17718 .fill 512,8,0
17719
17720 -NEXT_PAGE(level2_ident_pgt)
17721 - /* Since I easily can, map the first 1G.
17722 + /* Since I easily can, map the first 2G.
17723 * Don't set NX because code runs from these pages.
17724 */
17725 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17726 +NEXT_PAGE(level2_ident_pgt)
17727 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17728
17729 NEXT_PAGE(level2_kernel_pgt)
17730 /*
17731 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17732 * If you want to increase this then increase MODULES_VADDR
17733 * too.)
17734 */
17735 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17736 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17737 -
17738 -NEXT_PAGE(level2_spare_pgt)
17739 - .fill 512, 8, 0
17740 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17741
17742 #undef PMDS
17743 #undef NEXT_PAGE
17744
17745 - .data
17746 + .align PAGE_SIZE
17747 +ENTRY(cpu_gdt_table)
17748 + .rept NR_CPUS
17749 + .quad 0x0000000000000000 /* NULL descriptor */
17750 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17751 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17752 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17753 + .quad 0x00cffb000000ffff /* __USER32_CS */
17754 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17755 + .quad 0x00affb000000ffff /* __USER_CS */
17756 +
17757 +#ifdef CONFIG_PAX_KERNEXEC
17758 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17759 +#else
17760 + .quad 0x0 /* unused */
17761 +#endif
17762 +
17763 + .quad 0,0 /* TSS */
17764 + .quad 0,0 /* LDT */
17765 + .quad 0,0,0 /* three TLS descriptors */
17766 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17767 + /* asm/segment.h:GDT_ENTRIES must match this */
17768 +
17769 + /* zero the remaining page */
17770 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17771 + .endr
17772 +
17773 .align 16
17774 .globl early_gdt_descr
17775 early_gdt_descr:
17776 .word GDT_ENTRIES*8-1
17777 early_gdt_descr_base:
17778 - .quad INIT_PER_CPU_VAR(gdt_page)
17779 + .quad cpu_gdt_table
17780
17781 ENTRY(phys_base)
17782 /* This must match the first entry in level2_kernel_pgt */
17783 .quad 0x0000000000000000
17784
17785 #include "../../x86/xen/xen-head.S"
17786 -
17787 - .section .bss, "aw", @nobits
17788 +
17789 + .section .rodata,"a",@progbits
17790 .align L1_CACHE_BYTES
17791 ENTRY(idt_table)
17792 - .skip IDT_ENTRIES * 16
17793 + .fill 512,8,0
17794
17795 __PAGE_ALIGNED_BSS
17796 .align PAGE_SIZE
17797 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17798 index 9c3bd4a..e1d9b35 100644
17799 --- a/arch/x86/kernel/i386_ksyms_32.c
17800 +++ b/arch/x86/kernel/i386_ksyms_32.c
17801 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17802 EXPORT_SYMBOL(cmpxchg8b_emu);
17803 #endif
17804
17805 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17806 +
17807 /* Networking helper routines. */
17808 EXPORT_SYMBOL(csum_partial_copy_generic);
17809 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17810 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17811
17812 EXPORT_SYMBOL(__get_user_1);
17813 EXPORT_SYMBOL(__get_user_2);
17814 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17815
17816 EXPORT_SYMBOL(csum_partial);
17817 EXPORT_SYMBOL(empty_zero_page);
17818 +
17819 +#ifdef CONFIG_PAX_KERNEXEC
17820 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17821 +#endif
17822 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17823 index df89102..a244320 100644
17824 --- a/arch/x86/kernel/i8259.c
17825 +++ b/arch/x86/kernel/i8259.c
17826 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17827 "spurious 8259A interrupt: IRQ%d.\n", irq);
17828 spurious_irq_mask |= irqmask;
17829 }
17830 - atomic_inc(&irq_err_count);
17831 + atomic_inc_unchecked(&irq_err_count);
17832 /*
17833 * Theoretically we do not have to handle this IRQ,
17834 * but in Linux this does not cause problems and is
17835 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17836 index 3a54dcb..1c22348 100644
17837 --- a/arch/x86/kernel/init_task.c
17838 +++ b/arch/x86/kernel/init_task.c
17839 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17840 * way process stacks are handled. This is done by having a special
17841 * "init_task" linker map entry..
17842 */
17843 -union thread_union init_thread_union __init_task_data =
17844 - { INIT_THREAD_INFO(init_task) };
17845 +union thread_union init_thread_union __init_task_data;
17846
17847 /*
17848 * Initial task structure.
17849 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17850 * section. Since TSS's are completely CPU-local, we want them
17851 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17852 */
17853 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17854 -
17855 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17856 +EXPORT_SYMBOL(init_tss);
17857 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17858 index 99c4d30..74c84e9 100644
17859 --- a/arch/x86/kernel/ioport.c
17860 +++ b/arch/x86/kernel/ioport.c
17861 @@ -6,6 +6,7 @@
17862 #include <linux/sched.h>
17863 #include <linux/kernel.h>
17864 #include <linux/capability.h>
17865 +#include <linux/security.h>
17866 #include <linux/errno.h>
17867 #include <linux/types.h>
17868 #include <linux/ioport.h>
17869 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17870
17871 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17872 return -EINVAL;
17873 +#ifdef CONFIG_GRKERNSEC_IO
17874 + if (turn_on && grsec_disable_privio) {
17875 + gr_handle_ioperm();
17876 + return -EPERM;
17877 + }
17878 +#endif
17879 if (turn_on && !capable(CAP_SYS_RAWIO))
17880 return -EPERM;
17881
17882 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17883 * because the ->io_bitmap_max value must match the bitmap
17884 * contents:
17885 */
17886 - tss = &per_cpu(init_tss, get_cpu());
17887 + tss = init_tss + get_cpu();
17888
17889 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17890
17891 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17892 return -EINVAL;
17893 /* Trying to gain more privileges? */
17894 if (level > old) {
17895 +#ifdef CONFIG_GRKERNSEC_IO
17896 + if (grsec_disable_privio) {
17897 + gr_handle_iopl();
17898 + return -EPERM;
17899 + }
17900 +#endif
17901 if (!capable(CAP_SYS_RAWIO))
17902 return -EPERM;
17903 }
17904 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17905 index 04bbd52..83a07d9 100644
17906 --- a/arch/x86/kernel/irq.c
17907 +++ b/arch/x86/kernel/irq.c
17908 @@ -15,7 +15,7 @@
17909 #include <asm/mce.h>
17910 #include <asm/hw_irq.h>
17911
17912 -atomic_t irq_err_count;
17913 +atomic_unchecked_t irq_err_count;
17914
17915 /* Function pointer for generic interrupt vector handling */
17916 void (*generic_interrupt_extension)(void) = NULL;
17917 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17918 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17919 seq_printf(p, " Machine check polls\n");
17920 #endif
17921 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17922 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17923 #if defined(CONFIG_X86_IO_APIC)
17924 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17925 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17926 #endif
17927 return 0;
17928 }
17929 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17930
17931 u64 arch_irq_stat(void)
17932 {
17933 - u64 sum = atomic_read(&irq_err_count);
17934 + u64 sum = atomic_read_unchecked(&irq_err_count);
17935
17936 #ifdef CONFIG_X86_IO_APIC
17937 - sum += atomic_read(&irq_mis_count);
17938 + sum += atomic_read_unchecked(&irq_mis_count);
17939 #endif
17940 return sum;
17941 }
17942 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17943 index 7d35d0f..03f1d52 100644
17944 --- a/arch/x86/kernel/irq_32.c
17945 +++ b/arch/x86/kernel/irq_32.c
17946 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17947 __asm__ __volatile__("andl %%esp,%0" :
17948 "=r" (sp) : "0" (THREAD_SIZE - 1));
17949
17950 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17951 + return sp < STACK_WARN;
17952 }
17953
17954 static void print_stack_overflow(void)
17955 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17956 * per-CPU IRQ handling contexts (thread information and stack)
17957 */
17958 union irq_ctx {
17959 - struct thread_info tinfo;
17960 - u32 stack[THREAD_SIZE/sizeof(u32)];
17961 -} __attribute__((aligned(PAGE_SIZE)));
17962 + unsigned long previous_esp;
17963 + u32 stack[THREAD_SIZE/sizeof(u32)];
17964 +} __attribute__((aligned(THREAD_SIZE)));
17965
17966 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17967 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17968 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17969 static inline int
17970 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17971 {
17972 - union irq_ctx *curctx, *irqctx;
17973 + union irq_ctx *irqctx;
17974 u32 *isp, arg1, arg2;
17975
17976 - curctx = (union irq_ctx *) current_thread_info();
17977 irqctx = __get_cpu_var(hardirq_ctx);
17978
17979 /*
17980 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17981 * handler) we can't do that and just have to keep using the
17982 * current stack (which is the irq stack already after all)
17983 */
17984 - if (unlikely(curctx == irqctx))
17985 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17986 return 0;
17987
17988 /* build the stack frame on the IRQ stack */
17989 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17990 - irqctx->tinfo.task = curctx->tinfo.task;
17991 - irqctx->tinfo.previous_esp = current_stack_pointer;
17992 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17993 + irqctx->previous_esp = current_stack_pointer;
17994
17995 - /*
17996 - * Copy the softirq bits in preempt_count so that the
17997 - * softirq checks work in the hardirq context.
17998 - */
17999 - irqctx->tinfo.preempt_count =
18000 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18001 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18002 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18003 + __set_fs(MAKE_MM_SEG(0));
18004 +#endif
18005
18006 if (unlikely(overflow))
18007 call_on_stack(print_stack_overflow, isp);
18008 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18009 : "0" (irq), "1" (desc), "2" (isp),
18010 "D" (desc->handle_irq)
18011 : "memory", "cc", "ecx");
18012 +
18013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18014 + __set_fs(current_thread_info()->addr_limit);
18015 +#endif
18016 +
18017 return 1;
18018 }
18019
18020 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18021 */
18022 void __cpuinit irq_ctx_init(int cpu)
18023 {
18024 - union irq_ctx *irqctx;
18025 -
18026 if (per_cpu(hardirq_ctx, cpu))
18027 return;
18028
18029 - irqctx = &per_cpu(hardirq_stack, cpu);
18030 - irqctx->tinfo.task = NULL;
18031 - irqctx->tinfo.exec_domain = NULL;
18032 - irqctx->tinfo.cpu = cpu;
18033 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18034 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18035 -
18036 - per_cpu(hardirq_ctx, cpu) = irqctx;
18037 -
18038 - irqctx = &per_cpu(softirq_stack, cpu);
18039 - irqctx->tinfo.task = NULL;
18040 - irqctx->tinfo.exec_domain = NULL;
18041 - irqctx->tinfo.cpu = cpu;
18042 - irqctx->tinfo.preempt_count = 0;
18043 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18044 -
18045 - per_cpu(softirq_ctx, cpu) = irqctx;
18046 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18047 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18048
18049 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18050 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18051 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18052 asmlinkage void do_softirq(void)
18053 {
18054 unsigned long flags;
18055 - struct thread_info *curctx;
18056 union irq_ctx *irqctx;
18057 u32 *isp;
18058
18059 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18060 local_irq_save(flags);
18061
18062 if (local_softirq_pending()) {
18063 - curctx = current_thread_info();
18064 irqctx = __get_cpu_var(softirq_ctx);
18065 - irqctx->tinfo.task = curctx->task;
18066 - irqctx->tinfo.previous_esp = current_stack_pointer;
18067 + irqctx->previous_esp = current_stack_pointer;
18068
18069 /* build the stack frame on the softirq stack */
18070 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18071 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18072 +
18073 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18074 + __set_fs(MAKE_MM_SEG(0));
18075 +#endif
18076
18077 call_on_stack(__do_softirq, isp);
18078 +
18079 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18080 + __set_fs(current_thread_info()->addr_limit);
18081 +#endif
18082 +
18083 /*
18084 * Shouldnt happen, we returned above if in_interrupt():
18085 */
18086 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18087 index 8d82a77..0baf312 100644
18088 --- a/arch/x86/kernel/kgdb.c
18089 +++ b/arch/x86/kernel/kgdb.c
18090 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18091
18092 /* clear the trace bit */
18093 linux_regs->flags &= ~X86_EFLAGS_TF;
18094 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18095 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18096
18097 /* set the trace bit if we're stepping */
18098 if (remcomInBuffer[0] == 's') {
18099 linux_regs->flags |= X86_EFLAGS_TF;
18100 kgdb_single_step = 1;
18101 - atomic_set(&kgdb_cpu_doing_single_step,
18102 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18103 raw_smp_processor_id());
18104 }
18105
18106 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18107 break;
18108
18109 case DIE_DEBUG:
18110 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
18111 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18112 raw_smp_processor_id()) {
18113 if (user_mode(regs))
18114 return single_step_cont(regs, args);
18115 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18116 return instruction_pointer(regs);
18117 }
18118
18119 -struct kgdb_arch arch_kgdb_ops = {
18120 +const struct kgdb_arch arch_kgdb_ops = {
18121 /* Breakpoint instruction: */
18122 .gdb_bpt_instr = { 0xcc },
18123 .flags = KGDB_HW_BREAKPOINT,
18124 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18125 index 7a67820..70ea187 100644
18126 --- a/arch/x86/kernel/kprobes.c
18127 +++ b/arch/x86/kernel/kprobes.c
18128 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18129 char op;
18130 s32 raddr;
18131 } __attribute__((packed)) * jop;
18132 - jop = (struct __arch_jmp_op *)from;
18133 +
18134 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18135 +
18136 + pax_open_kernel();
18137 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18138 jop->op = RELATIVEJUMP_INSTRUCTION;
18139 + pax_close_kernel();
18140 }
18141
18142 /*
18143 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18144 kprobe_opcode_t opcode;
18145 kprobe_opcode_t *orig_opcodes = opcodes;
18146
18147 - if (search_exception_tables((unsigned long)opcodes))
18148 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18149 return 0; /* Page fault may occur on this address. */
18150
18151 retry:
18152 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18153 disp = (u8 *) p->addr + *((s32 *) insn) -
18154 (u8 *) p->ainsn.insn;
18155 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18156 + pax_open_kernel();
18157 *(s32 *)insn = (s32) disp;
18158 + pax_close_kernel();
18159 }
18160 }
18161 #endif
18162 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18163
18164 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18165 {
18166 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18167 + pax_open_kernel();
18168 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18169 + pax_close_kernel();
18170
18171 fix_riprel(p);
18172
18173 - if (can_boost(p->addr))
18174 + if (can_boost(ktla_ktva(p->addr)))
18175 p->ainsn.boostable = 0;
18176 else
18177 p->ainsn.boostable = -1;
18178
18179 - p->opcode = *p->addr;
18180 + p->opcode = *(ktla_ktva(p->addr));
18181 }
18182
18183 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18184 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18185 if (p->opcode == BREAKPOINT_INSTRUCTION)
18186 regs->ip = (unsigned long)p->addr;
18187 else
18188 - regs->ip = (unsigned long)p->ainsn.insn;
18189 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18190 }
18191
18192 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18193 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18194 if (p->ainsn.boostable == 1 && !p->post_handler) {
18195 /* Boost up -- we can execute copied instructions directly */
18196 reset_current_kprobe();
18197 - regs->ip = (unsigned long)p->ainsn.insn;
18198 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18199 preempt_enable_no_resched();
18200 return;
18201 }
18202 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18203 struct kprobe_ctlblk *kcb;
18204
18205 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18206 - if (*addr != BREAKPOINT_INSTRUCTION) {
18207 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18208 /*
18209 * The breakpoint instruction was removed right
18210 * after we hit it. Another cpu has removed
18211 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18212 /* Skip orig_ax, ip, cs */
18213 " addq $24, %rsp\n"
18214 " popfq\n"
18215 +#ifdef KERNEXEC_PLUGIN
18216 + " btsq $63,(%rsp)\n"
18217 +#endif
18218 #else
18219 " pushf\n"
18220 /*
18221 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18222 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18223 {
18224 unsigned long *tos = stack_addr(regs);
18225 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18226 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18227 unsigned long orig_ip = (unsigned long)p->addr;
18228 kprobe_opcode_t *insn = p->ainsn.insn;
18229
18230 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18231 struct die_args *args = data;
18232 int ret = NOTIFY_DONE;
18233
18234 - if (args->regs && user_mode_vm(args->regs))
18235 + if (args->regs && user_mode(args->regs))
18236 return ret;
18237
18238 switch (val) {
18239 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18240 index 63b0ec8..6d92227 100644
18241 --- a/arch/x86/kernel/kvm.c
18242 +++ b/arch/x86/kernel/kvm.c
18243 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18244 pv_mmu_ops.set_pud = kvm_set_pud;
18245 #if PAGETABLE_LEVELS == 4
18246 pv_mmu_ops.set_pgd = kvm_set_pgd;
18247 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18248 #endif
18249 #endif
18250 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18251 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18252 index ec6ef60..ab2c824 100644
18253 --- a/arch/x86/kernel/ldt.c
18254 +++ b/arch/x86/kernel/ldt.c
18255 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18256 if (reload) {
18257 #ifdef CONFIG_SMP
18258 preempt_disable();
18259 - load_LDT(pc);
18260 + load_LDT_nolock(pc);
18261 if (!cpumask_equal(mm_cpumask(current->mm),
18262 cpumask_of(smp_processor_id())))
18263 smp_call_function(flush_ldt, current->mm, 1);
18264 preempt_enable();
18265 #else
18266 - load_LDT(pc);
18267 + load_LDT_nolock(pc);
18268 #endif
18269 }
18270 if (oldsize) {
18271 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18272 return err;
18273
18274 for (i = 0; i < old->size; i++)
18275 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18276 + write_ldt_entry(new->ldt, i, old->ldt + i);
18277 return 0;
18278 }
18279
18280 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18281 retval = copy_ldt(&mm->context, &old_mm->context);
18282 mutex_unlock(&old_mm->context.lock);
18283 }
18284 +
18285 + if (tsk == current) {
18286 + mm->context.vdso = 0;
18287 +
18288 +#ifdef CONFIG_X86_32
18289 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18290 + mm->context.user_cs_base = 0UL;
18291 + mm->context.user_cs_limit = ~0UL;
18292 +
18293 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18294 + cpus_clear(mm->context.cpu_user_cs_mask);
18295 +#endif
18296 +
18297 +#endif
18298 +#endif
18299 +
18300 + }
18301 +
18302 return retval;
18303 }
18304
18305 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18306 }
18307 }
18308
18309 +#ifdef CONFIG_PAX_SEGMEXEC
18310 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18311 + error = -EINVAL;
18312 + goto out_unlock;
18313 + }
18314 +#endif
18315 +
18316 fill_ldt(&ldt, &ldt_info);
18317 if (oldmode)
18318 ldt.avl = 0;
18319 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18320 index c1c429d..f02eaf9 100644
18321 --- a/arch/x86/kernel/machine_kexec_32.c
18322 +++ b/arch/x86/kernel/machine_kexec_32.c
18323 @@ -26,7 +26,7 @@
18324 #include <asm/system.h>
18325 #include <asm/cacheflush.h>
18326
18327 -static void set_idt(void *newidt, __u16 limit)
18328 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18329 {
18330 struct desc_ptr curidt;
18331
18332 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18333 }
18334
18335
18336 -static void set_gdt(void *newgdt, __u16 limit)
18337 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18338 {
18339 struct desc_ptr curgdt;
18340
18341 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18342 }
18343
18344 control_page = page_address(image->control_code_page);
18345 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18346 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18347
18348 relocate_kernel_ptr = control_page;
18349 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18350 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18351 index 1e47679..e73449d 100644
18352 --- a/arch/x86/kernel/microcode_amd.c
18353 +++ b/arch/x86/kernel/microcode_amd.c
18354 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18355 uci->mc = NULL;
18356 }
18357
18358 -static struct microcode_ops microcode_amd_ops = {
18359 +static const struct microcode_ops microcode_amd_ops = {
18360 .request_microcode_user = request_microcode_user,
18361 .request_microcode_fw = request_microcode_fw,
18362 .collect_cpu_info = collect_cpu_info_amd,
18363 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18364 .microcode_fini_cpu = microcode_fini_cpu_amd,
18365 };
18366
18367 -struct microcode_ops * __init init_amd_microcode(void)
18368 +const struct microcode_ops * __init init_amd_microcode(void)
18369 {
18370 return &microcode_amd_ops;
18371 }
18372 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18373 index 378e9a8..b5a6ea9 100644
18374 --- a/arch/x86/kernel/microcode_core.c
18375 +++ b/arch/x86/kernel/microcode_core.c
18376 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18377
18378 #define MICROCODE_VERSION "2.00"
18379
18380 -static struct microcode_ops *microcode_ops;
18381 +static const struct microcode_ops *microcode_ops;
18382
18383 /*
18384 * Synchronization.
18385 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18386 index 0d334dd..14cedaf 100644
18387 --- a/arch/x86/kernel/microcode_intel.c
18388 +++ b/arch/x86/kernel/microcode_intel.c
18389 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18390
18391 static int get_ucode_user(void *to, const void *from, size_t n)
18392 {
18393 - return copy_from_user(to, from, n);
18394 + return copy_from_user(to, (const void __force_user *)from, n);
18395 }
18396
18397 static enum ucode_state
18398 request_microcode_user(int cpu, const void __user *buf, size_t size)
18399 {
18400 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18401 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18402 }
18403
18404 static void microcode_fini_cpu(int cpu)
18405 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18406 uci->mc = NULL;
18407 }
18408
18409 -static struct microcode_ops microcode_intel_ops = {
18410 +static const struct microcode_ops microcode_intel_ops = {
18411 .request_microcode_user = request_microcode_user,
18412 .request_microcode_fw = request_microcode_fw,
18413 .collect_cpu_info = collect_cpu_info,
18414 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18415 .microcode_fini_cpu = microcode_fini_cpu,
18416 };
18417
18418 -struct microcode_ops * __init init_intel_microcode(void)
18419 +const struct microcode_ops * __init init_intel_microcode(void)
18420 {
18421 return &microcode_intel_ops;
18422 }
18423 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18424 index 89f386f..9028f51 100644
18425 --- a/arch/x86/kernel/module.c
18426 +++ b/arch/x86/kernel/module.c
18427 @@ -34,7 +34,7 @@
18428 #define DEBUGP(fmt...)
18429 #endif
18430
18431 -void *module_alloc(unsigned long size)
18432 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18433 {
18434 struct vm_struct *area;
18435
18436 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18437 if (!area)
18438 return NULL;
18439
18440 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18441 - PAGE_KERNEL_EXEC);
18442 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18443 +}
18444 +
18445 +void *module_alloc(unsigned long size)
18446 +{
18447 +
18448 +#ifdef CONFIG_PAX_KERNEXEC
18449 + return __module_alloc(size, PAGE_KERNEL);
18450 +#else
18451 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18452 +#endif
18453 +
18454 }
18455
18456 /* Free memory returned from module_alloc */
18457 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18458 vfree(module_region);
18459 }
18460
18461 +#ifdef CONFIG_PAX_KERNEXEC
18462 +#ifdef CONFIG_X86_32
18463 +void *module_alloc_exec(unsigned long size)
18464 +{
18465 + struct vm_struct *area;
18466 +
18467 + if (size == 0)
18468 + return NULL;
18469 +
18470 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18471 + return area ? area->addr : NULL;
18472 +}
18473 +EXPORT_SYMBOL(module_alloc_exec);
18474 +
18475 +void module_free_exec(struct module *mod, void *module_region)
18476 +{
18477 + vunmap(module_region);
18478 +}
18479 +EXPORT_SYMBOL(module_free_exec);
18480 +#else
18481 +void module_free_exec(struct module *mod, void *module_region)
18482 +{
18483 + module_free(mod, module_region);
18484 +}
18485 +EXPORT_SYMBOL(module_free_exec);
18486 +
18487 +void *module_alloc_exec(unsigned long size)
18488 +{
18489 + return __module_alloc(size, PAGE_KERNEL_RX);
18490 +}
18491 +EXPORT_SYMBOL(module_alloc_exec);
18492 +#endif
18493 +#endif
18494 +
18495 /* We don't need anything special. */
18496 int module_frob_arch_sections(Elf_Ehdr *hdr,
18497 Elf_Shdr *sechdrs,
18498 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18499 unsigned int i;
18500 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18501 Elf32_Sym *sym;
18502 - uint32_t *location;
18503 + uint32_t *plocation, location;
18504
18505 DEBUGP("Applying relocate section %u to %u\n", relsec,
18506 sechdrs[relsec].sh_info);
18507 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18508 /* This is where to make the change */
18509 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18510 - + rel[i].r_offset;
18511 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18512 + location = (uint32_t)plocation;
18513 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18514 + plocation = ktla_ktva((void *)plocation);
18515 /* This is the symbol it is referring to. Note that all
18516 undefined symbols have been resolved. */
18517 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18518 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18519 switch (ELF32_R_TYPE(rel[i].r_info)) {
18520 case R_386_32:
18521 /* We add the value into the location given */
18522 - *location += sym->st_value;
18523 + pax_open_kernel();
18524 + *plocation += sym->st_value;
18525 + pax_close_kernel();
18526 break;
18527 case R_386_PC32:
18528 /* Add the value, subtract its postition */
18529 - *location += sym->st_value - (uint32_t)location;
18530 + pax_open_kernel();
18531 + *plocation += sym->st_value - location;
18532 + pax_close_kernel();
18533 break;
18534 default:
18535 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18536 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18537 case R_X86_64_NONE:
18538 break;
18539 case R_X86_64_64:
18540 + pax_open_kernel();
18541 *(u64 *)loc = val;
18542 + pax_close_kernel();
18543 break;
18544 case R_X86_64_32:
18545 + pax_open_kernel();
18546 *(u32 *)loc = val;
18547 + pax_close_kernel();
18548 if (val != *(u32 *)loc)
18549 goto overflow;
18550 break;
18551 case R_X86_64_32S:
18552 + pax_open_kernel();
18553 *(s32 *)loc = val;
18554 + pax_close_kernel();
18555 if ((s64)val != *(s32 *)loc)
18556 goto overflow;
18557 break;
18558 case R_X86_64_PC32:
18559 val -= (u64)loc;
18560 + pax_open_kernel();
18561 *(u32 *)loc = val;
18562 + pax_close_kernel();
18563 +
18564 #if 0
18565 if ((s64)val != *(s32 *)loc)
18566 goto overflow;
18567 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18568 index 3a7c5a4..9191528 100644
18569 --- a/arch/x86/kernel/paravirt-spinlocks.c
18570 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18571 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18572 __raw_spin_lock(lock);
18573 }
18574
18575 -struct pv_lock_ops pv_lock_ops = {
18576 +struct pv_lock_ops pv_lock_ops __read_only = {
18577 #ifdef CONFIG_SMP
18578 .spin_is_locked = __ticket_spin_is_locked,
18579 .spin_is_contended = __ticket_spin_is_contended,
18580 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18581 index 1b1739d..dea6077 100644
18582 --- a/arch/x86/kernel/paravirt.c
18583 +++ b/arch/x86/kernel/paravirt.c
18584 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18585 {
18586 return x;
18587 }
18588 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18589 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18590 +#endif
18591
18592 void __init default_banner(void)
18593 {
18594 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18595 * corresponding structure. */
18596 static void *get_call_destination(u8 type)
18597 {
18598 - struct paravirt_patch_template tmpl = {
18599 + const struct paravirt_patch_template tmpl = {
18600 .pv_init_ops = pv_init_ops,
18601 .pv_time_ops = pv_time_ops,
18602 .pv_cpu_ops = pv_cpu_ops,
18603 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18604 .pv_lock_ops = pv_lock_ops,
18605 #endif
18606 };
18607 +
18608 + pax_track_stack();
18609 return *((void **)&tmpl + type);
18610 }
18611
18612 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18613 if (opfunc == NULL)
18614 /* If there's no function, patch it with a ud2a (BUG) */
18615 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18616 - else if (opfunc == _paravirt_nop)
18617 + else if (opfunc == (void *)_paravirt_nop)
18618 /* If the operation is a nop, then nop the callsite */
18619 ret = paravirt_patch_nop();
18620
18621 /* identity functions just return their single argument */
18622 - else if (opfunc == _paravirt_ident_32)
18623 + else if (opfunc == (void *)_paravirt_ident_32)
18624 ret = paravirt_patch_ident_32(insnbuf, len);
18625 - else if (opfunc == _paravirt_ident_64)
18626 + else if (opfunc == (void *)_paravirt_ident_64)
18627 ret = paravirt_patch_ident_64(insnbuf, len);
18628 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18629 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18630 + ret = paravirt_patch_ident_64(insnbuf, len);
18631 +#endif
18632
18633 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18634 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18635 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18636 if (insn_len > len || start == NULL)
18637 insn_len = len;
18638 else
18639 - memcpy(insnbuf, start, insn_len);
18640 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18641
18642 return insn_len;
18643 }
18644 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18645 preempt_enable();
18646 }
18647
18648 -struct pv_info pv_info = {
18649 +struct pv_info pv_info __read_only = {
18650 .name = "bare hardware",
18651 .paravirt_enabled = 0,
18652 .kernel_rpl = 0,
18653 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18654 };
18655
18656 -struct pv_init_ops pv_init_ops = {
18657 +struct pv_init_ops pv_init_ops __read_only = {
18658 .patch = native_patch,
18659 };
18660
18661 -struct pv_time_ops pv_time_ops = {
18662 +struct pv_time_ops pv_time_ops __read_only = {
18663 .sched_clock = native_sched_clock,
18664 };
18665
18666 -struct pv_irq_ops pv_irq_ops = {
18667 +struct pv_irq_ops pv_irq_ops __read_only = {
18668 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18669 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18670 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18671 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18672 #endif
18673 };
18674
18675 -struct pv_cpu_ops pv_cpu_ops = {
18676 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18677 .cpuid = native_cpuid,
18678 .get_debugreg = native_get_debugreg,
18679 .set_debugreg = native_set_debugreg,
18680 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18681 .end_context_switch = paravirt_nop,
18682 };
18683
18684 -struct pv_apic_ops pv_apic_ops = {
18685 +struct pv_apic_ops pv_apic_ops __read_only = {
18686 #ifdef CONFIG_X86_LOCAL_APIC
18687 .startup_ipi_hook = paravirt_nop,
18688 #endif
18689 };
18690
18691 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18692 +#ifdef CONFIG_X86_32
18693 +#ifdef CONFIG_X86_PAE
18694 +/* 64-bit pagetable entries */
18695 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18696 +#else
18697 /* 32-bit pagetable entries */
18698 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18699 +#endif
18700 #else
18701 /* 64-bit pagetable entries */
18702 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18703 #endif
18704
18705 -struct pv_mmu_ops pv_mmu_ops = {
18706 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18707
18708 .read_cr2 = native_read_cr2,
18709 .write_cr2 = native_write_cr2,
18710 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18711 .make_pud = PTE_IDENT,
18712
18713 .set_pgd = native_set_pgd,
18714 + .set_pgd_batched = native_set_pgd_batched,
18715 #endif
18716 #endif /* PAGETABLE_LEVELS >= 3 */
18717
18718 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18719 },
18720
18721 .set_fixmap = native_set_fixmap,
18722 +
18723 +#ifdef CONFIG_PAX_KERNEXEC
18724 + .pax_open_kernel = native_pax_open_kernel,
18725 + .pax_close_kernel = native_pax_close_kernel,
18726 +#endif
18727 +
18728 };
18729
18730 EXPORT_SYMBOL_GPL(pv_time_ops);
18731 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18732 index 1a2d4b1..6a0dd55 100644
18733 --- a/arch/x86/kernel/pci-calgary_64.c
18734 +++ b/arch/x86/kernel/pci-calgary_64.c
18735 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18736 free_pages((unsigned long)vaddr, get_order(size));
18737 }
18738
18739 -static struct dma_map_ops calgary_dma_ops = {
18740 +static const struct dma_map_ops calgary_dma_ops = {
18741 .alloc_coherent = calgary_alloc_coherent,
18742 .free_coherent = calgary_free_coherent,
18743 .map_sg = calgary_map_sg,
18744 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18745 index 6ac3931..42b4414 100644
18746 --- a/arch/x86/kernel/pci-dma.c
18747 +++ b/arch/x86/kernel/pci-dma.c
18748 @@ -14,7 +14,7 @@
18749
18750 static int forbid_dac __read_mostly;
18751
18752 -struct dma_map_ops *dma_ops;
18753 +const struct dma_map_ops *dma_ops;
18754 EXPORT_SYMBOL(dma_ops);
18755
18756 static int iommu_sac_force __read_mostly;
18757 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18758
18759 int dma_supported(struct device *dev, u64 mask)
18760 {
18761 - struct dma_map_ops *ops = get_dma_ops(dev);
18762 + const struct dma_map_ops *ops = get_dma_ops(dev);
18763
18764 #ifdef CONFIG_PCI
18765 if (mask > 0xffffffff && forbid_dac > 0) {
18766 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18767 index 1c76691..e3632db 100644
18768 --- a/arch/x86/kernel/pci-gart_64.c
18769 +++ b/arch/x86/kernel/pci-gart_64.c
18770 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18771 return -1;
18772 }
18773
18774 -static struct dma_map_ops gart_dma_ops = {
18775 +static const struct dma_map_ops gart_dma_ops = {
18776 .map_sg = gart_map_sg,
18777 .unmap_sg = gart_unmap_sg,
18778 .map_page = gart_map_page,
18779 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18780 index a3933d4..c898869 100644
18781 --- a/arch/x86/kernel/pci-nommu.c
18782 +++ b/arch/x86/kernel/pci-nommu.c
18783 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18784 flush_write_buffers();
18785 }
18786
18787 -struct dma_map_ops nommu_dma_ops = {
18788 +const struct dma_map_ops nommu_dma_ops = {
18789 .alloc_coherent = dma_generic_alloc_coherent,
18790 .free_coherent = nommu_free_coherent,
18791 .map_sg = nommu_map_sg,
18792 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18793 index aaa6b78..4de1881 100644
18794 --- a/arch/x86/kernel/pci-swiotlb.c
18795 +++ b/arch/x86/kernel/pci-swiotlb.c
18796 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18797 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18798 }
18799
18800 -static struct dma_map_ops swiotlb_dma_ops = {
18801 +static const struct dma_map_ops swiotlb_dma_ops = {
18802 .mapping_error = swiotlb_dma_mapping_error,
18803 .alloc_coherent = x86_swiotlb_alloc_coherent,
18804 .free_coherent = swiotlb_free_coherent,
18805 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18806 index fc6c84d..0312ca2 100644
18807 --- a/arch/x86/kernel/process.c
18808 +++ b/arch/x86/kernel/process.c
18809 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18810
18811 void free_thread_info(struct thread_info *ti)
18812 {
18813 - free_thread_xstate(ti->task);
18814 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18815 }
18816
18817 +static struct kmem_cache *task_struct_cachep;
18818 +
18819 void arch_task_cache_init(void)
18820 {
18821 - task_xstate_cachep =
18822 - kmem_cache_create("task_xstate", xstate_size,
18823 + /* create a slab on which task_structs can be allocated */
18824 + task_struct_cachep =
18825 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18826 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18827 +
18828 + task_xstate_cachep =
18829 + kmem_cache_create("task_xstate", xstate_size,
18830 __alignof__(union thread_xstate),
18831 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18832 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18833 +}
18834 +
18835 +struct task_struct *alloc_task_struct(void)
18836 +{
18837 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18838 +}
18839 +
18840 +void free_task_struct(struct task_struct *task)
18841 +{
18842 + free_thread_xstate(task);
18843 + kmem_cache_free(task_struct_cachep, task);
18844 }
18845
18846 /*
18847 @@ -73,7 +90,7 @@ void exit_thread(void)
18848 unsigned long *bp = t->io_bitmap_ptr;
18849
18850 if (bp) {
18851 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18852 + struct tss_struct *tss = init_tss + get_cpu();
18853
18854 t->io_bitmap_ptr = NULL;
18855 clear_thread_flag(TIF_IO_BITMAP);
18856 @@ -93,6 +110,9 @@ void flush_thread(void)
18857
18858 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18859
18860 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18861 + loadsegment(gs, 0);
18862 +#endif
18863 tsk->thread.debugreg0 = 0;
18864 tsk->thread.debugreg1 = 0;
18865 tsk->thread.debugreg2 = 0;
18866 @@ -307,7 +327,7 @@ void default_idle(void)
18867 EXPORT_SYMBOL(default_idle);
18868 #endif
18869
18870 -void stop_this_cpu(void *dummy)
18871 +__noreturn void stop_this_cpu(void *dummy)
18872 {
18873 local_irq_disable();
18874 /*
18875 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18876 }
18877 early_param("idle", idle_setup);
18878
18879 -unsigned long arch_align_stack(unsigned long sp)
18880 +#ifdef CONFIG_PAX_RANDKSTACK
18881 +void pax_randomize_kstack(struct pt_regs *regs)
18882 {
18883 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18884 - sp -= get_random_int() % 8192;
18885 - return sp & ~0xf;
18886 -}
18887 + struct thread_struct *thread = &current->thread;
18888 + unsigned long time;
18889
18890 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18891 -{
18892 - unsigned long range_end = mm->brk + 0x02000000;
18893 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18894 + if (!randomize_va_space)
18895 + return;
18896 +
18897 + if (v8086_mode(regs))
18898 + return;
18899 +
18900 + rdtscl(time);
18901 +
18902 + /* P4 seems to return a 0 LSB, ignore it */
18903 +#ifdef CONFIG_MPENTIUM4
18904 + time &= 0x3EUL;
18905 + time <<= 2;
18906 +#elif defined(CONFIG_X86_64)
18907 + time &= 0xFUL;
18908 + time <<= 4;
18909 +#else
18910 + time &= 0x1FUL;
18911 + time <<= 3;
18912 +#endif
18913 +
18914 + thread->sp0 ^= time;
18915 + load_sp0(init_tss + smp_processor_id(), thread);
18916 +
18917 +#ifdef CONFIG_X86_64
18918 + percpu_write(kernel_stack, thread->sp0);
18919 +#endif
18920 }
18921 +#endif
18922
18923 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18924 index c40c432..6e1df72 100644
18925 --- a/arch/x86/kernel/process_32.c
18926 +++ b/arch/x86/kernel/process_32.c
18927 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18928 unsigned long thread_saved_pc(struct task_struct *tsk)
18929 {
18930 return ((unsigned long *)tsk->thread.sp)[3];
18931 +//XXX return tsk->thread.eip;
18932 }
18933
18934 #ifndef CONFIG_SMP
18935 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18936 unsigned short ss, gs;
18937 const char *board;
18938
18939 - if (user_mode_vm(regs)) {
18940 + if (user_mode(regs)) {
18941 sp = regs->sp;
18942 ss = regs->ss & 0xffff;
18943 - gs = get_user_gs(regs);
18944 } else {
18945 sp = (unsigned long) (&regs->sp);
18946 savesegment(ss, ss);
18947 - savesegment(gs, gs);
18948 }
18949 + gs = get_user_gs(regs);
18950
18951 printk("\n");
18952
18953 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18954 regs.bx = (unsigned long) fn;
18955 regs.dx = (unsigned long) arg;
18956
18957 - regs.ds = __USER_DS;
18958 - regs.es = __USER_DS;
18959 + regs.ds = __KERNEL_DS;
18960 + regs.es = __KERNEL_DS;
18961 regs.fs = __KERNEL_PERCPU;
18962 - regs.gs = __KERNEL_STACK_CANARY;
18963 + savesegment(gs, regs.gs);
18964 regs.orig_ax = -1;
18965 regs.ip = (unsigned long) kernel_thread_helper;
18966 regs.cs = __KERNEL_CS | get_kernel_rpl();
18967 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18968 struct task_struct *tsk;
18969 int err;
18970
18971 - childregs = task_pt_regs(p);
18972 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18973 *childregs = *regs;
18974 childregs->ax = 0;
18975 childregs->sp = sp;
18976
18977 p->thread.sp = (unsigned long) childregs;
18978 p->thread.sp0 = (unsigned long) (childregs+1);
18979 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18980
18981 p->thread.ip = (unsigned long) ret_from_fork;
18982
18983 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18984 struct thread_struct *prev = &prev_p->thread,
18985 *next = &next_p->thread;
18986 int cpu = smp_processor_id();
18987 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18988 + struct tss_struct *tss = init_tss + cpu;
18989 bool preload_fpu;
18990
18991 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18992 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18993 */
18994 lazy_save_gs(prev->gs);
18995
18996 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18997 + __set_fs(task_thread_info(next_p)->addr_limit);
18998 +#endif
18999 +
19000 /*
19001 * Load the per-thread Thread-Local Storage descriptor.
19002 */
19003 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19004 */
19005 arch_end_context_switch(next_p);
19006
19007 + percpu_write(current_task, next_p);
19008 + percpu_write(current_tinfo, &next_p->tinfo);
19009 +
19010 if (preload_fpu)
19011 __math_state_restore();
19012
19013 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19014 if (prev->gs | next->gs)
19015 lazy_load_gs(next->gs);
19016
19017 - percpu_write(current_task, next_p);
19018 -
19019 return prev_p;
19020 }
19021
19022 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19023 } while (count++ < 16);
19024 return 0;
19025 }
19026 -
19027 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19028 index 39493bc..196816d 100644
19029 --- a/arch/x86/kernel/process_64.c
19030 +++ b/arch/x86/kernel/process_64.c
19031 @@ -91,7 +91,7 @@ static void __exit_idle(void)
19032 void exit_idle(void)
19033 {
19034 /* idle loop has pid 0 */
19035 - if (current->pid)
19036 + if (task_pid_nr(current))
19037 return;
19038 __exit_idle();
19039 }
19040 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19041 if (!board)
19042 board = "";
19043 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19044 - current->pid, current->comm, print_tainted(),
19045 + task_pid_nr(current), current->comm, print_tainted(),
19046 init_utsname()->release,
19047 (int)strcspn(init_utsname()->version, " "),
19048 init_utsname()->version, board);
19049 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19050 struct pt_regs *childregs;
19051 struct task_struct *me = current;
19052
19053 - childregs = ((struct pt_regs *)
19054 - (THREAD_SIZE + task_stack_page(p))) - 1;
19055 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19056 *childregs = *regs;
19057
19058 childregs->ax = 0;
19059 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19060 p->thread.sp = (unsigned long) childregs;
19061 p->thread.sp0 = (unsigned long) (childregs+1);
19062 p->thread.usersp = me->thread.usersp;
19063 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19064
19065 set_tsk_thread_flag(p, TIF_FORK);
19066
19067 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19068 struct thread_struct *prev = &prev_p->thread;
19069 struct thread_struct *next = &next_p->thread;
19070 int cpu = smp_processor_id();
19071 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19072 + struct tss_struct *tss = init_tss + cpu;
19073 unsigned fsindex, gsindex;
19074 bool preload_fpu;
19075
19076 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19077 prev->usersp = percpu_read(old_rsp);
19078 percpu_write(old_rsp, next->usersp);
19079 percpu_write(current_task, next_p);
19080 + percpu_write(current_tinfo, &next_p->tinfo);
19081
19082 - percpu_write(kernel_stack,
19083 - (unsigned long)task_stack_page(next_p) +
19084 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19085 + percpu_write(kernel_stack, next->sp0);
19086
19087 /*
19088 * Now maybe reload the debug registers and handle I/O bitmaps
19089 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19090 if (!p || p == current || p->state == TASK_RUNNING)
19091 return 0;
19092 stack = (unsigned long)task_stack_page(p);
19093 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19094 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19095 return 0;
19096 fp = *(u64 *)(p->thread.sp);
19097 do {
19098 - if (fp < (unsigned long)stack ||
19099 - fp >= (unsigned long)stack+THREAD_SIZE)
19100 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19101 return 0;
19102 ip = *(u64 *)(fp+8);
19103 if (!in_sched_functions(ip))
19104 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19105 index c06acdd..3f5fff5 100644
19106 --- a/arch/x86/kernel/ptrace.c
19107 +++ b/arch/x86/kernel/ptrace.c
19108 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19109 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19110 {
19111 int ret;
19112 - unsigned long __user *datap = (unsigned long __user *)data;
19113 + unsigned long __user *datap = (__force unsigned long __user *)data;
19114
19115 switch (request) {
19116 /* read the word at location addr in the USER area. */
19117 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19118 if (addr < 0)
19119 return -EIO;
19120 ret = do_get_thread_area(child, addr,
19121 - (struct user_desc __user *) data);
19122 + (__force struct user_desc __user *) data);
19123 break;
19124
19125 case PTRACE_SET_THREAD_AREA:
19126 if (addr < 0)
19127 return -EIO;
19128 ret = do_set_thread_area(child, addr,
19129 - (struct user_desc __user *) data, 0);
19130 + (__force struct user_desc __user *) data, 0);
19131 break;
19132 #endif
19133
19134 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19135 #ifdef CONFIG_X86_PTRACE_BTS
19136 case PTRACE_BTS_CONFIG:
19137 ret = ptrace_bts_config
19138 - (child, data, (struct ptrace_bts_config __user *)addr);
19139 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19140 break;
19141
19142 case PTRACE_BTS_STATUS:
19143 ret = ptrace_bts_status
19144 - (child, data, (struct ptrace_bts_config __user *)addr);
19145 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19146 break;
19147
19148 case PTRACE_BTS_SIZE:
19149 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19150
19151 case PTRACE_BTS_GET:
19152 ret = ptrace_bts_read_record
19153 - (child, data, (struct bts_struct __user *) addr);
19154 + (child, data, (__force struct bts_struct __user *) addr);
19155 break;
19156
19157 case PTRACE_BTS_CLEAR:
19158 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19159
19160 case PTRACE_BTS_DRAIN:
19161 ret = ptrace_bts_drain
19162 - (child, data, (struct bts_struct __user *) addr);
19163 + (child, data, (__force struct bts_struct __user *) addr);
19164 break;
19165 #endif /* CONFIG_X86_PTRACE_BTS */
19166
19167 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19168 info.si_code = si_code;
19169
19170 /* User-mode ip? */
19171 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19172 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19173
19174 /* Send us the fake SIGTRAP */
19175 force_sig_info(SIGTRAP, &info, tsk);
19176 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19177 * We must return the syscall number to actually look up in the table.
19178 * This can be -1L to skip running any syscall at all.
19179 */
19180 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
19181 +long syscall_trace_enter(struct pt_regs *regs)
19182 {
19183 long ret = 0;
19184
19185 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19186 return ret ?: regs->orig_ax;
19187 }
19188
19189 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
19190 +void syscall_trace_leave(struct pt_regs *regs)
19191 {
19192 if (unlikely(current->audit_context))
19193 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19194 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19195 index cf98100..e76e03d 100644
19196 --- a/arch/x86/kernel/reboot.c
19197 +++ b/arch/x86/kernel/reboot.c
19198 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19199 EXPORT_SYMBOL(pm_power_off);
19200
19201 static const struct desc_ptr no_idt = {};
19202 -static int reboot_mode;
19203 +static unsigned short reboot_mode;
19204 enum reboot_type reboot_type = BOOT_KBD;
19205 int reboot_force;
19206
19207 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
19208 controller to pulse the CPU reset line, which is more thorough, but
19209 doesn't work with at least one type of 486 motherboard. It is easy
19210 to stop this code working; hence the copious comments. */
19211 -static const unsigned long long
19212 -real_mode_gdt_entries [3] =
19213 +static struct desc_struct
19214 +real_mode_gdt_entries [3] __read_only =
19215 {
19216 - 0x0000000000000000ULL, /* Null descriptor */
19217 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19218 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19219 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19220 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19221 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19222 };
19223
19224 static const struct desc_ptr
19225 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19226 * specified by the code and length parameters.
19227 * We assume that length will aways be less that 100!
19228 */
19229 -void machine_real_restart(const unsigned char *code, int length)
19230 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19231 {
19232 local_irq_disable();
19233
19234 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19235 /* Remap the kernel at virtual address zero, as well as offset zero
19236 from the kernel segment. This assumes the kernel segment starts at
19237 virtual address PAGE_OFFSET. */
19238 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19239 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19240 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19241 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19242
19243 /*
19244 * Use `swapper_pg_dir' as our page directory.
19245 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19246 boot)". This seems like a fairly standard thing that gets set by
19247 REBOOT.COM programs, and the previous reset routine did this
19248 too. */
19249 - *((unsigned short *)0x472) = reboot_mode;
19250 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19251
19252 /* For the switch to real mode, copy some code to low memory. It has
19253 to be in the first 64k because it is running in 16-bit mode, and it
19254 has to have the same physical and virtual address, because it turns
19255 off paging. Copy it near the end of the first page, out of the way
19256 of BIOS variables. */
19257 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19258 - real_mode_switch, sizeof (real_mode_switch));
19259 - memcpy((void *)(0x1000 - 100), code, length);
19260 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19261 + memcpy(__va(0x1000 - 100), code, length);
19262
19263 /* Set up the IDT for real mode. */
19264 load_idt(&real_mode_idt);
19265 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19266 __asm__ __volatile__ ("ljmp $0x0008,%0"
19267 :
19268 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19269 + do { } while (1);
19270 }
19271 #ifdef CONFIG_APM_MODULE
19272 EXPORT_SYMBOL(machine_real_restart);
19273 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19274 {
19275 }
19276
19277 -static void native_machine_emergency_restart(void)
19278 +__noreturn static void native_machine_emergency_restart(void)
19279 {
19280 int i;
19281
19282 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19283 #endif
19284 }
19285
19286 -static void __machine_emergency_restart(int emergency)
19287 +static __noreturn void __machine_emergency_restart(int emergency)
19288 {
19289 reboot_emergency = emergency;
19290 machine_ops.emergency_restart();
19291 }
19292
19293 -static void native_machine_restart(char *__unused)
19294 +static __noreturn void native_machine_restart(char *__unused)
19295 {
19296 printk("machine restart\n");
19297
19298 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19299 __machine_emergency_restart(0);
19300 }
19301
19302 -static void native_machine_halt(void)
19303 +static __noreturn void native_machine_halt(void)
19304 {
19305 /* stop other cpus and apics */
19306 machine_shutdown();
19307 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19308 stop_this_cpu(NULL);
19309 }
19310
19311 -static void native_machine_power_off(void)
19312 +__noreturn static void native_machine_power_off(void)
19313 {
19314 if (pm_power_off) {
19315 if (!reboot_force)
19316 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19317 }
19318 /* a fallback in case there is no PM info available */
19319 tboot_shutdown(TB_SHUTDOWN_HALT);
19320 + do { } while (1);
19321 }
19322
19323 struct machine_ops machine_ops = {
19324 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19325 index 7a6f3b3..976a959 100644
19326 --- a/arch/x86/kernel/relocate_kernel_64.S
19327 +++ b/arch/x86/kernel/relocate_kernel_64.S
19328 @@ -11,6 +11,7 @@
19329 #include <asm/kexec.h>
19330 #include <asm/processor-flags.h>
19331 #include <asm/pgtable_types.h>
19332 +#include <asm/alternative-asm.h>
19333
19334 /*
19335 * Must be relocatable PIC code callable as a C function
19336 @@ -167,6 +168,7 @@ identity_mapped:
19337 xorq %r14, %r14
19338 xorq %r15, %r15
19339
19340 + pax_force_retaddr 0, 1
19341 ret
19342
19343 1:
19344 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19345 index 5449a26..0b6c759 100644
19346 --- a/arch/x86/kernel/setup.c
19347 +++ b/arch/x86/kernel/setup.c
19348 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19349
19350 if (!boot_params.hdr.root_flags)
19351 root_mountflags &= ~MS_RDONLY;
19352 - init_mm.start_code = (unsigned long) _text;
19353 - init_mm.end_code = (unsigned long) _etext;
19354 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19355 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19356 init_mm.end_data = (unsigned long) _edata;
19357 init_mm.brk = _brk_end;
19358
19359 - code_resource.start = virt_to_phys(_text);
19360 - code_resource.end = virt_to_phys(_etext)-1;
19361 - data_resource.start = virt_to_phys(_etext);
19362 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19363 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19364 + data_resource.start = virt_to_phys(_sdata);
19365 data_resource.end = virt_to_phys(_edata)-1;
19366 bss_resource.start = virt_to_phys(&__bss_start);
19367 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19368 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19369 index d559af9..524c6ad 100644
19370 --- a/arch/x86/kernel/setup_percpu.c
19371 +++ b/arch/x86/kernel/setup_percpu.c
19372 @@ -25,19 +25,17 @@
19373 # define DBG(x...)
19374 #endif
19375
19376 -DEFINE_PER_CPU(int, cpu_number);
19377 +#ifdef CONFIG_SMP
19378 +DEFINE_PER_CPU(unsigned int, cpu_number);
19379 EXPORT_PER_CPU_SYMBOL(cpu_number);
19380 +#endif
19381
19382 -#ifdef CONFIG_X86_64
19383 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19384 -#else
19385 -#define BOOT_PERCPU_OFFSET 0
19386 -#endif
19387
19388 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19389 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19390
19391 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19392 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19393 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19394 };
19395 EXPORT_SYMBOL(__per_cpu_offset);
19396 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19397 {
19398 #ifdef CONFIG_X86_32
19399 struct desc_struct gdt;
19400 + unsigned long base = per_cpu_offset(cpu);
19401
19402 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19403 - 0x2 | DESCTYPE_S, 0x8);
19404 - gdt.s = 1;
19405 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19406 + 0x83 | DESCTYPE_S, 0xC);
19407 write_gdt_entry(get_cpu_gdt_table(cpu),
19408 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19409 #endif
19410 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19411 /* alrighty, percpu areas up and running */
19412 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19413 for_each_possible_cpu(cpu) {
19414 +#ifdef CONFIG_CC_STACKPROTECTOR
19415 +#ifdef CONFIG_X86_32
19416 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19417 +#endif
19418 +#endif
19419 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19420 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19421 per_cpu(cpu_number, cpu) = cpu;
19422 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19423 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19424 #endif
19425 #endif
19426 +#ifdef CONFIG_CC_STACKPROTECTOR
19427 +#ifdef CONFIG_X86_32
19428 + if (!cpu)
19429 + per_cpu(stack_canary.canary, cpu) = canary;
19430 +#endif
19431 +#endif
19432 /*
19433 * Up to this point, the boot CPU has been using .data.init
19434 * area. Reload any changed state for the boot CPU.
19435 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19436 index 6a44a76..a9287a1 100644
19437 --- a/arch/x86/kernel/signal.c
19438 +++ b/arch/x86/kernel/signal.c
19439 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19440 * Align the stack pointer according to the i386 ABI,
19441 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19442 */
19443 - sp = ((sp + 4) & -16ul) - 4;
19444 + sp = ((sp - 12) & -16ul) - 4;
19445 #else /* !CONFIG_X86_32 */
19446 sp = round_down(sp, 16) - 8;
19447 #endif
19448 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19449 * Return an always-bogus address instead so we will die with SIGSEGV.
19450 */
19451 if (onsigstack && !likely(on_sig_stack(sp)))
19452 - return (void __user *)-1L;
19453 + return (__force void __user *)-1L;
19454
19455 /* save i387 state */
19456 if (used_math() && save_i387_xstate(*fpstate) < 0)
19457 - return (void __user *)-1L;
19458 + return (__force void __user *)-1L;
19459
19460 return (void __user *)sp;
19461 }
19462 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19463 }
19464
19465 if (current->mm->context.vdso)
19466 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19467 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19468 else
19469 - restorer = &frame->retcode;
19470 + restorer = (void __user *)&frame->retcode;
19471 if (ka->sa.sa_flags & SA_RESTORER)
19472 restorer = ka->sa.sa_restorer;
19473
19474 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19475 * reasons and because gdb uses it as a signature to notice
19476 * signal handler stack frames.
19477 */
19478 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19479 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19480
19481 if (err)
19482 return -EFAULT;
19483 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19484 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19485
19486 /* Set up to return from userspace. */
19487 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19488 + if (current->mm->context.vdso)
19489 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19490 + else
19491 + restorer = (void __user *)&frame->retcode;
19492 if (ka->sa.sa_flags & SA_RESTORER)
19493 restorer = ka->sa.sa_restorer;
19494 put_user_ex(restorer, &frame->pretcode);
19495 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19496 * reasons and because gdb uses it as a signature to notice
19497 * signal handler stack frames.
19498 */
19499 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19500 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19501 } put_user_catch(err);
19502
19503 if (err)
19504 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19505 int signr;
19506 sigset_t *oldset;
19507
19508 + pax_track_stack();
19509 +
19510 /*
19511 * We want the common case to go fast, which is why we may in certain
19512 * cases get here from kernel mode. Just return without doing anything
19513 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19514 * X86_32: vm86 regs switched out by assembly code before reaching
19515 * here, so testing against kernel CS suffices.
19516 */
19517 - if (!user_mode(regs))
19518 + if (!user_mode_novm(regs))
19519 return;
19520
19521 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19522 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19523 index 7e8e905..64d5c32 100644
19524 --- a/arch/x86/kernel/smpboot.c
19525 +++ b/arch/x86/kernel/smpboot.c
19526 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19527 */
19528 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19529
19530 -void cpu_hotplug_driver_lock()
19531 +void cpu_hotplug_driver_lock(void)
19532 {
19533 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19534 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19535 }
19536
19537 -void cpu_hotplug_driver_unlock()
19538 +void cpu_hotplug_driver_unlock(void)
19539 {
19540 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19541 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19542 }
19543
19544 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19545 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19546 * target processor state.
19547 */
19548 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19549 - (unsigned long)stack_start.sp);
19550 + stack_start);
19551
19552 /*
19553 * Run STARTUP IPI loop.
19554 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19555 set_idle_for_cpu(cpu, c_idle.idle);
19556 do_rest:
19557 per_cpu(current_task, cpu) = c_idle.idle;
19558 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19559 #ifdef CONFIG_X86_32
19560 /* Stack for startup_32 can be just as for start_secondary onwards */
19561 irq_ctx_init(cpu);
19562 @@ -750,13 +751,15 @@ do_rest:
19563 #else
19564 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19565 initial_gs = per_cpu_offset(cpu);
19566 - per_cpu(kernel_stack, cpu) =
19567 - (unsigned long)task_stack_page(c_idle.idle) -
19568 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19569 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19570 #endif
19571 +
19572 + pax_open_kernel();
19573 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19574 + pax_close_kernel();
19575 +
19576 initial_code = (unsigned long)start_secondary;
19577 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19578 + stack_start = c_idle.idle->thread.sp;
19579
19580 /* start_ip had better be page-aligned! */
19581 start_ip = setup_trampoline();
19582 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19583
19584 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19585
19586 +#ifdef CONFIG_PAX_PER_CPU_PGD
19587 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19588 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19589 + KERNEL_PGD_PTRS);
19590 +#endif
19591 +
19592 err = do_boot_cpu(apicid, cpu);
19593
19594 if (err) {
19595 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19596 index 3149032..14f1053 100644
19597 --- a/arch/x86/kernel/step.c
19598 +++ b/arch/x86/kernel/step.c
19599 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19600 struct desc_struct *desc;
19601 unsigned long base;
19602
19603 - seg &= ~7UL;
19604 + seg >>= 3;
19605
19606 mutex_lock(&child->mm->context.lock);
19607 - if (unlikely((seg >> 3) >= child->mm->context.size))
19608 + if (unlikely(seg >= child->mm->context.size))
19609 addr = -1L; /* bogus selector, access would fault */
19610 else {
19611 desc = child->mm->context.ldt + seg;
19612 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19613 addr += base;
19614 }
19615 mutex_unlock(&child->mm->context.lock);
19616 - }
19617 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19618 + addr = ktla_ktva(addr);
19619
19620 return addr;
19621 }
19622 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19623 unsigned char opcode[15];
19624 unsigned long addr = convert_ip_to_linear(child, regs);
19625
19626 + if (addr == -EINVAL)
19627 + return 0;
19628 +
19629 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19630 for (i = 0; i < copied; i++) {
19631 switch (opcode[i]) {
19632 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19633
19634 #ifdef CONFIG_X86_64
19635 case 0x40 ... 0x4f:
19636 - if (regs->cs != __USER_CS)
19637 + if ((regs->cs & 0xffff) != __USER_CS)
19638 /* 32-bit mode: register increment */
19639 return 0;
19640 /* 64-bit mode: REX prefix */
19641 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19642 index dee1ff7..a397f7f 100644
19643 --- a/arch/x86/kernel/sys_i386_32.c
19644 +++ b/arch/x86/kernel/sys_i386_32.c
19645 @@ -24,6 +24,21 @@
19646
19647 #include <asm/syscalls.h>
19648
19649 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19650 +{
19651 + unsigned long pax_task_size = TASK_SIZE;
19652 +
19653 +#ifdef CONFIG_PAX_SEGMEXEC
19654 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19655 + pax_task_size = SEGMEXEC_TASK_SIZE;
19656 +#endif
19657 +
19658 + if (len > pax_task_size || addr > pax_task_size - len)
19659 + return -EINVAL;
19660 +
19661 + return 0;
19662 +}
19663 +
19664 /*
19665 * Perform the select(nd, in, out, ex, tv) and mmap() system
19666 * calls. Linux/i386 didn't use to be able to handle more than
19667 @@ -58,6 +73,212 @@ out:
19668 return err;
19669 }
19670
19671 +unsigned long
19672 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19673 + unsigned long len, unsigned long pgoff, unsigned long flags)
19674 +{
19675 + struct mm_struct *mm = current->mm;
19676 + struct vm_area_struct *vma;
19677 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19678 +
19679 +#ifdef CONFIG_PAX_SEGMEXEC
19680 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19681 + pax_task_size = SEGMEXEC_TASK_SIZE;
19682 +#endif
19683 +
19684 + pax_task_size -= PAGE_SIZE;
19685 +
19686 + if (len > pax_task_size)
19687 + return -ENOMEM;
19688 +
19689 + if (flags & MAP_FIXED)
19690 + return addr;
19691 +
19692 +#ifdef CONFIG_PAX_RANDMMAP
19693 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19694 +#endif
19695 +
19696 + if (addr) {
19697 + addr = PAGE_ALIGN(addr);
19698 + if (pax_task_size - len >= addr) {
19699 + vma = find_vma(mm, addr);
19700 + if (check_heap_stack_gap(vma, addr, len))
19701 + return addr;
19702 + }
19703 + }
19704 + if (len > mm->cached_hole_size) {
19705 + start_addr = addr = mm->free_area_cache;
19706 + } else {
19707 + start_addr = addr = mm->mmap_base;
19708 + mm->cached_hole_size = 0;
19709 + }
19710 +
19711 +#ifdef CONFIG_PAX_PAGEEXEC
19712 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19713 + start_addr = 0x00110000UL;
19714 +
19715 +#ifdef CONFIG_PAX_RANDMMAP
19716 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19717 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19718 +#endif
19719 +
19720 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19721 + start_addr = addr = mm->mmap_base;
19722 + else
19723 + addr = start_addr;
19724 + }
19725 +#endif
19726 +
19727 +full_search:
19728 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19729 + /* At this point: (!vma || addr < vma->vm_end). */
19730 + if (pax_task_size - len < addr) {
19731 + /*
19732 + * Start a new search - just in case we missed
19733 + * some holes.
19734 + */
19735 + if (start_addr != mm->mmap_base) {
19736 + start_addr = addr = mm->mmap_base;
19737 + mm->cached_hole_size = 0;
19738 + goto full_search;
19739 + }
19740 + return -ENOMEM;
19741 + }
19742 + if (check_heap_stack_gap(vma, addr, len))
19743 + break;
19744 + if (addr + mm->cached_hole_size < vma->vm_start)
19745 + mm->cached_hole_size = vma->vm_start - addr;
19746 + addr = vma->vm_end;
19747 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19748 + start_addr = addr = mm->mmap_base;
19749 + mm->cached_hole_size = 0;
19750 + goto full_search;
19751 + }
19752 + }
19753 +
19754 + /*
19755 + * Remember the place where we stopped the search:
19756 + */
19757 + mm->free_area_cache = addr + len;
19758 + return addr;
19759 +}
19760 +
19761 +unsigned long
19762 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19763 + const unsigned long len, const unsigned long pgoff,
19764 + const unsigned long flags)
19765 +{
19766 + struct vm_area_struct *vma;
19767 + struct mm_struct *mm = current->mm;
19768 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19769 +
19770 +#ifdef CONFIG_PAX_SEGMEXEC
19771 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19772 + pax_task_size = SEGMEXEC_TASK_SIZE;
19773 +#endif
19774 +
19775 + pax_task_size -= PAGE_SIZE;
19776 +
19777 + /* requested length too big for entire address space */
19778 + if (len > pax_task_size)
19779 + return -ENOMEM;
19780 +
19781 + if (flags & MAP_FIXED)
19782 + return addr;
19783 +
19784 +#ifdef CONFIG_PAX_PAGEEXEC
19785 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19786 + goto bottomup;
19787 +#endif
19788 +
19789 +#ifdef CONFIG_PAX_RANDMMAP
19790 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19791 +#endif
19792 +
19793 + /* requesting a specific address */
19794 + if (addr) {
19795 + addr = PAGE_ALIGN(addr);
19796 + if (pax_task_size - len >= addr) {
19797 + vma = find_vma(mm, addr);
19798 + if (check_heap_stack_gap(vma, addr, len))
19799 + return addr;
19800 + }
19801 + }
19802 +
19803 + /* check if free_area_cache is useful for us */
19804 + if (len <= mm->cached_hole_size) {
19805 + mm->cached_hole_size = 0;
19806 + mm->free_area_cache = mm->mmap_base;
19807 + }
19808 +
19809 + /* either no address requested or can't fit in requested address hole */
19810 + addr = mm->free_area_cache;
19811 +
19812 + /* make sure it can fit in the remaining address space */
19813 + if (addr > len) {
19814 + vma = find_vma(mm, addr-len);
19815 + if (check_heap_stack_gap(vma, addr - len, len))
19816 + /* remember the address as a hint for next time */
19817 + return (mm->free_area_cache = addr-len);
19818 + }
19819 +
19820 + if (mm->mmap_base < len)
19821 + goto bottomup;
19822 +
19823 + addr = mm->mmap_base-len;
19824 +
19825 + do {
19826 + /*
19827 + * Lookup failure means no vma is above this address,
19828 + * else if new region fits below vma->vm_start,
19829 + * return with success:
19830 + */
19831 + vma = find_vma(mm, addr);
19832 + if (check_heap_stack_gap(vma, addr, len))
19833 + /* remember the address as a hint for next time */
19834 + return (mm->free_area_cache = addr);
19835 +
19836 + /* remember the largest hole we saw so far */
19837 + if (addr + mm->cached_hole_size < vma->vm_start)
19838 + mm->cached_hole_size = vma->vm_start - addr;
19839 +
19840 + /* try just below the current vma->vm_start */
19841 + addr = skip_heap_stack_gap(vma, len);
19842 + } while (!IS_ERR_VALUE(addr));
19843 +
19844 +bottomup:
19845 + /*
19846 + * A failed mmap() very likely causes application failure,
19847 + * so fall back to the bottom-up function here. This scenario
19848 + * can happen with large stack limits and large mmap()
19849 + * allocations.
19850 + */
19851 +
19852 +#ifdef CONFIG_PAX_SEGMEXEC
19853 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19854 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19855 + else
19856 +#endif
19857 +
19858 + mm->mmap_base = TASK_UNMAPPED_BASE;
19859 +
19860 +#ifdef CONFIG_PAX_RANDMMAP
19861 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19862 + mm->mmap_base += mm->delta_mmap;
19863 +#endif
19864 +
19865 + mm->free_area_cache = mm->mmap_base;
19866 + mm->cached_hole_size = ~0UL;
19867 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868 + /*
19869 + * Restore the topdown base:
19870 + */
19871 + mm->mmap_base = base;
19872 + mm->free_area_cache = base;
19873 + mm->cached_hole_size = ~0UL;
19874 +
19875 + return addr;
19876 +}
19877
19878 struct sel_arg_struct {
19879 unsigned long n;
19880 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19881 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19882 case SEMTIMEDOP:
19883 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19884 - (const struct timespec __user *)fifth);
19885 + (__force const struct timespec __user *)fifth);
19886
19887 case SEMGET:
19888 return sys_semget(first, second, third);
19889 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19890 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19891 if (ret)
19892 return ret;
19893 - return put_user(raddr, (ulong __user *) third);
19894 + return put_user(raddr, (__force ulong __user *) third);
19895 }
19896 case 1: /* iBCS2 emulator entry point */
19897 if (!segment_eq(get_fs(), get_ds()))
19898 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19899
19900 return error;
19901 }
19902 -
19903 -
19904 -/*
19905 - * Do a system call from kernel instead of calling sys_execve so we
19906 - * end up with proper pt_regs.
19907 - */
19908 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19909 -{
19910 - long __res;
19911 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19912 - : "=a" (__res)
19913 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19914 - return __res;
19915 -}
19916 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19917 index 8aa2057..b604bc1 100644
19918 --- a/arch/x86/kernel/sys_x86_64.c
19919 +++ b/arch/x86/kernel/sys_x86_64.c
19920 @@ -32,8 +32,8 @@ out:
19921 return error;
19922 }
19923
19924 -static void find_start_end(unsigned long flags, unsigned long *begin,
19925 - unsigned long *end)
19926 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19927 + unsigned long *begin, unsigned long *end)
19928 {
19929 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19930 unsigned long new_begin;
19931 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19932 *begin = new_begin;
19933 }
19934 } else {
19935 - *begin = TASK_UNMAPPED_BASE;
19936 + *begin = mm->mmap_base;
19937 *end = TASK_SIZE;
19938 }
19939 }
19940 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19941 if (flags & MAP_FIXED)
19942 return addr;
19943
19944 - find_start_end(flags, &begin, &end);
19945 + find_start_end(mm, flags, &begin, &end);
19946
19947 if (len > end)
19948 return -ENOMEM;
19949
19950 +#ifdef CONFIG_PAX_RANDMMAP
19951 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19952 +#endif
19953 +
19954 if (addr) {
19955 addr = PAGE_ALIGN(addr);
19956 vma = find_vma(mm, addr);
19957 - if (end - len >= addr &&
19958 - (!vma || addr + len <= vma->vm_start))
19959 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19960 return addr;
19961 }
19962 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19963 @@ -106,7 +109,7 @@ full_search:
19964 }
19965 return -ENOMEM;
19966 }
19967 - if (!vma || addr + len <= vma->vm_start) {
19968 + if (check_heap_stack_gap(vma, addr, len)) {
19969 /*
19970 * Remember the place where we stopped the search:
19971 */
19972 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19973 {
19974 struct vm_area_struct *vma;
19975 struct mm_struct *mm = current->mm;
19976 - unsigned long addr = addr0;
19977 + unsigned long base = mm->mmap_base, addr = addr0;
19978
19979 /* requested length too big for entire address space */
19980 if (len > TASK_SIZE)
19981 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19982 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19983 goto bottomup;
19984
19985 +#ifdef CONFIG_PAX_RANDMMAP
19986 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19987 +#endif
19988 +
19989 /* requesting a specific address */
19990 if (addr) {
19991 addr = PAGE_ALIGN(addr);
19992 - vma = find_vma(mm, addr);
19993 - if (TASK_SIZE - len >= addr &&
19994 - (!vma || addr + len <= vma->vm_start))
19995 - return addr;
19996 + if (TASK_SIZE - len >= addr) {
19997 + vma = find_vma(mm, addr);
19998 + if (check_heap_stack_gap(vma, addr, len))
19999 + return addr;
20000 + }
20001 }
20002
20003 /* check if free_area_cache is useful for us */
20004 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20005 /* make sure it can fit in the remaining address space */
20006 if (addr > len) {
20007 vma = find_vma(mm, addr-len);
20008 - if (!vma || addr <= vma->vm_start)
20009 + if (check_heap_stack_gap(vma, addr - len, len))
20010 /* remember the address as a hint for next time */
20011 return mm->free_area_cache = addr-len;
20012 }
20013 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20014 * return with success:
20015 */
20016 vma = find_vma(mm, addr);
20017 - if (!vma || addr+len <= vma->vm_start)
20018 + if (check_heap_stack_gap(vma, addr, len))
20019 /* remember the address as a hint for next time */
20020 return mm->free_area_cache = addr;
20021
20022 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20023 mm->cached_hole_size = vma->vm_start - addr;
20024
20025 /* try just below the current vma->vm_start */
20026 - addr = vma->vm_start-len;
20027 - } while (len < vma->vm_start);
20028 + addr = skip_heap_stack_gap(vma, len);
20029 + } while (!IS_ERR_VALUE(addr));
20030
20031 bottomup:
20032 /*
20033 @@ -198,13 +206,21 @@ bottomup:
20034 * can happen with large stack limits and large mmap()
20035 * allocations.
20036 */
20037 + mm->mmap_base = TASK_UNMAPPED_BASE;
20038 +
20039 +#ifdef CONFIG_PAX_RANDMMAP
20040 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20041 + mm->mmap_base += mm->delta_mmap;
20042 +#endif
20043 +
20044 + mm->free_area_cache = mm->mmap_base;
20045 mm->cached_hole_size = ~0UL;
20046 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20047 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20048 /*
20049 * Restore the topdown base:
20050 */
20051 - mm->free_area_cache = mm->mmap_base;
20052 + mm->mmap_base = base;
20053 + mm->free_area_cache = base;
20054 mm->cached_hole_size = ~0UL;
20055
20056 return addr;
20057 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20058 index 76d70a4..4c94a44 100644
20059 --- a/arch/x86/kernel/syscall_table_32.S
20060 +++ b/arch/x86/kernel/syscall_table_32.S
20061 @@ -1,3 +1,4 @@
20062 +.section .rodata,"a",@progbits
20063 ENTRY(sys_call_table)
20064 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20065 .long sys_exit
20066 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20067 index 46b8277..3349d55 100644
20068 --- a/arch/x86/kernel/tboot.c
20069 +++ b/arch/x86/kernel/tboot.c
20070 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20071
20072 void tboot_shutdown(u32 shutdown_type)
20073 {
20074 - void (*shutdown)(void);
20075 + void (* __noreturn shutdown)(void);
20076
20077 if (!tboot_enabled())
20078 return;
20079 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20080
20081 switch_to_tboot_pt();
20082
20083 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20084 + shutdown = (void *)tboot->shutdown_entry;
20085 shutdown();
20086
20087 /* should not reach here */
20088 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20089 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20090 }
20091
20092 -static atomic_t ap_wfs_count;
20093 +static atomic_unchecked_t ap_wfs_count;
20094
20095 static int tboot_wait_for_aps(int num_aps)
20096 {
20097 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20098 {
20099 switch (action) {
20100 case CPU_DYING:
20101 - atomic_inc(&ap_wfs_count);
20102 + atomic_inc_unchecked(&ap_wfs_count);
20103 if (num_online_cpus() == 1)
20104 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20105 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20106 return NOTIFY_BAD;
20107 break;
20108 }
20109 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20110
20111 tboot_create_trampoline();
20112
20113 - atomic_set(&ap_wfs_count, 0);
20114 + atomic_set_unchecked(&ap_wfs_count, 0);
20115 register_hotcpu_notifier(&tboot_cpu_notifier);
20116 return 0;
20117 }
20118 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20119 index be25734..87fe232 100644
20120 --- a/arch/x86/kernel/time.c
20121 +++ b/arch/x86/kernel/time.c
20122 @@ -26,17 +26,13 @@
20123 int timer_ack;
20124 #endif
20125
20126 -#ifdef CONFIG_X86_64
20127 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20128 -#endif
20129 -
20130 unsigned long profile_pc(struct pt_regs *regs)
20131 {
20132 unsigned long pc = instruction_pointer(regs);
20133
20134 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20135 + if (!user_mode(regs) && in_lock_functions(pc)) {
20136 #ifdef CONFIG_FRAME_POINTER
20137 - return *(unsigned long *)(regs->bp + sizeof(long));
20138 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20139 #else
20140 unsigned long *sp =
20141 (unsigned long *)kernel_stack_pointer(regs);
20142 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20143 * or above a saved flags. Eflags has bits 22-31 zero,
20144 * kernel addresses don't.
20145 */
20146 +
20147 +#ifdef CONFIG_PAX_KERNEXEC
20148 + return ktla_ktva(sp[0]);
20149 +#else
20150 if (sp[0] >> 22)
20151 return sp[0];
20152 if (sp[1] >> 22)
20153 return sp[1];
20154 #endif
20155 +
20156 +#endif
20157 }
20158 return pc;
20159 }
20160 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20161 index 6bb7b85..dd853e1 100644
20162 --- a/arch/x86/kernel/tls.c
20163 +++ b/arch/x86/kernel/tls.c
20164 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20165 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20166 return -EINVAL;
20167
20168 +#ifdef CONFIG_PAX_SEGMEXEC
20169 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20170 + return -EINVAL;
20171 +#endif
20172 +
20173 set_tls_desc(p, idx, &info, 1);
20174
20175 return 0;
20176 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20177 index 8508237..229b664 100644
20178 --- a/arch/x86/kernel/trampoline_32.S
20179 +++ b/arch/x86/kernel/trampoline_32.S
20180 @@ -32,6 +32,12 @@
20181 #include <asm/segment.h>
20182 #include <asm/page_types.h>
20183
20184 +#ifdef CONFIG_PAX_KERNEXEC
20185 +#define ta(X) (X)
20186 +#else
20187 +#define ta(X) ((X) - __PAGE_OFFSET)
20188 +#endif
20189 +
20190 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20191 __CPUINITRODATA
20192 .code16
20193 @@ -60,7 +66,7 @@ r_base = .
20194 inc %ax # protected mode (PE) bit
20195 lmsw %ax # into protected mode
20196 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20197 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20198 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
20199
20200 # These need to be in the same 64K segment as the above;
20201 # hence we don't use the boot_gdt_descr defined in head.S
20202 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20203 index 3af2dff..ba8aa49 100644
20204 --- a/arch/x86/kernel/trampoline_64.S
20205 +++ b/arch/x86/kernel/trampoline_64.S
20206 @@ -91,7 +91,7 @@ startup_32:
20207 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20208 movl %eax, %ds
20209
20210 - movl $X86_CR4_PAE, %eax
20211 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20212 movl %eax, %cr4 # Enable PAE mode
20213
20214 # Setup trampoline 4 level pagetables
20215 @@ -127,7 +127,7 @@ startup_64:
20216 no_longmode:
20217 hlt
20218 jmp no_longmode
20219 -#include "verify_cpu_64.S"
20220 +#include "verify_cpu.S"
20221
20222 # Careful these need to be in the same 64K segment as the above;
20223 tidt:
20224 @@ -138,7 +138,7 @@ tidt:
20225 # so the kernel can live anywhere
20226 .balign 4
20227 tgdt:
20228 - .short tgdt_end - tgdt # gdt limit
20229 + .short tgdt_end - tgdt - 1 # gdt limit
20230 .long tgdt - r_base
20231 .short 0
20232 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20233 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20234 index 7e37dce..ec3f8e5 100644
20235 --- a/arch/x86/kernel/traps.c
20236 +++ b/arch/x86/kernel/traps.c
20237 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20238
20239 /* Do we ignore FPU interrupts ? */
20240 char ignore_fpu_irq;
20241 -
20242 -/*
20243 - * The IDT has to be page-aligned to simplify the Pentium
20244 - * F0 0F bug workaround.
20245 - */
20246 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20247 #endif
20248
20249 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20250 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20251 static inline void
20252 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20253 {
20254 - if (!user_mode_vm(regs))
20255 + if (!user_mode(regs))
20256 die(str, regs, err);
20257 }
20258 #endif
20259
20260 static void __kprobes
20261 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20262 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20263 long error_code, siginfo_t *info)
20264 {
20265 struct task_struct *tsk = current;
20266
20267 #ifdef CONFIG_X86_32
20268 - if (regs->flags & X86_VM_MASK) {
20269 + if (v8086_mode(regs)) {
20270 /*
20271 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20272 * On nmi (interrupt 2), do_trap should not be called.
20273 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20274 }
20275 #endif
20276
20277 - if (!user_mode(regs))
20278 + if (!user_mode_novm(regs))
20279 goto kernel_trap;
20280
20281 #ifdef CONFIG_X86_32
20282 @@ -158,7 +152,7 @@ trap_signal:
20283 printk_ratelimit()) {
20284 printk(KERN_INFO
20285 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20286 - tsk->comm, tsk->pid, str,
20287 + tsk->comm, task_pid_nr(tsk), str,
20288 regs->ip, regs->sp, error_code);
20289 print_vma_addr(" in ", regs->ip);
20290 printk("\n");
20291 @@ -175,8 +169,20 @@ kernel_trap:
20292 if (!fixup_exception(regs)) {
20293 tsk->thread.error_code = error_code;
20294 tsk->thread.trap_no = trapnr;
20295 +
20296 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20297 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20298 + str = "PAX: suspicious stack segment fault";
20299 +#endif
20300 +
20301 die(str, regs, error_code);
20302 }
20303 +
20304 +#ifdef CONFIG_PAX_REFCOUNT
20305 + if (trapnr == 4)
20306 + pax_report_refcount_overflow(regs);
20307 +#endif
20308 +
20309 return;
20310
20311 #ifdef CONFIG_X86_32
20312 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20313 conditional_sti(regs);
20314
20315 #ifdef CONFIG_X86_32
20316 - if (regs->flags & X86_VM_MASK)
20317 + if (v8086_mode(regs))
20318 goto gp_in_vm86;
20319 #endif
20320
20321 tsk = current;
20322 - if (!user_mode(regs))
20323 + if (!user_mode_novm(regs))
20324 goto gp_in_kernel;
20325
20326 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20327 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20328 + struct mm_struct *mm = tsk->mm;
20329 + unsigned long limit;
20330 +
20331 + down_write(&mm->mmap_sem);
20332 + limit = mm->context.user_cs_limit;
20333 + if (limit < TASK_SIZE) {
20334 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20335 + up_write(&mm->mmap_sem);
20336 + return;
20337 + }
20338 + up_write(&mm->mmap_sem);
20339 + }
20340 +#endif
20341 +
20342 tsk->thread.error_code = error_code;
20343 tsk->thread.trap_no = 13;
20344
20345 @@ -305,6 +327,13 @@ gp_in_kernel:
20346 if (notify_die(DIE_GPF, "general protection fault", regs,
20347 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20348 return;
20349 +
20350 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20351 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20352 + die("PAX: suspicious general protection fault", regs, error_code);
20353 + else
20354 +#endif
20355 +
20356 die("general protection fault", regs, error_code);
20357 }
20358
20359 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20360 dotraplinkage notrace __kprobes void
20361 do_nmi(struct pt_regs *regs, long error_code)
20362 {
20363 +
20364 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20365 + if (!user_mode(regs)) {
20366 + unsigned long cs = regs->cs & 0xFFFF;
20367 + unsigned long ip = ktva_ktla(regs->ip);
20368 +
20369 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20370 + regs->ip = ip;
20371 + }
20372 +#endif
20373 +
20374 nmi_enter();
20375
20376 inc_irq_stat(__nmi_count);
20377 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20378 }
20379
20380 #ifdef CONFIG_X86_32
20381 - if (regs->flags & X86_VM_MASK)
20382 + if (v8086_mode(regs))
20383 goto debug_vm86;
20384 #endif
20385
20386 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20387 * kernel space (but re-enable TF when returning to user mode).
20388 */
20389 if (condition & DR_STEP) {
20390 - if (!user_mode(regs))
20391 + if (!user_mode_novm(regs))
20392 goto clear_TF_reenable;
20393 }
20394
20395 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20396 * Handle strange cache flush from user space exception
20397 * in all other cases. This is undocumented behaviour.
20398 */
20399 - if (regs->flags & X86_VM_MASK) {
20400 + if (v8086_mode(regs)) {
20401 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20402 return;
20403 }
20404 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20405 void __math_state_restore(void)
20406 {
20407 struct thread_info *thread = current_thread_info();
20408 - struct task_struct *tsk = thread->task;
20409 + struct task_struct *tsk = current;
20410
20411 /*
20412 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20413 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20414 */
20415 asmlinkage void math_state_restore(void)
20416 {
20417 - struct thread_info *thread = current_thread_info();
20418 - struct task_struct *tsk = thread->task;
20419 + struct task_struct *tsk = current;
20420
20421 if (!tsk_used_math(tsk)) {
20422 local_irq_enable();
20423 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20424 new file mode 100644
20425 index 0000000..50c5edd
20426 --- /dev/null
20427 +++ b/arch/x86/kernel/verify_cpu.S
20428 @@ -0,0 +1,140 @@
20429 +/*
20430 + *
20431 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20432 + * code has been borrowed from boot/setup.S and was introduced by
20433 + * Andi Kleen.
20434 + *
20435 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20436 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20437 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20438 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20439 + *
20440 + * This source code is licensed under the GNU General Public License,
20441 + * Version 2. See the file COPYING for more details.
20442 + *
20443 + * This is a common code for verification whether CPU supports
20444 + * long mode and SSE or not. It is not called directly instead this
20445 + * file is included at various places and compiled in that context.
20446 + * This file is expected to run in 32bit code. Currently:
20447 + *
20448 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20449 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20450 + * arch/x86/kernel/head_32.S: processor startup
20451 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20452 + *
20453 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20454 + * 0: Success 1: Failure
20455 + *
20456 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20457 + *
20458 + * The caller needs to check for the error code and take the action
20459 + * appropriately. Either display a message or halt.
20460 + */
20461 +
20462 +#include <asm/cpufeature.h>
20463 +#include <asm/msr-index.h>
20464 +
20465 +verify_cpu:
20466 + pushfl # Save caller passed flags
20467 + pushl $0 # Kill any dangerous flags
20468 + popfl
20469 +
20470 + pushfl # standard way to check for cpuid
20471 + popl %eax
20472 + movl %eax,%ebx
20473 + xorl $0x200000,%eax
20474 + pushl %eax
20475 + popfl
20476 + pushfl
20477 + popl %eax
20478 + cmpl %eax,%ebx
20479 + jz verify_cpu_no_longmode # cpu has no cpuid
20480 +
20481 + movl $0x0,%eax # See if cpuid 1 is implemented
20482 + cpuid
20483 + cmpl $0x1,%eax
20484 + jb verify_cpu_no_longmode # no cpuid 1
20485 +
20486 + xor %di,%di
20487 + cmpl $0x68747541,%ebx # AuthenticAMD
20488 + jnz verify_cpu_noamd
20489 + cmpl $0x69746e65,%edx
20490 + jnz verify_cpu_noamd
20491 + cmpl $0x444d4163,%ecx
20492 + jnz verify_cpu_noamd
20493 + mov $1,%di # cpu is from AMD
20494 + jmp verify_cpu_check
20495 +
20496 +verify_cpu_noamd:
20497 + cmpl $0x756e6547,%ebx # GenuineIntel?
20498 + jnz verify_cpu_check
20499 + cmpl $0x49656e69,%edx
20500 + jnz verify_cpu_check
20501 + cmpl $0x6c65746e,%ecx
20502 + jnz verify_cpu_check
20503 +
20504 + # only call IA32_MISC_ENABLE when:
20505 + # family > 6 || (family == 6 && model >= 0xd)
20506 + movl $0x1, %eax # check CPU family and model
20507 + cpuid
20508 + movl %eax, %ecx
20509 +
20510 + andl $0x0ff00f00, %eax # mask family and extended family
20511 + shrl $8, %eax
20512 + cmpl $6, %eax
20513 + ja verify_cpu_clear_xd # family > 6, ok
20514 + jb verify_cpu_check # family < 6, skip
20515 +
20516 + andl $0x000f00f0, %ecx # mask model and extended model
20517 + shrl $4, %ecx
20518 + cmpl $0xd, %ecx
20519 + jb verify_cpu_check # family == 6, model < 0xd, skip
20520 +
20521 +verify_cpu_clear_xd:
20522 + movl $MSR_IA32_MISC_ENABLE, %ecx
20523 + rdmsr
20524 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20525 + jnc verify_cpu_check # only write MSR if bit was changed
20526 + wrmsr
20527 +
20528 +verify_cpu_check:
20529 + movl $0x1,%eax # Does the cpu have what it takes
20530 + cpuid
20531 + andl $REQUIRED_MASK0,%edx
20532 + xorl $REQUIRED_MASK0,%edx
20533 + jnz verify_cpu_no_longmode
20534 +
20535 + movl $0x80000000,%eax # See if extended cpuid is implemented
20536 + cpuid
20537 + cmpl $0x80000001,%eax
20538 + jb verify_cpu_no_longmode # no extended cpuid
20539 +
20540 + movl $0x80000001,%eax # Does the cpu have what it takes
20541 + cpuid
20542 + andl $REQUIRED_MASK1,%edx
20543 + xorl $REQUIRED_MASK1,%edx
20544 + jnz verify_cpu_no_longmode
20545 +
20546 +verify_cpu_sse_test:
20547 + movl $1,%eax
20548 + cpuid
20549 + andl $SSE_MASK,%edx
20550 + cmpl $SSE_MASK,%edx
20551 + je verify_cpu_sse_ok
20552 + test %di,%di
20553 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20554 + movl $MSR_K7_HWCR,%ecx
20555 + rdmsr
20556 + btr $15,%eax # enable SSE
20557 + wrmsr
20558 + xor %di,%di # don't loop
20559 + jmp verify_cpu_sse_test # try again
20560 +
20561 +verify_cpu_no_longmode:
20562 + popfl # Restore caller passed flags
20563 + movl $1,%eax
20564 + ret
20565 +verify_cpu_sse_ok:
20566 + popfl # Restore caller passed flags
20567 + xorl %eax, %eax
20568 + ret
20569 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20570 deleted file mode 100644
20571 index 45b6f8a..0000000
20572 --- a/arch/x86/kernel/verify_cpu_64.S
20573 +++ /dev/null
20574 @@ -1,105 +0,0 @@
20575 -/*
20576 - *
20577 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20578 - * code has been borrowed from boot/setup.S and was introduced by
20579 - * Andi Kleen.
20580 - *
20581 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20582 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20583 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20584 - *
20585 - * This source code is licensed under the GNU General Public License,
20586 - * Version 2. See the file COPYING for more details.
20587 - *
20588 - * This is a common code for verification whether CPU supports
20589 - * long mode and SSE or not. It is not called directly instead this
20590 - * file is included at various places and compiled in that context.
20591 - * Following are the current usage.
20592 - *
20593 - * This file is included by both 16bit and 32bit code.
20594 - *
20595 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20596 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20597 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20598 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20599 - *
20600 - * verify_cpu, returns the status of cpu check in register %eax.
20601 - * 0: Success 1: Failure
20602 - *
20603 - * The caller needs to check for the error code and take the action
20604 - * appropriately. Either display a message or halt.
20605 - */
20606 -
20607 -#include <asm/cpufeature.h>
20608 -
20609 -verify_cpu:
20610 - pushfl # Save caller passed flags
20611 - pushl $0 # Kill any dangerous flags
20612 - popfl
20613 -
20614 - pushfl # standard way to check for cpuid
20615 - popl %eax
20616 - movl %eax,%ebx
20617 - xorl $0x200000,%eax
20618 - pushl %eax
20619 - popfl
20620 - pushfl
20621 - popl %eax
20622 - cmpl %eax,%ebx
20623 - jz verify_cpu_no_longmode # cpu has no cpuid
20624 -
20625 - movl $0x0,%eax # See if cpuid 1 is implemented
20626 - cpuid
20627 - cmpl $0x1,%eax
20628 - jb verify_cpu_no_longmode # no cpuid 1
20629 -
20630 - xor %di,%di
20631 - cmpl $0x68747541,%ebx # AuthenticAMD
20632 - jnz verify_cpu_noamd
20633 - cmpl $0x69746e65,%edx
20634 - jnz verify_cpu_noamd
20635 - cmpl $0x444d4163,%ecx
20636 - jnz verify_cpu_noamd
20637 - mov $1,%di # cpu is from AMD
20638 -
20639 -verify_cpu_noamd:
20640 - movl $0x1,%eax # Does the cpu have what it takes
20641 - cpuid
20642 - andl $REQUIRED_MASK0,%edx
20643 - xorl $REQUIRED_MASK0,%edx
20644 - jnz verify_cpu_no_longmode
20645 -
20646 - movl $0x80000000,%eax # See if extended cpuid is implemented
20647 - cpuid
20648 - cmpl $0x80000001,%eax
20649 - jb verify_cpu_no_longmode # no extended cpuid
20650 -
20651 - movl $0x80000001,%eax # Does the cpu have what it takes
20652 - cpuid
20653 - andl $REQUIRED_MASK1,%edx
20654 - xorl $REQUIRED_MASK1,%edx
20655 - jnz verify_cpu_no_longmode
20656 -
20657 -verify_cpu_sse_test:
20658 - movl $1,%eax
20659 - cpuid
20660 - andl $SSE_MASK,%edx
20661 - cmpl $SSE_MASK,%edx
20662 - je verify_cpu_sse_ok
20663 - test %di,%di
20664 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20665 - movl $0xc0010015,%ecx # HWCR
20666 - rdmsr
20667 - btr $15,%eax # enable SSE
20668 - wrmsr
20669 - xor %di,%di # don't loop
20670 - jmp verify_cpu_sse_test # try again
20671 -
20672 -verify_cpu_no_longmode:
20673 - popfl # Restore caller passed flags
20674 - movl $1,%eax
20675 - ret
20676 -verify_cpu_sse_ok:
20677 - popfl # Restore caller passed flags
20678 - xorl %eax, %eax
20679 - ret
20680 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20681 index 9c4e625..c992817 100644
20682 --- a/arch/x86/kernel/vm86_32.c
20683 +++ b/arch/x86/kernel/vm86_32.c
20684 @@ -41,6 +41,7 @@
20685 #include <linux/ptrace.h>
20686 #include <linux/audit.h>
20687 #include <linux/stddef.h>
20688 +#include <linux/grsecurity.h>
20689
20690 #include <asm/uaccess.h>
20691 #include <asm/io.h>
20692 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20693 do_exit(SIGSEGV);
20694 }
20695
20696 - tss = &per_cpu(init_tss, get_cpu());
20697 + tss = init_tss + get_cpu();
20698 current->thread.sp0 = current->thread.saved_sp0;
20699 current->thread.sysenter_cs = __KERNEL_CS;
20700 load_sp0(tss, &current->thread);
20701 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20702 struct task_struct *tsk;
20703 int tmp, ret = -EPERM;
20704
20705 +#ifdef CONFIG_GRKERNSEC_VM86
20706 + if (!capable(CAP_SYS_RAWIO)) {
20707 + gr_handle_vm86();
20708 + goto out;
20709 + }
20710 +#endif
20711 +
20712 tsk = current;
20713 if (tsk->thread.saved_sp0)
20714 goto out;
20715 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20716 int tmp, ret;
20717 struct vm86plus_struct __user *v86;
20718
20719 +#ifdef CONFIG_GRKERNSEC_VM86
20720 + if (!capable(CAP_SYS_RAWIO)) {
20721 + gr_handle_vm86();
20722 + ret = -EPERM;
20723 + goto out;
20724 + }
20725 +#endif
20726 +
20727 tsk = current;
20728 switch (regs->bx) {
20729 case VM86_REQUEST_IRQ:
20730 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20731 tsk->thread.saved_fs = info->regs32->fs;
20732 tsk->thread.saved_gs = get_user_gs(info->regs32);
20733
20734 - tss = &per_cpu(init_tss, get_cpu());
20735 + tss = init_tss + get_cpu();
20736 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20737 if (cpu_has_sep)
20738 tsk->thread.sysenter_cs = 0;
20739 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20740 goto cannot_handle;
20741 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20742 goto cannot_handle;
20743 - intr_ptr = (unsigned long __user *) (i << 2);
20744 + intr_ptr = (__force unsigned long __user *) (i << 2);
20745 if (get_user(segoffs, intr_ptr))
20746 goto cannot_handle;
20747 if ((segoffs >> 16) == BIOSSEG)
20748 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20749 index d430e4c..831f817 100644
20750 --- a/arch/x86/kernel/vmi_32.c
20751 +++ b/arch/x86/kernel/vmi_32.c
20752 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20753 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20754
20755 #define call_vrom_func(rom,func) \
20756 - (((VROMFUNC *)(rom->func))())
20757 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20758
20759 #define call_vrom_long_func(rom,func,arg) \
20760 - (((VROMLONGFUNC *)(rom->func)) (arg))
20761 +({\
20762 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20763 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20764 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20765 + __reloc;\
20766 +})
20767
20768 -static struct vrom_header *vmi_rom;
20769 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20770 static int disable_pge;
20771 static int disable_pse;
20772 static int disable_sep;
20773 @@ -76,10 +81,10 @@ static struct {
20774 void (*set_initial_ap_state)(int, int);
20775 void (*halt)(void);
20776 void (*set_lazy_mode)(int mode);
20777 -} vmi_ops;
20778 +} __no_const vmi_ops __read_only;
20779
20780 /* Cached VMI operations */
20781 -struct vmi_timer_ops vmi_timer_ops;
20782 +struct vmi_timer_ops vmi_timer_ops __read_only;
20783
20784 /*
20785 * VMI patching routines.
20786 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20787 static inline void patch_offset(void *insnbuf,
20788 unsigned long ip, unsigned long dest)
20789 {
20790 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20791 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20792 }
20793
20794 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20795 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20796 {
20797 u64 reloc;
20798 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20799 +
20800 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20801 switch(rel->type) {
20802 case VMI_RELOCATION_CALL_REL:
20803 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20804
20805 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20806 {
20807 - const pte_t pte = { .pte = 0 };
20808 + const pte_t pte = __pte(0ULL);
20809 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20810 }
20811
20812 static void vmi_pmd_clear(pmd_t *pmd)
20813 {
20814 - const pte_t pte = { .pte = 0 };
20815 + const pte_t pte = __pte(0ULL);
20816 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20817 }
20818 #endif
20819 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20820 ap.ss = __KERNEL_DS;
20821 ap.esp = (unsigned long) start_esp;
20822
20823 - ap.ds = __USER_DS;
20824 - ap.es = __USER_DS;
20825 + ap.ds = __KERNEL_DS;
20826 + ap.es = __KERNEL_DS;
20827 ap.fs = __KERNEL_PERCPU;
20828 - ap.gs = __KERNEL_STACK_CANARY;
20829 + savesegment(gs, ap.gs);
20830
20831 ap.eflags = 0;
20832
20833 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20834 paravirt_leave_lazy_mmu();
20835 }
20836
20837 +#ifdef CONFIG_PAX_KERNEXEC
20838 +static unsigned long vmi_pax_open_kernel(void)
20839 +{
20840 + return 0;
20841 +}
20842 +
20843 +static unsigned long vmi_pax_close_kernel(void)
20844 +{
20845 + return 0;
20846 +}
20847 +#endif
20848 +
20849 static inline int __init check_vmi_rom(struct vrom_header *rom)
20850 {
20851 struct pci_header *pci;
20852 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20853 return 0;
20854 if (rom->vrom_signature != VMI_SIGNATURE)
20855 return 0;
20856 + if (rom->rom_length * 512 > sizeof(*rom)) {
20857 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20858 + return 0;
20859 + }
20860 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20861 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20862 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20863 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20864 struct vrom_header *romstart;
20865 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20866 if (check_vmi_rom(romstart)) {
20867 - vmi_rom = romstart;
20868 + vmi_rom = *romstart;
20869 return 1;
20870 }
20871 }
20872 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20873
20874 para_fill(pv_irq_ops.safe_halt, Halt);
20875
20876 +#ifdef CONFIG_PAX_KERNEXEC
20877 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20878 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20879 +#endif
20880 +
20881 /*
20882 * Alternative instruction rewriting doesn't happen soon enough
20883 * to convert VMI_IRET to a call instead of a jump; so we have
20884 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20885
20886 void __init vmi_init(void)
20887 {
20888 - if (!vmi_rom)
20889 + if (!vmi_rom.rom_signature)
20890 probe_vmi_rom();
20891 else
20892 - check_vmi_rom(vmi_rom);
20893 + check_vmi_rom(&vmi_rom);
20894
20895 /* In case probing for or validating the ROM failed, basil */
20896 - if (!vmi_rom)
20897 + if (!vmi_rom.rom_signature)
20898 return;
20899
20900 - reserve_top_address(-vmi_rom->virtual_top);
20901 + reserve_top_address(-vmi_rom.virtual_top);
20902
20903 #ifdef CONFIG_X86_IO_APIC
20904 /* This is virtual hardware; timer routing is wired correctly */
20905 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20906 {
20907 unsigned long flags;
20908
20909 - if (!vmi_rom)
20910 + if (!vmi_rom.rom_signature)
20911 return;
20912
20913 local_irq_save(flags);
20914 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20915 index 3c68fe2..12c8280 100644
20916 --- a/arch/x86/kernel/vmlinux.lds.S
20917 +++ b/arch/x86/kernel/vmlinux.lds.S
20918 @@ -26,6 +26,13 @@
20919 #include <asm/page_types.h>
20920 #include <asm/cache.h>
20921 #include <asm/boot.h>
20922 +#include <asm/segment.h>
20923 +
20924 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20925 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20926 +#else
20927 +#define __KERNEL_TEXT_OFFSET 0
20928 +#endif
20929
20930 #undef i386 /* in case the preprocessor is a 32bit one */
20931
20932 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20933 #ifdef CONFIG_X86_32
20934 OUTPUT_ARCH(i386)
20935 ENTRY(phys_startup_32)
20936 -jiffies = jiffies_64;
20937 #else
20938 OUTPUT_ARCH(i386:x86-64)
20939 ENTRY(phys_startup_64)
20940 -jiffies_64 = jiffies;
20941 #endif
20942
20943 PHDRS {
20944 text PT_LOAD FLAGS(5); /* R_E */
20945 - data PT_LOAD FLAGS(7); /* RWE */
20946 +#ifdef CONFIG_X86_32
20947 + module PT_LOAD FLAGS(5); /* R_E */
20948 +#endif
20949 +#ifdef CONFIG_XEN
20950 + rodata PT_LOAD FLAGS(5); /* R_E */
20951 +#else
20952 + rodata PT_LOAD FLAGS(4); /* R__ */
20953 +#endif
20954 + data PT_LOAD FLAGS(6); /* RW_ */
20955 #ifdef CONFIG_X86_64
20956 user PT_LOAD FLAGS(5); /* R_E */
20957 +#endif
20958 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20959 #ifdef CONFIG_SMP
20960 percpu PT_LOAD FLAGS(6); /* RW_ */
20961 #endif
20962 + text.init PT_LOAD FLAGS(5); /* R_E */
20963 + text.exit PT_LOAD FLAGS(5); /* R_E */
20964 init PT_LOAD FLAGS(7); /* RWE */
20965 -#endif
20966 note PT_NOTE FLAGS(0); /* ___ */
20967 }
20968
20969 SECTIONS
20970 {
20971 #ifdef CONFIG_X86_32
20972 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20973 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20974 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20975 #else
20976 - . = __START_KERNEL;
20977 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20978 + . = __START_KERNEL;
20979 #endif
20980
20981 /* Text and read-only data */
20982 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20983 - _text = .;
20984 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20985 /* bootstrapping code */
20986 +#ifdef CONFIG_X86_32
20987 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20988 +#else
20989 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20990 +#endif
20991 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20992 + _text = .;
20993 HEAD_TEXT
20994 #ifdef CONFIG_X86_32
20995 . = ALIGN(PAGE_SIZE);
20996 @@ -82,28 +102,71 @@ SECTIONS
20997 IRQENTRY_TEXT
20998 *(.fixup)
20999 *(.gnu.warning)
21000 - /* End of text section */
21001 - _etext = .;
21002 } :text = 0x9090
21003
21004 - NOTES :text :note
21005 + . += __KERNEL_TEXT_OFFSET;
21006
21007 - EXCEPTION_TABLE(16) :text = 0x9090
21008 +#ifdef CONFIG_X86_32
21009 + . = ALIGN(PAGE_SIZE);
21010 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21011 + *(.vmi.rom)
21012 + } :module
21013 +
21014 + . = ALIGN(PAGE_SIZE);
21015 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21016 +
21017 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21018 + MODULES_EXEC_VADDR = .;
21019 + BYTE(0)
21020 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21021 + . = ALIGN(HPAGE_SIZE);
21022 + MODULES_EXEC_END = . - 1;
21023 +#endif
21024 +
21025 + } :module
21026 +#endif
21027 +
21028 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21029 + /* End of text section */
21030 + _etext = . - __KERNEL_TEXT_OFFSET;
21031 + }
21032 +
21033 +#ifdef CONFIG_X86_32
21034 + . = ALIGN(PAGE_SIZE);
21035 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21036 + *(.idt)
21037 + . = ALIGN(PAGE_SIZE);
21038 + *(.empty_zero_page)
21039 + *(.swapper_pg_fixmap)
21040 + *(.swapper_pg_pmd)
21041 + *(.swapper_pg_dir)
21042 + *(.trampoline_pg_dir)
21043 + } :rodata
21044 +#endif
21045 +
21046 + . = ALIGN(PAGE_SIZE);
21047 + NOTES :rodata :note
21048 +
21049 + EXCEPTION_TABLE(16) :rodata
21050
21051 RO_DATA(PAGE_SIZE)
21052
21053 /* Data */
21054 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21055 +
21056 +#ifdef CONFIG_PAX_KERNEXEC
21057 + . = ALIGN(HPAGE_SIZE);
21058 +#else
21059 + . = ALIGN(PAGE_SIZE);
21060 +#endif
21061 +
21062 /* Start of data section */
21063 _sdata = .;
21064
21065 /* init_task */
21066 INIT_TASK_DATA(THREAD_SIZE)
21067
21068 -#ifdef CONFIG_X86_32
21069 - /* 32 bit has nosave before _edata */
21070 NOSAVE_DATA
21071 -#endif
21072
21073 PAGE_ALIGNED_DATA(PAGE_SIZE)
21074
21075 @@ -112,6 +175,8 @@ SECTIONS
21076 DATA_DATA
21077 CONSTRUCTORS
21078
21079 + jiffies = jiffies_64;
21080 +
21081 /* rarely changed data like cpu maps */
21082 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21083
21084 @@ -166,12 +231,6 @@ SECTIONS
21085 }
21086 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21087
21088 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21089 - .jiffies : AT(VLOAD(.jiffies)) {
21090 - *(.jiffies)
21091 - }
21092 - jiffies = VVIRT(.jiffies);
21093 -
21094 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21095 *(.vsyscall_3)
21096 }
21097 @@ -187,12 +246,19 @@ SECTIONS
21098 #endif /* CONFIG_X86_64 */
21099
21100 /* Init code and data - will be freed after init */
21101 - . = ALIGN(PAGE_SIZE);
21102 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21103 + BYTE(0)
21104 +
21105 +#ifdef CONFIG_PAX_KERNEXEC
21106 + . = ALIGN(HPAGE_SIZE);
21107 +#else
21108 + . = ALIGN(PAGE_SIZE);
21109 +#endif
21110 +
21111 __init_begin = .; /* paired with __init_end */
21112 - }
21113 + } :init.begin
21114
21115 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21116 +#ifdef CONFIG_SMP
21117 /*
21118 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21119 * output PHDR, so the next output section - .init.text - should
21120 @@ -201,12 +267,27 @@ SECTIONS
21121 PERCPU_VADDR(0, :percpu)
21122 #endif
21123
21124 - INIT_TEXT_SECTION(PAGE_SIZE)
21125 -#ifdef CONFIG_X86_64
21126 - :init
21127 -#endif
21128 + . = ALIGN(PAGE_SIZE);
21129 + init_begin = .;
21130 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21131 + VMLINUX_SYMBOL(_sinittext) = .;
21132 + INIT_TEXT
21133 + VMLINUX_SYMBOL(_einittext) = .;
21134 + . = ALIGN(PAGE_SIZE);
21135 + } :text.init
21136
21137 - INIT_DATA_SECTION(16)
21138 + /*
21139 + * .exit.text is discard at runtime, not link time, to deal with
21140 + * references from .altinstructions and .eh_frame
21141 + */
21142 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21143 + EXIT_TEXT
21144 + . = ALIGN(16);
21145 + } :text.exit
21146 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21147 +
21148 + . = ALIGN(PAGE_SIZE);
21149 + INIT_DATA_SECTION(16) :init
21150
21151 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21152 __x86_cpu_dev_start = .;
21153 @@ -232,19 +313,11 @@ SECTIONS
21154 *(.altinstr_replacement)
21155 }
21156
21157 - /*
21158 - * .exit.text is discard at runtime, not link time, to deal with
21159 - * references from .altinstructions and .eh_frame
21160 - */
21161 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21162 - EXIT_TEXT
21163 - }
21164 -
21165 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21166 EXIT_DATA
21167 }
21168
21169 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21170 +#ifndef CONFIG_SMP
21171 PERCPU(PAGE_SIZE)
21172 #endif
21173
21174 @@ -267,12 +340,6 @@ SECTIONS
21175 . = ALIGN(PAGE_SIZE);
21176 }
21177
21178 -#ifdef CONFIG_X86_64
21179 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21180 - NOSAVE_DATA
21181 - }
21182 -#endif
21183 -
21184 /* BSS */
21185 . = ALIGN(PAGE_SIZE);
21186 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21187 @@ -288,6 +355,7 @@ SECTIONS
21188 __brk_base = .;
21189 . += 64 * 1024; /* 64k alignment slop space */
21190 *(.brk_reservation) /* areas brk users have reserved */
21191 + . = ALIGN(HPAGE_SIZE);
21192 __brk_limit = .;
21193 }
21194
21195 @@ -316,13 +384,12 @@ SECTIONS
21196 * for the boot processor.
21197 */
21198 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21199 -INIT_PER_CPU(gdt_page);
21200 INIT_PER_CPU(irq_stack_union);
21201
21202 /*
21203 * Build-time check on the image size:
21204 */
21205 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21206 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21207 "kernel image bigger than KERNEL_IMAGE_SIZE");
21208
21209 #ifdef CONFIG_SMP
21210 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21211 index 62f39d7..3bc46a1 100644
21212 --- a/arch/x86/kernel/vsyscall_64.c
21213 +++ b/arch/x86/kernel/vsyscall_64.c
21214 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21215
21216 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21217 /* copy vsyscall data */
21218 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21219 vsyscall_gtod_data.clock.vread = clock->vread;
21220 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21221 vsyscall_gtod_data.clock.mask = clock->mask;
21222 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21223 We do this here because otherwise user space would do it on
21224 its own in a likely inferior way (no access to jiffies).
21225 If you don't like it pass NULL. */
21226 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
21227 + if (tcache && tcache->blob[0] == (j = jiffies)) {
21228 p = tcache->blob[1];
21229 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21230 /* Load per CPU data from RDTSCP */
21231 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21232 index 3909e3b..5433a97 100644
21233 --- a/arch/x86/kernel/x8664_ksyms_64.c
21234 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21235 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21236
21237 EXPORT_SYMBOL(copy_user_generic);
21238 EXPORT_SYMBOL(__copy_user_nocache);
21239 -EXPORT_SYMBOL(copy_from_user);
21240 -EXPORT_SYMBOL(copy_to_user);
21241 EXPORT_SYMBOL(__copy_from_user_inatomic);
21242
21243 EXPORT_SYMBOL(copy_page);
21244 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21245 index c5ee17e..d63218f 100644
21246 --- a/arch/x86/kernel/xsave.c
21247 +++ b/arch/x86/kernel/xsave.c
21248 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21249 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21250 return -1;
21251
21252 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21253 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21254 fx_sw_user->extended_size -
21255 FP_XSTATE_MAGIC2_SIZE));
21256 /*
21257 @@ -196,7 +196,7 @@ fx_only:
21258 * the other extended state.
21259 */
21260 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21261 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21262 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21263 }
21264
21265 /*
21266 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21267 if (task_thread_info(tsk)->status & TS_XSAVE)
21268 err = restore_user_xstate(buf);
21269 else
21270 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21271 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21272 buf);
21273 if (unlikely(err)) {
21274 /*
21275 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21276 index 1350e43..a94b011 100644
21277 --- a/arch/x86/kvm/emulate.c
21278 +++ b/arch/x86/kvm/emulate.c
21279 @@ -81,8 +81,8 @@
21280 #define Src2CL (1<<29)
21281 #define Src2ImmByte (2<<29)
21282 #define Src2One (3<<29)
21283 -#define Src2Imm16 (4<<29)
21284 -#define Src2Mask (7<<29)
21285 +#define Src2Imm16 (4U<<29)
21286 +#define Src2Mask (7U<<29)
21287
21288 enum {
21289 Group1_80, Group1_81, Group1_82, Group1_83,
21290 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21291
21292 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21293 do { \
21294 + unsigned long _tmp; \
21295 __asm__ __volatile__ ( \
21296 _PRE_EFLAGS("0", "4", "2") \
21297 _op _suffix " %"_x"3,%1; " \
21298 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21299 /* Raw emulation: instruction has two explicit operands. */
21300 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21301 do { \
21302 - unsigned long _tmp; \
21303 - \
21304 switch ((_dst).bytes) { \
21305 case 2: \
21306 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21307 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21308
21309 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21310 do { \
21311 - unsigned long _tmp; \
21312 switch ((_dst).bytes) { \
21313 case 1: \
21314 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21315 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21316 index 8dfeaaa..4daa395 100644
21317 --- a/arch/x86/kvm/lapic.c
21318 +++ b/arch/x86/kvm/lapic.c
21319 @@ -52,7 +52,7 @@
21320 #define APIC_BUS_CYCLE_NS 1
21321
21322 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21323 -#define apic_debug(fmt, arg...)
21324 +#define apic_debug(fmt, arg...) do {} while (0)
21325
21326 #define APIC_LVT_NUM 6
21327 /* 14 is the version for Xeon and Pentium 8.4.8*/
21328 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21329 index 3bc2707..dd157e2 100644
21330 --- a/arch/x86/kvm/paging_tmpl.h
21331 +++ b/arch/x86/kvm/paging_tmpl.h
21332 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21333 int level = PT_PAGE_TABLE_LEVEL;
21334 unsigned long mmu_seq;
21335
21336 + pax_track_stack();
21337 +
21338 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21339 kvm_mmu_audit(vcpu, "pre page fault");
21340
21341 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21342 kvm_mmu_free_some_pages(vcpu);
21343 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21344 level, &write_pt, pfn);
21345 + (void)sptep;
21346 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21347 sptep, *sptep, write_pt);
21348
21349 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21350 index 7c6e63e..c5d92c1 100644
21351 --- a/arch/x86/kvm/svm.c
21352 +++ b/arch/x86/kvm/svm.c
21353 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21354 int cpu = raw_smp_processor_id();
21355
21356 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21357 +
21358 + pax_open_kernel();
21359 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21360 + pax_close_kernel();
21361 +
21362 load_TR_desc();
21363 }
21364
21365 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21366 return true;
21367 }
21368
21369 -static struct kvm_x86_ops svm_x86_ops = {
21370 +static const struct kvm_x86_ops svm_x86_ops = {
21371 .cpu_has_kvm_support = has_svm,
21372 .disabled_by_bios = is_disabled,
21373 .hardware_setup = svm_hardware_setup,
21374 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21375 index e6d925f..e7a4af8 100644
21376 --- a/arch/x86/kvm/vmx.c
21377 +++ b/arch/x86/kvm/vmx.c
21378 @@ -570,7 +570,11 @@ static void reload_tss(void)
21379
21380 kvm_get_gdt(&gdt);
21381 descs = (void *)gdt.base;
21382 +
21383 + pax_open_kernel();
21384 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21385 + pax_close_kernel();
21386 +
21387 load_TR_desc();
21388 }
21389
21390 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21391 if (!cpu_has_vmx_flexpriority())
21392 flexpriority_enabled = 0;
21393
21394 - if (!cpu_has_vmx_tpr_shadow())
21395 - kvm_x86_ops->update_cr8_intercept = NULL;
21396 + if (!cpu_has_vmx_tpr_shadow()) {
21397 + pax_open_kernel();
21398 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21399 + pax_close_kernel();
21400 + }
21401
21402 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21403 kvm_disable_largepages();
21404 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21405 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21406
21407 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21408 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21409 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21410 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21411 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21412 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21413 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21414 "jmp .Lkvm_vmx_return \n\t"
21415 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21416 ".Lkvm_vmx_return: "
21417 +
21418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21419 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21420 + ".Lkvm_vmx_return2: "
21421 +#endif
21422 +
21423 /* Save guest registers, load host registers, keep flags */
21424 "xchg %0, (%%"R"sp) \n\t"
21425 "mov %%"R"ax, %c[rax](%0) \n\t"
21426 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21427 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21428 #endif
21429 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21430 +
21431 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21432 + ,[cs]"i"(__KERNEL_CS)
21433 +#endif
21434 +
21435 : "cc", "memory"
21436 - , R"bx", R"di", R"si"
21437 + , R"ax", R"bx", R"di", R"si"
21438 #ifdef CONFIG_X86_64
21439 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21440 #endif
21441 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21442 if (vmx->rmode.irq.pending)
21443 fixup_rmode_irq(vmx);
21444
21445 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21446 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21447 +
21448 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21449 + loadsegment(fs, __KERNEL_PERCPU);
21450 +#endif
21451 +
21452 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21453 + __set_fs(current_thread_info()->addr_limit);
21454 +#endif
21455 +
21456 vmx->launched = 1;
21457
21458 vmx_complete_interrupts(vmx);
21459 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21460 return false;
21461 }
21462
21463 -static struct kvm_x86_ops vmx_x86_ops = {
21464 +static const struct kvm_x86_ops vmx_x86_ops = {
21465 .cpu_has_kvm_support = cpu_has_kvm_support,
21466 .disabled_by_bios = vmx_disabled_by_bios,
21467 .hardware_setup = hardware_setup,
21468 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21469 index df1cefb..5e882ad 100644
21470 --- a/arch/x86/kvm/x86.c
21471 +++ b/arch/x86/kvm/x86.c
21472 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21473 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21474 struct kvm_cpuid_entry2 __user *entries);
21475
21476 -struct kvm_x86_ops *kvm_x86_ops;
21477 +const struct kvm_x86_ops *kvm_x86_ops;
21478 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21479
21480 int ignore_msrs = 0;
21481 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21482 struct kvm_cpuid2 *cpuid,
21483 struct kvm_cpuid_entry2 __user *entries)
21484 {
21485 - int r;
21486 + int r, i;
21487
21488 r = -E2BIG;
21489 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21490 goto out;
21491 r = -EFAULT;
21492 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21493 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21494 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21495 goto out;
21496 + for (i = 0; i < cpuid->nent; ++i) {
21497 + struct kvm_cpuid_entry2 cpuid_entry;
21498 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21499 + goto out;
21500 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21501 + }
21502 vcpu->arch.cpuid_nent = cpuid->nent;
21503 kvm_apic_set_version(vcpu);
21504 return 0;
21505 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21506 struct kvm_cpuid2 *cpuid,
21507 struct kvm_cpuid_entry2 __user *entries)
21508 {
21509 - int r;
21510 + int r, i;
21511
21512 vcpu_load(vcpu);
21513 r = -E2BIG;
21514 if (cpuid->nent < vcpu->arch.cpuid_nent)
21515 goto out;
21516 r = -EFAULT;
21517 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21518 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21519 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21520 goto out;
21521 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21522 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21523 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21524 + goto out;
21525 + }
21526 return 0;
21527
21528 out:
21529 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21530 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21531 struct kvm_interrupt *irq)
21532 {
21533 - if (irq->irq < 0 || irq->irq >= 256)
21534 + if (irq->irq >= 256)
21535 return -EINVAL;
21536 if (irqchip_in_kernel(vcpu->kvm))
21537 return -ENXIO;
21538 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21539 .notifier_call = kvmclock_cpufreq_notifier
21540 };
21541
21542 -int kvm_arch_init(void *opaque)
21543 +int kvm_arch_init(const void *opaque)
21544 {
21545 int r, cpu;
21546 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21547 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21548
21549 if (kvm_x86_ops) {
21550 printk(KERN_ERR "kvm: already loaded the other module\n");
21551 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21552 index 7e59dc1..b88c98f 100644
21553 --- a/arch/x86/lguest/boot.c
21554 +++ b/arch/x86/lguest/boot.c
21555 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21556 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21557 * Launcher to reboot us.
21558 */
21559 -static void lguest_restart(char *reason)
21560 +static __noreturn void lguest_restart(char *reason)
21561 {
21562 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21563 + BUG();
21564 }
21565
21566 /*G:050
21567 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21568 index 824fa0b..c619e96 100644
21569 --- a/arch/x86/lib/atomic64_32.c
21570 +++ b/arch/x86/lib/atomic64_32.c
21571 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21572 }
21573 EXPORT_SYMBOL(atomic64_cmpxchg);
21574
21575 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21576 +{
21577 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21578 +}
21579 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21580 +
21581 /**
21582 * atomic64_xchg - xchg atomic64 variable
21583 * @ptr: pointer to type atomic64_t
21584 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21585 EXPORT_SYMBOL(atomic64_xchg);
21586
21587 /**
21588 + * atomic64_xchg_unchecked - xchg atomic64 variable
21589 + * @ptr: pointer to type atomic64_unchecked_t
21590 + * @new_val: value to assign
21591 + *
21592 + * Atomically xchgs the value of @ptr to @new_val and returns
21593 + * the old value.
21594 + */
21595 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21596 +{
21597 + /*
21598 + * Try first with a (possibly incorrect) assumption about
21599 + * what we have there. We'll do two loops most likely,
21600 + * but we'll get an ownership MESI transaction straight away
21601 + * instead of a read transaction followed by a
21602 + * flush-for-ownership transaction:
21603 + */
21604 + u64 old_val, real_val = 0;
21605 +
21606 + do {
21607 + old_val = real_val;
21608 +
21609 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21610 +
21611 + } while (real_val != old_val);
21612 +
21613 + return old_val;
21614 +}
21615 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21616 +
21617 +/**
21618 * atomic64_set - set atomic64 variable
21619 * @ptr: pointer to type atomic64_t
21620 * @new_val: value to assign
21621 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21622 EXPORT_SYMBOL(atomic64_set);
21623
21624 /**
21625 -EXPORT_SYMBOL(atomic64_read);
21626 + * atomic64_unchecked_set - set atomic64 variable
21627 + * @ptr: pointer to type atomic64_unchecked_t
21628 + * @new_val: value to assign
21629 + *
21630 + * Atomically sets the value of @ptr to @new_val.
21631 + */
21632 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21633 +{
21634 + atomic64_xchg_unchecked(ptr, new_val);
21635 +}
21636 +EXPORT_SYMBOL(atomic64_set_unchecked);
21637 +
21638 +/**
21639 * atomic64_add_return - add and return
21640 * @delta: integer value to add
21641 * @ptr: pointer to type atomic64_t
21642 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21643 }
21644 EXPORT_SYMBOL(atomic64_add_return);
21645
21646 +/**
21647 + * atomic64_add_return_unchecked - add and return
21648 + * @delta: integer value to add
21649 + * @ptr: pointer to type atomic64_unchecked_t
21650 + *
21651 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21652 + */
21653 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21654 +{
21655 + /*
21656 + * Try first with a (possibly incorrect) assumption about
21657 + * what we have there. We'll do two loops most likely,
21658 + * but we'll get an ownership MESI transaction straight away
21659 + * instead of a read transaction followed by a
21660 + * flush-for-ownership transaction:
21661 + */
21662 + u64 old_val, new_val, real_val = 0;
21663 +
21664 + do {
21665 + old_val = real_val;
21666 + new_val = old_val + delta;
21667 +
21668 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21669 +
21670 + } while (real_val != old_val);
21671 +
21672 + return new_val;
21673 +}
21674 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21675 +
21676 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21677 {
21678 return atomic64_add_return(-delta, ptr);
21679 }
21680 EXPORT_SYMBOL(atomic64_sub_return);
21681
21682 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21683 +{
21684 + return atomic64_add_return_unchecked(-delta, ptr);
21685 +}
21686 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21687 +
21688 u64 atomic64_inc_return(atomic64_t *ptr)
21689 {
21690 return atomic64_add_return(1, ptr);
21691 }
21692 EXPORT_SYMBOL(atomic64_inc_return);
21693
21694 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21695 +{
21696 + return atomic64_add_return_unchecked(1, ptr);
21697 +}
21698 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21699 +
21700 u64 atomic64_dec_return(atomic64_t *ptr)
21701 {
21702 return atomic64_sub_return(1, ptr);
21703 }
21704 EXPORT_SYMBOL(atomic64_dec_return);
21705
21706 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21707 +{
21708 + return atomic64_sub_return_unchecked(1, ptr);
21709 +}
21710 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21711 +
21712 /**
21713 * atomic64_add - add integer to atomic64 variable
21714 * @delta: integer value to add
21715 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21716 EXPORT_SYMBOL(atomic64_add);
21717
21718 /**
21719 + * atomic64_add_unchecked - add integer to atomic64 variable
21720 + * @delta: integer value to add
21721 + * @ptr: pointer to type atomic64_unchecked_t
21722 + *
21723 + * Atomically adds @delta to @ptr.
21724 + */
21725 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21726 +{
21727 + atomic64_add_return_unchecked(delta, ptr);
21728 +}
21729 +EXPORT_SYMBOL(atomic64_add_unchecked);
21730 +
21731 +/**
21732 * atomic64_sub - subtract the atomic64 variable
21733 * @delta: integer value to subtract
21734 * @ptr: pointer to type atomic64_t
21735 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21736 EXPORT_SYMBOL(atomic64_sub);
21737
21738 /**
21739 + * atomic64_sub_unchecked - subtract the atomic64 variable
21740 + * @delta: integer value to subtract
21741 + * @ptr: pointer to type atomic64_unchecked_t
21742 + *
21743 + * Atomically subtracts @delta from @ptr.
21744 + */
21745 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21746 +{
21747 + atomic64_add_unchecked(-delta, ptr);
21748 +}
21749 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21750 +
21751 +/**
21752 * atomic64_sub_and_test - subtract value from variable and test result
21753 * @delta: integer value to subtract
21754 * @ptr: pointer to type atomic64_t
21755 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21756 EXPORT_SYMBOL(atomic64_inc);
21757
21758 /**
21759 + * atomic64_inc_unchecked - increment atomic64 variable
21760 + * @ptr: pointer to type atomic64_unchecked_t
21761 + *
21762 + * Atomically increments @ptr by 1.
21763 + */
21764 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21765 +{
21766 + atomic64_add_unchecked(1, ptr);
21767 +}
21768 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21769 +
21770 +/**
21771 * atomic64_dec - decrement atomic64 variable
21772 * @ptr: pointer to type atomic64_t
21773 *
21774 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21775 EXPORT_SYMBOL(atomic64_dec);
21776
21777 /**
21778 + * atomic64_dec_unchecked - decrement atomic64 variable
21779 + * @ptr: pointer to type atomic64_unchecked_t
21780 + *
21781 + * Atomically decrements @ptr by 1.
21782 + */
21783 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21784 +{
21785 + atomic64_sub_unchecked(1, ptr);
21786 +}
21787 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21788 +
21789 +/**
21790 * atomic64_dec_and_test - decrement and test
21791 * @ptr: pointer to type atomic64_t
21792 *
21793 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21794 index adbccd0..98f96c8 100644
21795 --- a/arch/x86/lib/checksum_32.S
21796 +++ b/arch/x86/lib/checksum_32.S
21797 @@ -28,7 +28,8 @@
21798 #include <linux/linkage.h>
21799 #include <asm/dwarf2.h>
21800 #include <asm/errno.h>
21801 -
21802 +#include <asm/segment.h>
21803 +
21804 /*
21805 * computes a partial checksum, e.g. for TCP/UDP fragments
21806 */
21807 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21808
21809 #define ARGBASE 16
21810 #define FP 12
21811 -
21812 -ENTRY(csum_partial_copy_generic)
21813 +
21814 +ENTRY(csum_partial_copy_generic_to_user)
21815 CFI_STARTPROC
21816 +
21817 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21818 + pushl %gs
21819 + CFI_ADJUST_CFA_OFFSET 4
21820 + popl %es
21821 + CFI_ADJUST_CFA_OFFSET -4
21822 + jmp csum_partial_copy_generic
21823 +#endif
21824 +
21825 +ENTRY(csum_partial_copy_generic_from_user)
21826 +
21827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21828 + pushl %gs
21829 + CFI_ADJUST_CFA_OFFSET 4
21830 + popl %ds
21831 + CFI_ADJUST_CFA_OFFSET -4
21832 +#endif
21833 +
21834 +ENTRY(csum_partial_copy_generic)
21835 subl $4,%esp
21836 CFI_ADJUST_CFA_OFFSET 4
21837 pushl %edi
21838 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21839 jmp 4f
21840 SRC(1: movw (%esi), %bx )
21841 addl $2, %esi
21842 -DST( movw %bx, (%edi) )
21843 +DST( movw %bx, %es:(%edi) )
21844 addl $2, %edi
21845 addw %bx, %ax
21846 adcl $0, %eax
21847 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21848 SRC(1: movl (%esi), %ebx )
21849 SRC( movl 4(%esi), %edx )
21850 adcl %ebx, %eax
21851 -DST( movl %ebx, (%edi) )
21852 +DST( movl %ebx, %es:(%edi) )
21853 adcl %edx, %eax
21854 -DST( movl %edx, 4(%edi) )
21855 +DST( movl %edx, %es:4(%edi) )
21856
21857 SRC( movl 8(%esi), %ebx )
21858 SRC( movl 12(%esi), %edx )
21859 adcl %ebx, %eax
21860 -DST( movl %ebx, 8(%edi) )
21861 +DST( movl %ebx, %es:8(%edi) )
21862 adcl %edx, %eax
21863 -DST( movl %edx, 12(%edi) )
21864 +DST( movl %edx, %es:12(%edi) )
21865
21866 SRC( movl 16(%esi), %ebx )
21867 SRC( movl 20(%esi), %edx )
21868 adcl %ebx, %eax
21869 -DST( movl %ebx, 16(%edi) )
21870 +DST( movl %ebx, %es:16(%edi) )
21871 adcl %edx, %eax
21872 -DST( movl %edx, 20(%edi) )
21873 +DST( movl %edx, %es:20(%edi) )
21874
21875 SRC( movl 24(%esi), %ebx )
21876 SRC( movl 28(%esi), %edx )
21877 adcl %ebx, %eax
21878 -DST( movl %ebx, 24(%edi) )
21879 +DST( movl %ebx, %es:24(%edi) )
21880 adcl %edx, %eax
21881 -DST( movl %edx, 28(%edi) )
21882 +DST( movl %edx, %es:28(%edi) )
21883
21884 lea 32(%esi), %esi
21885 lea 32(%edi), %edi
21886 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21887 shrl $2, %edx # This clears CF
21888 SRC(3: movl (%esi), %ebx )
21889 adcl %ebx, %eax
21890 -DST( movl %ebx, (%edi) )
21891 +DST( movl %ebx, %es:(%edi) )
21892 lea 4(%esi), %esi
21893 lea 4(%edi), %edi
21894 dec %edx
21895 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21896 jb 5f
21897 SRC( movw (%esi), %cx )
21898 leal 2(%esi), %esi
21899 -DST( movw %cx, (%edi) )
21900 +DST( movw %cx, %es:(%edi) )
21901 leal 2(%edi), %edi
21902 je 6f
21903 shll $16,%ecx
21904 SRC(5: movb (%esi), %cl )
21905 -DST( movb %cl, (%edi) )
21906 +DST( movb %cl, %es:(%edi) )
21907 6: addl %ecx, %eax
21908 adcl $0, %eax
21909 7:
21910 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21911
21912 6001:
21913 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21914 - movl $-EFAULT, (%ebx)
21915 + movl $-EFAULT, %ss:(%ebx)
21916
21917 # zero the complete destination - computing the rest
21918 # is too much work
21919 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21920
21921 6002:
21922 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21923 - movl $-EFAULT,(%ebx)
21924 + movl $-EFAULT,%ss:(%ebx)
21925 jmp 5000b
21926
21927 .previous
21928
21929 + pushl %ss
21930 + CFI_ADJUST_CFA_OFFSET 4
21931 + popl %ds
21932 + CFI_ADJUST_CFA_OFFSET -4
21933 + pushl %ss
21934 + CFI_ADJUST_CFA_OFFSET 4
21935 + popl %es
21936 + CFI_ADJUST_CFA_OFFSET -4
21937 popl %ebx
21938 CFI_ADJUST_CFA_OFFSET -4
21939 CFI_RESTORE ebx
21940 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21941 CFI_ADJUST_CFA_OFFSET -4
21942 ret
21943 CFI_ENDPROC
21944 -ENDPROC(csum_partial_copy_generic)
21945 +ENDPROC(csum_partial_copy_generic_to_user)
21946
21947 #else
21948
21949 /* Version for PentiumII/PPro */
21950
21951 #define ROUND1(x) \
21952 + nop; nop; nop; \
21953 SRC(movl x(%esi), %ebx ) ; \
21954 addl %ebx, %eax ; \
21955 - DST(movl %ebx, x(%edi) ) ;
21956 + DST(movl %ebx, %es:x(%edi)) ;
21957
21958 #define ROUND(x) \
21959 + nop; nop; nop; \
21960 SRC(movl x(%esi), %ebx ) ; \
21961 adcl %ebx, %eax ; \
21962 - DST(movl %ebx, x(%edi) ) ;
21963 + DST(movl %ebx, %es:x(%edi)) ;
21964
21965 #define ARGBASE 12
21966 -
21967 -ENTRY(csum_partial_copy_generic)
21968 +
21969 +ENTRY(csum_partial_copy_generic_to_user)
21970 CFI_STARTPROC
21971 +
21972 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21973 + pushl %gs
21974 + CFI_ADJUST_CFA_OFFSET 4
21975 + popl %es
21976 + CFI_ADJUST_CFA_OFFSET -4
21977 + jmp csum_partial_copy_generic
21978 +#endif
21979 +
21980 +ENTRY(csum_partial_copy_generic_from_user)
21981 +
21982 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21983 + pushl %gs
21984 + CFI_ADJUST_CFA_OFFSET 4
21985 + popl %ds
21986 + CFI_ADJUST_CFA_OFFSET -4
21987 +#endif
21988 +
21989 +ENTRY(csum_partial_copy_generic)
21990 pushl %ebx
21991 CFI_ADJUST_CFA_OFFSET 4
21992 CFI_REL_OFFSET ebx, 0
21993 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21994 subl %ebx, %edi
21995 lea -1(%esi),%edx
21996 andl $-32,%edx
21997 - lea 3f(%ebx,%ebx), %ebx
21998 + lea 3f(%ebx,%ebx,2), %ebx
21999 testl %esi, %esi
22000 jmp *%ebx
22001 1: addl $64,%esi
22002 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22003 jb 5f
22004 SRC( movw (%esi), %dx )
22005 leal 2(%esi), %esi
22006 -DST( movw %dx, (%edi) )
22007 +DST( movw %dx, %es:(%edi) )
22008 leal 2(%edi), %edi
22009 je 6f
22010 shll $16,%edx
22011 5:
22012 SRC( movb (%esi), %dl )
22013 -DST( movb %dl, (%edi) )
22014 +DST( movb %dl, %es:(%edi) )
22015 6: addl %edx, %eax
22016 adcl $0, %eax
22017 7:
22018 .section .fixup, "ax"
22019 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22020 - movl $-EFAULT, (%ebx)
22021 + movl $-EFAULT, %ss:(%ebx)
22022 # zero the complete destination (computing the rest is too much work)
22023 movl ARGBASE+8(%esp),%edi # dst
22024 movl ARGBASE+12(%esp),%ecx # len
22025 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22026 rep; stosb
22027 jmp 7b
22028 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22029 - movl $-EFAULT, (%ebx)
22030 + movl $-EFAULT, %ss:(%ebx)
22031 jmp 7b
22032 .previous
22033
22034 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22035 + pushl %ss
22036 + CFI_ADJUST_CFA_OFFSET 4
22037 + popl %ds
22038 + CFI_ADJUST_CFA_OFFSET -4
22039 + pushl %ss
22040 + CFI_ADJUST_CFA_OFFSET 4
22041 + popl %es
22042 + CFI_ADJUST_CFA_OFFSET -4
22043 +#endif
22044 +
22045 popl %esi
22046 CFI_ADJUST_CFA_OFFSET -4
22047 CFI_RESTORE esi
22048 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22049 CFI_RESTORE ebx
22050 ret
22051 CFI_ENDPROC
22052 -ENDPROC(csum_partial_copy_generic)
22053 +ENDPROC(csum_partial_copy_generic_to_user)
22054
22055 #undef ROUND
22056 #undef ROUND1
22057 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22058 index ebeafcc..1e3a402 100644
22059 --- a/arch/x86/lib/clear_page_64.S
22060 +++ b/arch/x86/lib/clear_page_64.S
22061 @@ -1,5 +1,6 @@
22062 #include <linux/linkage.h>
22063 #include <asm/dwarf2.h>
22064 +#include <asm/alternative-asm.h>
22065
22066 /*
22067 * Zero a page.
22068 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22069 movl $4096/8,%ecx
22070 xorl %eax,%eax
22071 rep stosq
22072 + pax_force_retaddr
22073 ret
22074 CFI_ENDPROC
22075 ENDPROC(clear_page_c)
22076 @@ -33,6 +35,7 @@ ENTRY(clear_page)
22077 leaq 64(%rdi),%rdi
22078 jnz .Lloop
22079 nop
22080 + pax_force_retaddr
22081 ret
22082 CFI_ENDPROC
22083 .Lclear_page_end:
22084 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
22085
22086 #include <asm/cpufeature.h>
22087
22088 - .section .altinstr_replacement,"ax"
22089 + .section .altinstr_replacement,"a"
22090 1: .byte 0xeb /* jmp <disp8> */
22091 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22092 2:
22093 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22094 index 727a5d4..333818a 100644
22095 --- a/arch/x86/lib/copy_page_64.S
22096 +++ b/arch/x86/lib/copy_page_64.S
22097 @@ -2,12 +2,14 @@
22098
22099 #include <linux/linkage.h>
22100 #include <asm/dwarf2.h>
22101 +#include <asm/alternative-asm.h>
22102
22103 ALIGN
22104 copy_page_c:
22105 CFI_STARTPROC
22106 movl $4096/8,%ecx
22107 rep movsq
22108 + pax_force_retaddr
22109 ret
22110 CFI_ENDPROC
22111 ENDPROC(copy_page_c)
22112 @@ -38,7 +40,7 @@ ENTRY(copy_page)
22113 movq 16 (%rsi), %rdx
22114 movq 24 (%rsi), %r8
22115 movq 32 (%rsi), %r9
22116 - movq 40 (%rsi), %r10
22117 + movq 40 (%rsi), %r13
22118 movq 48 (%rsi), %r11
22119 movq 56 (%rsi), %r12
22120
22121 @@ -49,7 +51,7 @@ ENTRY(copy_page)
22122 movq %rdx, 16 (%rdi)
22123 movq %r8, 24 (%rdi)
22124 movq %r9, 32 (%rdi)
22125 - movq %r10, 40 (%rdi)
22126 + movq %r13, 40 (%rdi)
22127 movq %r11, 48 (%rdi)
22128 movq %r12, 56 (%rdi)
22129
22130 @@ -68,7 +70,7 @@ ENTRY(copy_page)
22131 movq 16 (%rsi), %rdx
22132 movq 24 (%rsi), %r8
22133 movq 32 (%rsi), %r9
22134 - movq 40 (%rsi), %r10
22135 + movq 40 (%rsi), %r13
22136 movq 48 (%rsi), %r11
22137 movq 56 (%rsi), %r12
22138
22139 @@ -77,7 +79,7 @@ ENTRY(copy_page)
22140 movq %rdx, 16 (%rdi)
22141 movq %r8, 24 (%rdi)
22142 movq %r9, 32 (%rdi)
22143 - movq %r10, 40 (%rdi)
22144 + movq %r13, 40 (%rdi)
22145 movq %r11, 48 (%rdi)
22146 movq %r12, 56 (%rdi)
22147
22148 @@ -94,6 +96,7 @@ ENTRY(copy_page)
22149 CFI_RESTORE r13
22150 addq $3*8,%rsp
22151 CFI_ADJUST_CFA_OFFSET -3*8
22152 + pax_force_retaddr
22153 ret
22154 .Lcopy_page_end:
22155 CFI_ENDPROC
22156 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
22157
22158 #include <asm/cpufeature.h>
22159
22160 - .section .altinstr_replacement,"ax"
22161 + .section .altinstr_replacement,"a"
22162 1: .byte 0xeb /* jmp <disp8> */
22163 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22164 2:
22165 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22166 index af8debd..40c75f3 100644
22167 --- a/arch/x86/lib/copy_user_64.S
22168 +++ b/arch/x86/lib/copy_user_64.S
22169 @@ -15,13 +15,15 @@
22170 #include <asm/asm-offsets.h>
22171 #include <asm/thread_info.h>
22172 #include <asm/cpufeature.h>
22173 +#include <asm/pgtable.h>
22174 +#include <asm/alternative-asm.h>
22175
22176 .macro ALTERNATIVE_JUMP feature,orig,alt
22177 0:
22178 .byte 0xe9 /* 32bit jump */
22179 .long \orig-1f /* by default jump to orig */
22180 1:
22181 - .section .altinstr_replacement,"ax"
22182 + .section .altinstr_replacement,"a"
22183 2: .byte 0xe9 /* near jump with 32bit immediate */
22184 .long \alt-1b /* offset */ /* or alternatively to alt */
22185 .previous
22186 @@ -64,55 +66,26 @@
22187 #endif
22188 .endm
22189
22190 -/* Standard copy_to_user with segment limit checking */
22191 -ENTRY(copy_to_user)
22192 - CFI_STARTPROC
22193 - GET_THREAD_INFO(%rax)
22194 - movq %rdi,%rcx
22195 - addq %rdx,%rcx
22196 - jc bad_to_user
22197 - cmpq TI_addr_limit(%rax),%rcx
22198 - ja bad_to_user
22199 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22200 - CFI_ENDPROC
22201 -ENDPROC(copy_to_user)
22202 -
22203 -/* Standard copy_from_user with segment limit checking */
22204 -ENTRY(copy_from_user)
22205 - CFI_STARTPROC
22206 - GET_THREAD_INFO(%rax)
22207 - movq %rsi,%rcx
22208 - addq %rdx,%rcx
22209 - jc bad_from_user
22210 - cmpq TI_addr_limit(%rax),%rcx
22211 - ja bad_from_user
22212 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22213 - CFI_ENDPROC
22214 -ENDPROC(copy_from_user)
22215 -
22216 ENTRY(copy_user_generic)
22217 CFI_STARTPROC
22218 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22219 CFI_ENDPROC
22220 ENDPROC(copy_user_generic)
22221
22222 -ENTRY(__copy_from_user_inatomic)
22223 - CFI_STARTPROC
22224 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22225 - CFI_ENDPROC
22226 -ENDPROC(__copy_from_user_inatomic)
22227 -
22228 .section .fixup,"ax"
22229 /* must zero dest */
22230 ENTRY(bad_from_user)
22231 bad_from_user:
22232 CFI_STARTPROC
22233 + testl %edx,%edx
22234 + js bad_to_user
22235 movl %edx,%ecx
22236 xorl %eax,%eax
22237 rep
22238 stosb
22239 bad_to_user:
22240 movl %edx,%eax
22241 + pax_force_retaddr
22242 ret
22243 CFI_ENDPROC
22244 ENDPROC(bad_from_user)
22245 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22246 jz 17f
22247 1: movq (%rsi),%r8
22248 2: movq 1*8(%rsi),%r9
22249 -3: movq 2*8(%rsi),%r10
22250 +3: movq 2*8(%rsi),%rax
22251 4: movq 3*8(%rsi),%r11
22252 5: movq %r8,(%rdi)
22253 6: movq %r9,1*8(%rdi)
22254 -7: movq %r10,2*8(%rdi)
22255 +7: movq %rax,2*8(%rdi)
22256 8: movq %r11,3*8(%rdi)
22257 9: movq 4*8(%rsi),%r8
22258 10: movq 5*8(%rsi),%r9
22259 -11: movq 6*8(%rsi),%r10
22260 +11: movq 6*8(%rsi),%rax
22261 12: movq 7*8(%rsi),%r11
22262 13: movq %r8,4*8(%rdi)
22263 14: movq %r9,5*8(%rdi)
22264 -15: movq %r10,6*8(%rdi)
22265 +15: movq %rax,6*8(%rdi)
22266 16: movq %r11,7*8(%rdi)
22267 leaq 64(%rsi),%rsi
22268 leaq 64(%rdi),%rdi
22269 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22270 decl %ecx
22271 jnz 21b
22272 23: xor %eax,%eax
22273 + pax_force_retaddr
22274 ret
22275
22276 .section .fixup,"ax"
22277 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22278 3: rep
22279 movsb
22280 4: xorl %eax,%eax
22281 + pax_force_retaddr
22282 ret
22283
22284 .section .fixup,"ax"
22285 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22286 index cb0c112..e3a6895 100644
22287 --- a/arch/x86/lib/copy_user_nocache_64.S
22288 +++ b/arch/x86/lib/copy_user_nocache_64.S
22289 @@ -8,12 +8,14 @@
22290
22291 #include <linux/linkage.h>
22292 #include <asm/dwarf2.h>
22293 +#include <asm/alternative-asm.h>
22294
22295 #define FIX_ALIGNMENT 1
22296
22297 #include <asm/current.h>
22298 #include <asm/asm-offsets.h>
22299 #include <asm/thread_info.h>
22300 +#include <asm/pgtable.h>
22301
22302 .macro ALIGN_DESTINATION
22303 #ifdef FIX_ALIGNMENT
22304 @@ -50,6 +52,15 @@
22305 */
22306 ENTRY(__copy_user_nocache)
22307 CFI_STARTPROC
22308 +
22309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22310 + mov $PAX_USER_SHADOW_BASE,%rcx
22311 + cmp %rcx,%rsi
22312 + jae 1f
22313 + add %rcx,%rsi
22314 +1:
22315 +#endif
22316 +
22317 cmpl $8,%edx
22318 jb 20f /* less then 8 bytes, go to byte copy loop */
22319 ALIGN_DESTINATION
22320 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22321 jz 17f
22322 1: movq (%rsi),%r8
22323 2: movq 1*8(%rsi),%r9
22324 -3: movq 2*8(%rsi),%r10
22325 +3: movq 2*8(%rsi),%rax
22326 4: movq 3*8(%rsi),%r11
22327 5: movnti %r8,(%rdi)
22328 6: movnti %r9,1*8(%rdi)
22329 -7: movnti %r10,2*8(%rdi)
22330 +7: movnti %rax,2*8(%rdi)
22331 8: movnti %r11,3*8(%rdi)
22332 9: movq 4*8(%rsi),%r8
22333 10: movq 5*8(%rsi),%r9
22334 -11: movq 6*8(%rsi),%r10
22335 +11: movq 6*8(%rsi),%rax
22336 12: movq 7*8(%rsi),%r11
22337 13: movnti %r8,4*8(%rdi)
22338 14: movnti %r9,5*8(%rdi)
22339 -15: movnti %r10,6*8(%rdi)
22340 +15: movnti %rax,6*8(%rdi)
22341 16: movnti %r11,7*8(%rdi)
22342 leaq 64(%rsi),%rsi
22343 leaq 64(%rdi),%rdi
22344 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22345 jnz 21b
22346 23: xorl %eax,%eax
22347 sfence
22348 + pax_force_retaddr
22349 ret
22350
22351 .section .fixup,"ax"
22352 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22353 index f0dba36..48cb4d6 100644
22354 --- a/arch/x86/lib/csum-copy_64.S
22355 +++ b/arch/x86/lib/csum-copy_64.S
22356 @@ -8,6 +8,7 @@
22357 #include <linux/linkage.h>
22358 #include <asm/dwarf2.h>
22359 #include <asm/errno.h>
22360 +#include <asm/alternative-asm.h>
22361
22362 /*
22363 * Checksum copy with exception handling.
22364 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22365 CFI_RESTORE rbp
22366 addq $7*8,%rsp
22367 CFI_ADJUST_CFA_OFFSET -7*8
22368 + pax_force_retaddr 0, 1
22369 ret
22370 CFI_RESTORE_STATE
22371
22372 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22373 index 459b58a..9570bc7 100644
22374 --- a/arch/x86/lib/csum-wrappers_64.c
22375 +++ b/arch/x86/lib/csum-wrappers_64.c
22376 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22377 len -= 2;
22378 }
22379 }
22380 - isum = csum_partial_copy_generic((__force const void *)src,
22381 +
22382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22383 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22384 + src += PAX_USER_SHADOW_BASE;
22385 +#endif
22386 +
22387 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22388 dst, len, isum, errp, NULL);
22389 if (unlikely(*errp))
22390 goto out_err;
22391 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22392 }
22393
22394 *errp = 0;
22395 - return csum_partial_copy_generic(src, (void __force *)dst,
22396 +
22397 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22398 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22399 + dst += PAX_USER_SHADOW_BASE;
22400 +#endif
22401 +
22402 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22403 len, isum, NULL, errp);
22404 }
22405 EXPORT_SYMBOL(csum_partial_copy_to_user);
22406 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22407 index 51f1504..ddac4c1 100644
22408 --- a/arch/x86/lib/getuser.S
22409 +++ b/arch/x86/lib/getuser.S
22410 @@ -33,15 +33,38 @@
22411 #include <asm/asm-offsets.h>
22412 #include <asm/thread_info.h>
22413 #include <asm/asm.h>
22414 +#include <asm/segment.h>
22415 +#include <asm/pgtable.h>
22416 +#include <asm/alternative-asm.h>
22417 +
22418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22419 +#define __copyuser_seg gs;
22420 +#else
22421 +#define __copyuser_seg
22422 +#endif
22423
22424 .text
22425 ENTRY(__get_user_1)
22426 CFI_STARTPROC
22427 +
22428 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22429 GET_THREAD_INFO(%_ASM_DX)
22430 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22431 jae bad_get_user
22432 -1: movzb (%_ASM_AX),%edx
22433 +
22434 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22435 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22436 + cmp %_ASM_DX,%_ASM_AX
22437 + jae 1234f
22438 + add %_ASM_DX,%_ASM_AX
22439 +1234:
22440 +#endif
22441 +
22442 +#endif
22443 +
22444 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22445 xor %eax,%eax
22446 + pax_force_retaddr
22447 ret
22448 CFI_ENDPROC
22449 ENDPROC(__get_user_1)
22450 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22451 ENTRY(__get_user_2)
22452 CFI_STARTPROC
22453 add $1,%_ASM_AX
22454 +
22455 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22456 jc bad_get_user
22457 GET_THREAD_INFO(%_ASM_DX)
22458 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22459 jae bad_get_user
22460 -2: movzwl -1(%_ASM_AX),%edx
22461 +
22462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22463 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22464 + cmp %_ASM_DX,%_ASM_AX
22465 + jae 1234f
22466 + add %_ASM_DX,%_ASM_AX
22467 +1234:
22468 +#endif
22469 +
22470 +#endif
22471 +
22472 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22473 xor %eax,%eax
22474 + pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477 ENDPROC(__get_user_2)
22478 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22479 ENTRY(__get_user_4)
22480 CFI_STARTPROC
22481 add $3,%_ASM_AX
22482 +
22483 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22484 jc bad_get_user
22485 GET_THREAD_INFO(%_ASM_DX)
22486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22487 jae bad_get_user
22488 -3: mov -3(%_ASM_AX),%edx
22489 +
22490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22491 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22492 + cmp %_ASM_DX,%_ASM_AX
22493 + jae 1234f
22494 + add %_ASM_DX,%_ASM_AX
22495 +1234:
22496 +#endif
22497 +
22498 +#endif
22499 +
22500 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22501 xor %eax,%eax
22502 + pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 ENDPROC(__get_user_4)
22506 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22507 GET_THREAD_INFO(%_ASM_DX)
22508 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22509 jae bad_get_user
22510 +
22511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22512 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22513 + cmp %_ASM_DX,%_ASM_AX
22514 + jae 1234f
22515 + add %_ASM_DX,%_ASM_AX
22516 +1234:
22517 +#endif
22518 +
22519 4: movq -7(%_ASM_AX),%_ASM_DX
22520 xor %eax,%eax
22521 + pax_force_retaddr
22522 ret
22523 CFI_ENDPROC
22524 ENDPROC(__get_user_8)
22525 @@ -91,6 +152,7 @@ bad_get_user:
22526 CFI_STARTPROC
22527 xor %edx,%edx
22528 mov $(-EFAULT),%_ASM_AX
22529 + pax_force_retaddr
22530 ret
22531 CFI_ENDPROC
22532 END(bad_get_user)
22533 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22534 index 05a95e7..326f2fa 100644
22535 --- a/arch/x86/lib/iomap_copy_64.S
22536 +++ b/arch/x86/lib/iomap_copy_64.S
22537 @@ -17,6 +17,7 @@
22538
22539 #include <linux/linkage.h>
22540 #include <asm/dwarf2.h>
22541 +#include <asm/alternative-asm.h>
22542
22543 /*
22544 * override generic version in lib/iomap_copy.c
22545 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22546 CFI_STARTPROC
22547 movl %edx,%ecx
22548 rep movsd
22549 + pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(__iowrite32_copy)
22553 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22554 index ad5441e..610e351 100644
22555 --- a/arch/x86/lib/memcpy_64.S
22556 +++ b/arch/x86/lib/memcpy_64.S
22557 @@ -4,6 +4,7 @@
22558
22559 #include <asm/cpufeature.h>
22560 #include <asm/dwarf2.h>
22561 +#include <asm/alternative-asm.h>
22562
22563 /*
22564 * memcpy - Copy a memory block.
22565 @@ -34,6 +35,7 @@ memcpy_c:
22566 rep movsq
22567 movl %edx, %ecx
22568 rep movsb
22569 + pax_force_retaddr
22570 ret
22571 CFI_ENDPROC
22572 ENDPROC(memcpy_c)
22573 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22574 jnz .Lloop_1
22575
22576 .Lend:
22577 + pax_force_retaddr 0, 1
22578 ret
22579 CFI_ENDPROC
22580 ENDPROC(memcpy)
22581 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22582 * It is also a lot simpler. Use this when possible:
22583 */
22584
22585 - .section .altinstr_replacement, "ax"
22586 + .section .altinstr_replacement, "a"
22587 1: .byte 0xeb /* jmp <disp8> */
22588 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22589 2:
22590 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22591 index 2c59481..7e9ba4e 100644
22592 --- a/arch/x86/lib/memset_64.S
22593 +++ b/arch/x86/lib/memset_64.S
22594 @@ -2,6 +2,7 @@
22595
22596 #include <linux/linkage.h>
22597 #include <asm/dwarf2.h>
22598 +#include <asm/alternative-asm.h>
22599
22600 /*
22601 * ISO C memset - set a memory block to a byte value.
22602 @@ -28,6 +29,7 @@ memset_c:
22603 movl %r8d,%ecx
22604 rep stosb
22605 movq %r9,%rax
22606 + pax_force_retaddr
22607 ret
22608 CFI_ENDPROC
22609 ENDPROC(memset_c)
22610 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22611 ENTRY(memset)
22612 ENTRY(__memset)
22613 CFI_STARTPROC
22614 - movq %rdi,%r10
22615 movq %rdx,%r11
22616
22617 /* expand byte value */
22618 movzbl %sil,%ecx
22619 movabs $0x0101010101010101,%rax
22620 mul %rcx /* with rax, clobbers rdx */
22621 + movq %rdi,%rdx
22622
22623 /* align dst */
22624 movl %edi,%r9d
22625 @@ -95,7 +97,8 @@ ENTRY(__memset)
22626 jnz .Lloop_1
22627
22628 .Lende:
22629 - movq %r10,%rax
22630 + movq %rdx,%rax
22631 + pax_force_retaddr
22632 ret
22633
22634 CFI_RESTORE_STATE
22635 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22636
22637 #include <asm/cpufeature.h>
22638
22639 - .section .altinstr_replacement,"ax"
22640 + .section .altinstr_replacement,"a"
22641 1: .byte 0xeb /* jmp <disp8> */
22642 .byte (memset_c - memset) - (2f - 1b) /* offset */
22643 2:
22644 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22645 index c9f2d9b..e7fd2c0 100644
22646 --- a/arch/x86/lib/mmx_32.c
22647 +++ b/arch/x86/lib/mmx_32.c
22648 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22649 {
22650 void *p;
22651 int i;
22652 + unsigned long cr0;
22653
22654 if (unlikely(in_interrupt()))
22655 return __memcpy(to, from, len);
22656 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22657 kernel_fpu_begin();
22658
22659 __asm__ __volatile__ (
22660 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22661 - " prefetch 64(%0)\n"
22662 - " prefetch 128(%0)\n"
22663 - " prefetch 192(%0)\n"
22664 - " prefetch 256(%0)\n"
22665 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22666 + " prefetch 64(%1)\n"
22667 + " prefetch 128(%1)\n"
22668 + " prefetch 192(%1)\n"
22669 + " prefetch 256(%1)\n"
22670 "2: \n"
22671 ".section .fixup, \"ax\"\n"
22672 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22673 + "3: \n"
22674 +
22675 +#ifdef CONFIG_PAX_KERNEXEC
22676 + " movl %%cr0, %0\n"
22677 + " movl %0, %%eax\n"
22678 + " andl $0xFFFEFFFF, %%eax\n"
22679 + " movl %%eax, %%cr0\n"
22680 +#endif
22681 +
22682 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22683 +
22684 +#ifdef CONFIG_PAX_KERNEXEC
22685 + " movl %0, %%cr0\n"
22686 +#endif
22687 +
22688 " jmp 2b\n"
22689 ".previous\n"
22690 _ASM_EXTABLE(1b, 3b)
22691 - : : "r" (from));
22692 + : "=&r" (cr0) : "r" (from) : "ax");
22693
22694 for ( ; i > 5; i--) {
22695 __asm__ __volatile__ (
22696 - "1: prefetch 320(%0)\n"
22697 - "2: movq (%0), %%mm0\n"
22698 - " movq 8(%0), %%mm1\n"
22699 - " movq 16(%0), %%mm2\n"
22700 - " movq 24(%0), %%mm3\n"
22701 - " movq %%mm0, (%1)\n"
22702 - " movq %%mm1, 8(%1)\n"
22703 - " movq %%mm2, 16(%1)\n"
22704 - " movq %%mm3, 24(%1)\n"
22705 - " movq 32(%0), %%mm0\n"
22706 - " movq 40(%0), %%mm1\n"
22707 - " movq 48(%0), %%mm2\n"
22708 - " movq 56(%0), %%mm3\n"
22709 - " movq %%mm0, 32(%1)\n"
22710 - " movq %%mm1, 40(%1)\n"
22711 - " movq %%mm2, 48(%1)\n"
22712 - " movq %%mm3, 56(%1)\n"
22713 + "1: prefetch 320(%1)\n"
22714 + "2: movq (%1), %%mm0\n"
22715 + " movq 8(%1), %%mm1\n"
22716 + " movq 16(%1), %%mm2\n"
22717 + " movq 24(%1), %%mm3\n"
22718 + " movq %%mm0, (%2)\n"
22719 + " movq %%mm1, 8(%2)\n"
22720 + " movq %%mm2, 16(%2)\n"
22721 + " movq %%mm3, 24(%2)\n"
22722 + " movq 32(%1), %%mm0\n"
22723 + " movq 40(%1), %%mm1\n"
22724 + " movq 48(%1), %%mm2\n"
22725 + " movq 56(%1), %%mm3\n"
22726 + " movq %%mm0, 32(%2)\n"
22727 + " movq %%mm1, 40(%2)\n"
22728 + " movq %%mm2, 48(%2)\n"
22729 + " movq %%mm3, 56(%2)\n"
22730 ".section .fixup, \"ax\"\n"
22731 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22732 + "3:\n"
22733 +
22734 +#ifdef CONFIG_PAX_KERNEXEC
22735 + " movl %%cr0, %0\n"
22736 + " movl %0, %%eax\n"
22737 + " andl $0xFFFEFFFF, %%eax\n"
22738 + " movl %%eax, %%cr0\n"
22739 +#endif
22740 +
22741 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22742 +
22743 +#ifdef CONFIG_PAX_KERNEXEC
22744 + " movl %0, %%cr0\n"
22745 +#endif
22746 +
22747 " jmp 2b\n"
22748 ".previous\n"
22749 _ASM_EXTABLE(1b, 3b)
22750 - : : "r" (from), "r" (to) : "memory");
22751 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22752
22753 from += 64;
22754 to += 64;
22755 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22756 static void fast_copy_page(void *to, void *from)
22757 {
22758 int i;
22759 + unsigned long cr0;
22760
22761 kernel_fpu_begin();
22762
22763 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22764 * but that is for later. -AV
22765 */
22766 __asm__ __volatile__(
22767 - "1: prefetch (%0)\n"
22768 - " prefetch 64(%0)\n"
22769 - " prefetch 128(%0)\n"
22770 - " prefetch 192(%0)\n"
22771 - " prefetch 256(%0)\n"
22772 + "1: prefetch (%1)\n"
22773 + " prefetch 64(%1)\n"
22774 + " prefetch 128(%1)\n"
22775 + " prefetch 192(%1)\n"
22776 + " prefetch 256(%1)\n"
22777 "2: \n"
22778 ".section .fixup, \"ax\"\n"
22779 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22780 + "3: \n"
22781 +
22782 +#ifdef CONFIG_PAX_KERNEXEC
22783 + " movl %%cr0, %0\n"
22784 + " movl %0, %%eax\n"
22785 + " andl $0xFFFEFFFF, %%eax\n"
22786 + " movl %%eax, %%cr0\n"
22787 +#endif
22788 +
22789 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22790 +
22791 +#ifdef CONFIG_PAX_KERNEXEC
22792 + " movl %0, %%cr0\n"
22793 +#endif
22794 +
22795 " jmp 2b\n"
22796 ".previous\n"
22797 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22798 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22799
22800 for (i = 0; i < (4096-320)/64; i++) {
22801 __asm__ __volatile__ (
22802 - "1: prefetch 320(%0)\n"
22803 - "2: movq (%0), %%mm0\n"
22804 - " movntq %%mm0, (%1)\n"
22805 - " movq 8(%0), %%mm1\n"
22806 - " movntq %%mm1, 8(%1)\n"
22807 - " movq 16(%0), %%mm2\n"
22808 - " movntq %%mm2, 16(%1)\n"
22809 - " movq 24(%0), %%mm3\n"
22810 - " movntq %%mm3, 24(%1)\n"
22811 - " movq 32(%0), %%mm4\n"
22812 - " movntq %%mm4, 32(%1)\n"
22813 - " movq 40(%0), %%mm5\n"
22814 - " movntq %%mm5, 40(%1)\n"
22815 - " movq 48(%0), %%mm6\n"
22816 - " movntq %%mm6, 48(%1)\n"
22817 - " movq 56(%0), %%mm7\n"
22818 - " movntq %%mm7, 56(%1)\n"
22819 + "1: prefetch 320(%1)\n"
22820 + "2: movq (%1), %%mm0\n"
22821 + " movntq %%mm0, (%2)\n"
22822 + " movq 8(%1), %%mm1\n"
22823 + " movntq %%mm1, 8(%2)\n"
22824 + " movq 16(%1), %%mm2\n"
22825 + " movntq %%mm2, 16(%2)\n"
22826 + " movq 24(%1), %%mm3\n"
22827 + " movntq %%mm3, 24(%2)\n"
22828 + " movq 32(%1), %%mm4\n"
22829 + " movntq %%mm4, 32(%2)\n"
22830 + " movq 40(%1), %%mm5\n"
22831 + " movntq %%mm5, 40(%2)\n"
22832 + " movq 48(%1), %%mm6\n"
22833 + " movntq %%mm6, 48(%2)\n"
22834 + " movq 56(%1), %%mm7\n"
22835 + " movntq %%mm7, 56(%2)\n"
22836 ".section .fixup, \"ax\"\n"
22837 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22838 + "3:\n"
22839 +
22840 +#ifdef CONFIG_PAX_KERNEXEC
22841 + " movl %%cr0, %0\n"
22842 + " movl %0, %%eax\n"
22843 + " andl $0xFFFEFFFF, %%eax\n"
22844 + " movl %%eax, %%cr0\n"
22845 +#endif
22846 +
22847 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22848 +
22849 +#ifdef CONFIG_PAX_KERNEXEC
22850 + " movl %0, %%cr0\n"
22851 +#endif
22852 +
22853 " jmp 2b\n"
22854 ".previous\n"
22855 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22856 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22857
22858 from += 64;
22859 to += 64;
22860 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22861 static void fast_copy_page(void *to, void *from)
22862 {
22863 int i;
22864 + unsigned long cr0;
22865
22866 kernel_fpu_begin();
22867
22868 __asm__ __volatile__ (
22869 - "1: prefetch (%0)\n"
22870 - " prefetch 64(%0)\n"
22871 - " prefetch 128(%0)\n"
22872 - " prefetch 192(%0)\n"
22873 - " prefetch 256(%0)\n"
22874 + "1: prefetch (%1)\n"
22875 + " prefetch 64(%1)\n"
22876 + " prefetch 128(%1)\n"
22877 + " prefetch 192(%1)\n"
22878 + " prefetch 256(%1)\n"
22879 "2: \n"
22880 ".section .fixup, \"ax\"\n"
22881 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22882 + "3: \n"
22883 +
22884 +#ifdef CONFIG_PAX_KERNEXEC
22885 + " movl %%cr0, %0\n"
22886 + " movl %0, %%eax\n"
22887 + " andl $0xFFFEFFFF, %%eax\n"
22888 + " movl %%eax, %%cr0\n"
22889 +#endif
22890 +
22891 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22892 +
22893 +#ifdef CONFIG_PAX_KERNEXEC
22894 + " movl %0, %%cr0\n"
22895 +#endif
22896 +
22897 " jmp 2b\n"
22898 ".previous\n"
22899 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22900 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22901
22902 for (i = 0; i < 4096/64; i++) {
22903 __asm__ __volatile__ (
22904 - "1: prefetch 320(%0)\n"
22905 - "2: movq (%0), %%mm0\n"
22906 - " movq 8(%0), %%mm1\n"
22907 - " movq 16(%0), %%mm2\n"
22908 - " movq 24(%0), %%mm3\n"
22909 - " movq %%mm0, (%1)\n"
22910 - " movq %%mm1, 8(%1)\n"
22911 - " movq %%mm2, 16(%1)\n"
22912 - " movq %%mm3, 24(%1)\n"
22913 - " movq 32(%0), %%mm0\n"
22914 - " movq 40(%0), %%mm1\n"
22915 - " movq 48(%0), %%mm2\n"
22916 - " movq 56(%0), %%mm3\n"
22917 - " movq %%mm0, 32(%1)\n"
22918 - " movq %%mm1, 40(%1)\n"
22919 - " movq %%mm2, 48(%1)\n"
22920 - " movq %%mm3, 56(%1)\n"
22921 + "1: prefetch 320(%1)\n"
22922 + "2: movq (%1), %%mm0\n"
22923 + " movq 8(%1), %%mm1\n"
22924 + " movq 16(%1), %%mm2\n"
22925 + " movq 24(%1), %%mm3\n"
22926 + " movq %%mm0, (%2)\n"
22927 + " movq %%mm1, 8(%2)\n"
22928 + " movq %%mm2, 16(%2)\n"
22929 + " movq %%mm3, 24(%2)\n"
22930 + " movq 32(%1), %%mm0\n"
22931 + " movq 40(%1), %%mm1\n"
22932 + " movq 48(%1), %%mm2\n"
22933 + " movq 56(%1), %%mm3\n"
22934 + " movq %%mm0, 32(%2)\n"
22935 + " movq %%mm1, 40(%2)\n"
22936 + " movq %%mm2, 48(%2)\n"
22937 + " movq %%mm3, 56(%2)\n"
22938 ".section .fixup, \"ax\"\n"
22939 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22940 + "3:\n"
22941 +
22942 +#ifdef CONFIG_PAX_KERNEXEC
22943 + " movl %%cr0, %0\n"
22944 + " movl %0, %%eax\n"
22945 + " andl $0xFFFEFFFF, %%eax\n"
22946 + " movl %%eax, %%cr0\n"
22947 +#endif
22948 +
22949 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22950 +
22951 +#ifdef CONFIG_PAX_KERNEXEC
22952 + " movl %0, %%cr0\n"
22953 +#endif
22954 +
22955 " jmp 2b\n"
22956 ".previous\n"
22957 _ASM_EXTABLE(1b, 3b)
22958 - : : "r" (from), "r" (to) : "memory");
22959 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22960
22961 from += 64;
22962 to += 64;
22963 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22964 index 69fa106..adda88b 100644
22965 --- a/arch/x86/lib/msr-reg.S
22966 +++ b/arch/x86/lib/msr-reg.S
22967 @@ -3,6 +3,7 @@
22968 #include <asm/dwarf2.h>
22969 #include <asm/asm.h>
22970 #include <asm/msr.h>
22971 +#include <asm/alternative-asm.h>
22972
22973 #ifdef CONFIG_X86_64
22974 /*
22975 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22976 CFI_STARTPROC
22977 pushq_cfi %rbx
22978 pushq_cfi %rbp
22979 - movq %rdi, %r10 /* Save pointer */
22980 + movq %rdi, %r9 /* Save pointer */
22981 xorl %r11d, %r11d /* Return value */
22982 movl (%rdi), %eax
22983 movl 4(%rdi), %ecx
22984 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22985 movl 28(%rdi), %edi
22986 CFI_REMEMBER_STATE
22987 1: \op
22988 -2: movl %eax, (%r10)
22989 +2: movl %eax, (%r9)
22990 movl %r11d, %eax /* Return value */
22991 - movl %ecx, 4(%r10)
22992 - movl %edx, 8(%r10)
22993 - movl %ebx, 12(%r10)
22994 - movl %ebp, 20(%r10)
22995 - movl %esi, 24(%r10)
22996 - movl %edi, 28(%r10)
22997 + movl %ecx, 4(%r9)
22998 + movl %edx, 8(%r9)
22999 + movl %ebx, 12(%r9)
23000 + movl %ebp, 20(%r9)
23001 + movl %esi, 24(%r9)
23002 + movl %edi, 28(%r9)
23003 popq_cfi %rbp
23004 popq_cfi %rbx
23005 + pax_force_retaddr
23006 ret
23007 3:
23008 CFI_RESTORE_STATE
23009 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23010 index 36b0d15..d381858 100644
23011 --- a/arch/x86/lib/putuser.S
23012 +++ b/arch/x86/lib/putuser.S
23013 @@ -15,7 +15,9 @@
23014 #include <asm/thread_info.h>
23015 #include <asm/errno.h>
23016 #include <asm/asm.h>
23017 -
23018 +#include <asm/segment.h>
23019 +#include <asm/pgtable.h>
23020 +#include <asm/alternative-asm.h>
23021
23022 /*
23023 * __put_user_X
23024 @@ -29,52 +31,119 @@
23025 * as they get called from within inline assembly.
23026 */
23027
23028 -#define ENTER CFI_STARTPROC ; \
23029 - GET_THREAD_INFO(%_ASM_BX)
23030 -#define EXIT ret ; \
23031 +#define ENTER CFI_STARTPROC
23032 +#define EXIT pax_force_retaddr; ret ; \
23033 CFI_ENDPROC
23034
23035 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23036 +#define _DEST %_ASM_CX,%_ASM_BX
23037 +#else
23038 +#define _DEST %_ASM_CX
23039 +#endif
23040 +
23041 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23042 +#define __copyuser_seg gs;
23043 +#else
23044 +#define __copyuser_seg
23045 +#endif
23046 +
23047 .text
23048 ENTRY(__put_user_1)
23049 ENTER
23050 +
23051 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23052 + GET_THREAD_INFO(%_ASM_BX)
23053 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23054 jae bad_put_user
23055 -1: movb %al,(%_ASM_CX)
23056 +
23057 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23058 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23059 + cmp %_ASM_BX,%_ASM_CX
23060 + jb 1234f
23061 + xor %ebx,%ebx
23062 +1234:
23063 +#endif
23064 +
23065 +#endif
23066 +
23067 +1: __copyuser_seg movb %al,(_DEST)
23068 xor %eax,%eax
23069 EXIT
23070 ENDPROC(__put_user_1)
23071
23072 ENTRY(__put_user_2)
23073 ENTER
23074 +
23075 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23076 + GET_THREAD_INFO(%_ASM_BX)
23077 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23078 sub $1,%_ASM_BX
23079 cmp %_ASM_BX,%_ASM_CX
23080 jae bad_put_user
23081 -2: movw %ax,(%_ASM_CX)
23082 +
23083 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23084 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23085 + cmp %_ASM_BX,%_ASM_CX
23086 + jb 1234f
23087 + xor %ebx,%ebx
23088 +1234:
23089 +#endif
23090 +
23091 +#endif
23092 +
23093 +2: __copyuser_seg movw %ax,(_DEST)
23094 xor %eax,%eax
23095 EXIT
23096 ENDPROC(__put_user_2)
23097
23098 ENTRY(__put_user_4)
23099 ENTER
23100 +
23101 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23102 + GET_THREAD_INFO(%_ASM_BX)
23103 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23104 sub $3,%_ASM_BX
23105 cmp %_ASM_BX,%_ASM_CX
23106 jae bad_put_user
23107 -3: movl %eax,(%_ASM_CX)
23108 +
23109 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23110 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23111 + cmp %_ASM_BX,%_ASM_CX
23112 + jb 1234f
23113 + xor %ebx,%ebx
23114 +1234:
23115 +#endif
23116 +
23117 +#endif
23118 +
23119 +3: __copyuser_seg movl %eax,(_DEST)
23120 xor %eax,%eax
23121 EXIT
23122 ENDPROC(__put_user_4)
23123
23124 ENTRY(__put_user_8)
23125 ENTER
23126 +
23127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23128 + GET_THREAD_INFO(%_ASM_BX)
23129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23130 sub $7,%_ASM_BX
23131 cmp %_ASM_BX,%_ASM_CX
23132 jae bad_put_user
23133 -4: mov %_ASM_AX,(%_ASM_CX)
23134 +
23135 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23136 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23137 + cmp %_ASM_BX,%_ASM_CX
23138 + jb 1234f
23139 + xor %ebx,%ebx
23140 +1234:
23141 +#endif
23142 +
23143 +#endif
23144 +
23145 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23146 #ifdef CONFIG_X86_32
23147 -5: movl %edx,4(%_ASM_CX)
23148 +5: __copyuser_seg movl %edx,4(_DEST)
23149 #endif
23150 xor %eax,%eax
23151 EXIT
23152 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23153 index 05ea55f..6345b9a 100644
23154 --- a/arch/x86/lib/rwlock_64.S
23155 +++ b/arch/x86/lib/rwlock_64.S
23156 @@ -2,6 +2,7 @@
23157
23158 #include <linux/linkage.h>
23159 #include <asm/rwlock.h>
23160 +#include <asm/asm.h>
23161 #include <asm/alternative-asm.h>
23162 #include <asm/dwarf2.h>
23163
23164 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23165 CFI_STARTPROC
23166 LOCK_PREFIX
23167 addl $RW_LOCK_BIAS,(%rdi)
23168 +
23169 +#ifdef CONFIG_PAX_REFCOUNT
23170 + jno 1234f
23171 + LOCK_PREFIX
23172 + subl $RW_LOCK_BIAS,(%rdi)
23173 + int $4
23174 +1234:
23175 + _ASM_EXTABLE(1234b, 1234b)
23176 +#endif
23177 +
23178 1: rep
23179 nop
23180 cmpl $RW_LOCK_BIAS,(%rdi)
23181 jne 1b
23182 LOCK_PREFIX
23183 subl $RW_LOCK_BIAS,(%rdi)
23184 +
23185 +#ifdef CONFIG_PAX_REFCOUNT
23186 + jno 1234f
23187 + LOCK_PREFIX
23188 + addl $RW_LOCK_BIAS,(%rdi)
23189 + int $4
23190 +1234:
23191 + _ASM_EXTABLE(1234b, 1234b)
23192 +#endif
23193 +
23194 jnz __write_lock_failed
23195 + pax_force_retaddr
23196 ret
23197 CFI_ENDPROC
23198 END(__write_lock_failed)
23199 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23200 CFI_STARTPROC
23201 LOCK_PREFIX
23202 incl (%rdi)
23203 +
23204 +#ifdef CONFIG_PAX_REFCOUNT
23205 + jno 1234f
23206 + LOCK_PREFIX
23207 + decl (%rdi)
23208 + int $4
23209 +1234:
23210 + _ASM_EXTABLE(1234b, 1234b)
23211 +#endif
23212 +
23213 1: rep
23214 nop
23215 cmpl $1,(%rdi)
23216 js 1b
23217 LOCK_PREFIX
23218 decl (%rdi)
23219 +
23220 +#ifdef CONFIG_PAX_REFCOUNT
23221 + jno 1234f
23222 + LOCK_PREFIX
23223 + incl (%rdi)
23224 + int $4
23225 +1234:
23226 + _ASM_EXTABLE(1234b, 1234b)
23227 +#endif
23228 +
23229 js __read_lock_failed
23230 + pax_force_retaddr
23231 ret
23232 CFI_ENDPROC
23233 END(__read_lock_failed)
23234 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23235 index 15acecf..f768b10 100644
23236 --- a/arch/x86/lib/rwsem_64.S
23237 +++ b/arch/x86/lib/rwsem_64.S
23238 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23239 call rwsem_down_read_failed
23240 popq %rdx
23241 restore_common_regs
23242 + pax_force_retaddr
23243 ret
23244 ENDPROC(call_rwsem_down_read_failed)
23245
23246 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23247 movq %rax,%rdi
23248 call rwsem_down_write_failed
23249 restore_common_regs
23250 + pax_force_retaddr
23251 ret
23252 ENDPROC(call_rwsem_down_write_failed)
23253
23254 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23255 movq %rax,%rdi
23256 call rwsem_wake
23257 restore_common_regs
23258 -1: ret
23259 +1: pax_force_retaddr
23260 + ret
23261 ENDPROC(call_rwsem_wake)
23262
23263 /* Fix up special calling conventions */
23264 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23265 call rwsem_downgrade_wake
23266 popq %rdx
23267 restore_common_regs
23268 + pax_force_retaddr
23269 ret
23270 ENDPROC(call_rwsem_downgrade_wake)
23271 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23272 index bf9a7d5..fb06ab5 100644
23273 --- a/arch/x86/lib/thunk_64.S
23274 +++ b/arch/x86/lib/thunk_64.S
23275 @@ -10,7 +10,8 @@
23276 #include <asm/dwarf2.h>
23277 #include <asm/calling.h>
23278 #include <asm/rwlock.h>
23279 -
23280 + #include <asm/alternative-asm.h>
23281 +
23282 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23283 .macro thunk name,func
23284 .globl \name
23285 @@ -70,6 +71,7 @@
23286 SAVE_ARGS
23287 restore:
23288 RESTORE_ARGS
23289 + pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292
23293 @@ -77,5 +79,6 @@ restore:
23294 SAVE_ARGS
23295 restore_norax:
23296 RESTORE_ARGS 1
23297 + pax_force_retaddr
23298 ret
23299 CFI_ENDPROC
23300 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23301 index 1f118d4..ec4a953 100644
23302 --- a/arch/x86/lib/usercopy_32.c
23303 +++ b/arch/x86/lib/usercopy_32.c
23304 @@ -43,7 +43,7 @@ do { \
23305 __asm__ __volatile__( \
23306 " testl %1,%1\n" \
23307 " jz 2f\n" \
23308 - "0: lodsb\n" \
23309 + "0: "__copyuser_seg"lodsb\n" \
23310 " stosb\n" \
23311 " testb %%al,%%al\n" \
23312 " jz 1f\n" \
23313 @@ -128,10 +128,12 @@ do { \
23314 int __d0; \
23315 might_fault(); \
23316 __asm__ __volatile__( \
23317 + __COPYUSER_SET_ES \
23318 "0: rep; stosl\n" \
23319 " movl %2,%0\n" \
23320 "1: rep; stosb\n" \
23321 "2:\n" \
23322 + __COPYUSER_RESTORE_ES \
23323 ".section .fixup,\"ax\"\n" \
23324 "3: lea 0(%2,%0,4),%0\n" \
23325 " jmp 2b\n" \
23326 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23327 might_fault();
23328
23329 __asm__ __volatile__(
23330 + __COPYUSER_SET_ES
23331 " testl %0, %0\n"
23332 " jz 3f\n"
23333 " andl %0,%%ecx\n"
23334 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23335 " subl %%ecx,%0\n"
23336 " addl %0,%%eax\n"
23337 "1:\n"
23338 + __COPYUSER_RESTORE_ES
23339 ".section .fixup,\"ax\"\n"
23340 "2: xorl %%eax,%%eax\n"
23341 " jmp 1b\n"
23342 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23343
23344 #ifdef CONFIG_X86_INTEL_USERCOPY
23345 static unsigned long
23346 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23347 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23348 {
23349 int d0, d1;
23350 __asm__ __volatile__(
23351 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23352 " .align 2,0x90\n"
23353 "3: movl 0(%4), %%eax\n"
23354 "4: movl 4(%4), %%edx\n"
23355 - "5: movl %%eax, 0(%3)\n"
23356 - "6: movl %%edx, 4(%3)\n"
23357 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23358 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23359 "7: movl 8(%4), %%eax\n"
23360 "8: movl 12(%4),%%edx\n"
23361 - "9: movl %%eax, 8(%3)\n"
23362 - "10: movl %%edx, 12(%3)\n"
23363 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23364 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23365 "11: movl 16(%4), %%eax\n"
23366 "12: movl 20(%4), %%edx\n"
23367 - "13: movl %%eax, 16(%3)\n"
23368 - "14: movl %%edx, 20(%3)\n"
23369 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23370 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23371 "15: movl 24(%4), %%eax\n"
23372 "16: movl 28(%4), %%edx\n"
23373 - "17: movl %%eax, 24(%3)\n"
23374 - "18: movl %%edx, 28(%3)\n"
23375 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23376 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23377 "19: movl 32(%4), %%eax\n"
23378 "20: movl 36(%4), %%edx\n"
23379 - "21: movl %%eax, 32(%3)\n"
23380 - "22: movl %%edx, 36(%3)\n"
23381 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23382 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23383 "23: movl 40(%4), %%eax\n"
23384 "24: movl 44(%4), %%edx\n"
23385 - "25: movl %%eax, 40(%3)\n"
23386 - "26: movl %%edx, 44(%3)\n"
23387 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23388 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23389 "27: movl 48(%4), %%eax\n"
23390 "28: movl 52(%4), %%edx\n"
23391 - "29: movl %%eax, 48(%3)\n"
23392 - "30: movl %%edx, 52(%3)\n"
23393 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23394 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23395 "31: movl 56(%4), %%eax\n"
23396 "32: movl 60(%4), %%edx\n"
23397 - "33: movl %%eax, 56(%3)\n"
23398 - "34: movl %%edx, 60(%3)\n"
23399 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23400 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23401 " addl $-64, %0\n"
23402 " addl $64, %4\n"
23403 " addl $64, %3\n"
23404 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23405 " shrl $2, %0\n"
23406 " andl $3, %%eax\n"
23407 " cld\n"
23408 + __COPYUSER_SET_ES
23409 "99: rep; movsl\n"
23410 "36: movl %%eax, %0\n"
23411 "37: rep; movsb\n"
23412 "100:\n"
23413 + __COPYUSER_RESTORE_ES
23414 + ".section .fixup,\"ax\"\n"
23415 + "101: lea 0(%%eax,%0,4),%0\n"
23416 + " jmp 100b\n"
23417 + ".previous\n"
23418 + ".section __ex_table,\"a\"\n"
23419 + " .align 4\n"
23420 + " .long 1b,100b\n"
23421 + " .long 2b,100b\n"
23422 + " .long 3b,100b\n"
23423 + " .long 4b,100b\n"
23424 + " .long 5b,100b\n"
23425 + " .long 6b,100b\n"
23426 + " .long 7b,100b\n"
23427 + " .long 8b,100b\n"
23428 + " .long 9b,100b\n"
23429 + " .long 10b,100b\n"
23430 + " .long 11b,100b\n"
23431 + " .long 12b,100b\n"
23432 + " .long 13b,100b\n"
23433 + " .long 14b,100b\n"
23434 + " .long 15b,100b\n"
23435 + " .long 16b,100b\n"
23436 + " .long 17b,100b\n"
23437 + " .long 18b,100b\n"
23438 + " .long 19b,100b\n"
23439 + " .long 20b,100b\n"
23440 + " .long 21b,100b\n"
23441 + " .long 22b,100b\n"
23442 + " .long 23b,100b\n"
23443 + " .long 24b,100b\n"
23444 + " .long 25b,100b\n"
23445 + " .long 26b,100b\n"
23446 + " .long 27b,100b\n"
23447 + " .long 28b,100b\n"
23448 + " .long 29b,100b\n"
23449 + " .long 30b,100b\n"
23450 + " .long 31b,100b\n"
23451 + " .long 32b,100b\n"
23452 + " .long 33b,100b\n"
23453 + " .long 34b,100b\n"
23454 + " .long 35b,100b\n"
23455 + " .long 36b,100b\n"
23456 + " .long 37b,100b\n"
23457 + " .long 99b,101b\n"
23458 + ".previous"
23459 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23460 + : "1"(to), "2"(from), "0"(size)
23461 + : "eax", "edx", "memory");
23462 + return size;
23463 +}
23464 +
23465 +static unsigned long
23466 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23467 +{
23468 + int d0, d1;
23469 + __asm__ __volatile__(
23470 + " .align 2,0x90\n"
23471 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23472 + " cmpl $67, %0\n"
23473 + " jbe 3f\n"
23474 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23475 + " .align 2,0x90\n"
23476 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23477 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23478 + "5: movl %%eax, 0(%3)\n"
23479 + "6: movl %%edx, 4(%3)\n"
23480 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23481 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23482 + "9: movl %%eax, 8(%3)\n"
23483 + "10: movl %%edx, 12(%3)\n"
23484 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23485 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23486 + "13: movl %%eax, 16(%3)\n"
23487 + "14: movl %%edx, 20(%3)\n"
23488 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23489 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23490 + "17: movl %%eax, 24(%3)\n"
23491 + "18: movl %%edx, 28(%3)\n"
23492 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23493 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23494 + "21: movl %%eax, 32(%3)\n"
23495 + "22: movl %%edx, 36(%3)\n"
23496 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23497 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23498 + "25: movl %%eax, 40(%3)\n"
23499 + "26: movl %%edx, 44(%3)\n"
23500 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23501 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23502 + "29: movl %%eax, 48(%3)\n"
23503 + "30: movl %%edx, 52(%3)\n"
23504 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23505 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23506 + "33: movl %%eax, 56(%3)\n"
23507 + "34: movl %%edx, 60(%3)\n"
23508 + " addl $-64, %0\n"
23509 + " addl $64, %4\n"
23510 + " addl $64, %3\n"
23511 + " cmpl $63, %0\n"
23512 + " ja 1b\n"
23513 + "35: movl %0, %%eax\n"
23514 + " shrl $2, %0\n"
23515 + " andl $3, %%eax\n"
23516 + " cld\n"
23517 + "99: rep; "__copyuser_seg" movsl\n"
23518 + "36: movl %%eax, %0\n"
23519 + "37: rep; "__copyuser_seg" movsb\n"
23520 + "100:\n"
23521 ".section .fixup,\"ax\"\n"
23522 "101: lea 0(%%eax,%0,4),%0\n"
23523 " jmp 100b\n"
23524 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23525 int d0, d1;
23526 __asm__ __volatile__(
23527 " .align 2,0x90\n"
23528 - "0: movl 32(%4), %%eax\n"
23529 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23530 " cmpl $67, %0\n"
23531 " jbe 2f\n"
23532 - "1: movl 64(%4), %%eax\n"
23533 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23534 " .align 2,0x90\n"
23535 - "2: movl 0(%4), %%eax\n"
23536 - "21: movl 4(%4), %%edx\n"
23537 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23538 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23539 " movl %%eax, 0(%3)\n"
23540 " movl %%edx, 4(%3)\n"
23541 - "3: movl 8(%4), %%eax\n"
23542 - "31: movl 12(%4),%%edx\n"
23543 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23544 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23545 " movl %%eax, 8(%3)\n"
23546 " movl %%edx, 12(%3)\n"
23547 - "4: movl 16(%4), %%eax\n"
23548 - "41: movl 20(%4), %%edx\n"
23549 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23550 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23551 " movl %%eax, 16(%3)\n"
23552 " movl %%edx, 20(%3)\n"
23553 - "10: movl 24(%4), %%eax\n"
23554 - "51: movl 28(%4), %%edx\n"
23555 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23556 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23557 " movl %%eax, 24(%3)\n"
23558 " movl %%edx, 28(%3)\n"
23559 - "11: movl 32(%4), %%eax\n"
23560 - "61: movl 36(%4), %%edx\n"
23561 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23562 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23563 " movl %%eax, 32(%3)\n"
23564 " movl %%edx, 36(%3)\n"
23565 - "12: movl 40(%4), %%eax\n"
23566 - "71: movl 44(%4), %%edx\n"
23567 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23568 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23569 " movl %%eax, 40(%3)\n"
23570 " movl %%edx, 44(%3)\n"
23571 - "13: movl 48(%4), %%eax\n"
23572 - "81: movl 52(%4), %%edx\n"
23573 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23574 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23575 " movl %%eax, 48(%3)\n"
23576 " movl %%edx, 52(%3)\n"
23577 - "14: movl 56(%4), %%eax\n"
23578 - "91: movl 60(%4), %%edx\n"
23579 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23580 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23581 " movl %%eax, 56(%3)\n"
23582 " movl %%edx, 60(%3)\n"
23583 " addl $-64, %0\n"
23584 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23585 " shrl $2, %0\n"
23586 " andl $3, %%eax\n"
23587 " cld\n"
23588 - "6: rep; movsl\n"
23589 + "6: rep; "__copyuser_seg" movsl\n"
23590 " movl %%eax,%0\n"
23591 - "7: rep; movsb\n"
23592 + "7: rep; "__copyuser_seg" movsb\n"
23593 "8:\n"
23594 ".section .fixup,\"ax\"\n"
23595 "9: lea 0(%%eax,%0,4),%0\n"
23596 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23597
23598 __asm__ __volatile__(
23599 " .align 2,0x90\n"
23600 - "0: movl 32(%4), %%eax\n"
23601 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23602 " cmpl $67, %0\n"
23603 " jbe 2f\n"
23604 - "1: movl 64(%4), %%eax\n"
23605 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23606 " .align 2,0x90\n"
23607 - "2: movl 0(%4), %%eax\n"
23608 - "21: movl 4(%4), %%edx\n"
23609 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23610 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23611 " movnti %%eax, 0(%3)\n"
23612 " movnti %%edx, 4(%3)\n"
23613 - "3: movl 8(%4), %%eax\n"
23614 - "31: movl 12(%4),%%edx\n"
23615 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23616 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23617 " movnti %%eax, 8(%3)\n"
23618 " movnti %%edx, 12(%3)\n"
23619 - "4: movl 16(%4), %%eax\n"
23620 - "41: movl 20(%4), %%edx\n"
23621 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23622 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23623 " movnti %%eax, 16(%3)\n"
23624 " movnti %%edx, 20(%3)\n"
23625 - "10: movl 24(%4), %%eax\n"
23626 - "51: movl 28(%4), %%edx\n"
23627 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23628 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23629 " movnti %%eax, 24(%3)\n"
23630 " movnti %%edx, 28(%3)\n"
23631 - "11: movl 32(%4), %%eax\n"
23632 - "61: movl 36(%4), %%edx\n"
23633 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23634 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23635 " movnti %%eax, 32(%3)\n"
23636 " movnti %%edx, 36(%3)\n"
23637 - "12: movl 40(%4), %%eax\n"
23638 - "71: movl 44(%4), %%edx\n"
23639 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23640 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23641 " movnti %%eax, 40(%3)\n"
23642 " movnti %%edx, 44(%3)\n"
23643 - "13: movl 48(%4), %%eax\n"
23644 - "81: movl 52(%4), %%edx\n"
23645 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23646 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23647 " movnti %%eax, 48(%3)\n"
23648 " movnti %%edx, 52(%3)\n"
23649 - "14: movl 56(%4), %%eax\n"
23650 - "91: movl 60(%4), %%edx\n"
23651 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23652 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23653 " movnti %%eax, 56(%3)\n"
23654 " movnti %%edx, 60(%3)\n"
23655 " addl $-64, %0\n"
23656 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23657 " shrl $2, %0\n"
23658 " andl $3, %%eax\n"
23659 " cld\n"
23660 - "6: rep; movsl\n"
23661 + "6: rep; "__copyuser_seg" movsl\n"
23662 " movl %%eax,%0\n"
23663 - "7: rep; movsb\n"
23664 + "7: rep; "__copyuser_seg" movsb\n"
23665 "8:\n"
23666 ".section .fixup,\"ax\"\n"
23667 "9: lea 0(%%eax,%0,4),%0\n"
23668 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23669
23670 __asm__ __volatile__(
23671 " .align 2,0x90\n"
23672 - "0: movl 32(%4), %%eax\n"
23673 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23674 " cmpl $67, %0\n"
23675 " jbe 2f\n"
23676 - "1: movl 64(%4), %%eax\n"
23677 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23678 " .align 2,0x90\n"
23679 - "2: movl 0(%4), %%eax\n"
23680 - "21: movl 4(%4), %%edx\n"
23681 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23682 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23683 " movnti %%eax, 0(%3)\n"
23684 " movnti %%edx, 4(%3)\n"
23685 - "3: movl 8(%4), %%eax\n"
23686 - "31: movl 12(%4),%%edx\n"
23687 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23688 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23689 " movnti %%eax, 8(%3)\n"
23690 " movnti %%edx, 12(%3)\n"
23691 - "4: movl 16(%4), %%eax\n"
23692 - "41: movl 20(%4), %%edx\n"
23693 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23694 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23695 " movnti %%eax, 16(%3)\n"
23696 " movnti %%edx, 20(%3)\n"
23697 - "10: movl 24(%4), %%eax\n"
23698 - "51: movl 28(%4), %%edx\n"
23699 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23700 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23701 " movnti %%eax, 24(%3)\n"
23702 " movnti %%edx, 28(%3)\n"
23703 - "11: movl 32(%4), %%eax\n"
23704 - "61: movl 36(%4), %%edx\n"
23705 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23706 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23707 " movnti %%eax, 32(%3)\n"
23708 " movnti %%edx, 36(%3)\n"
23709 - "12: movl 40(%4), %%eax\n"
23710 - "71: movl 44(%4), %%edx\n"
23711 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23712 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23713 " movnti %%eax, 40(%3)\n"
23714 " movnti %%edx, 44(%3)\n"
23715 - "13: movl 48(%4), %%eax\n"
23716 - "81: movl 52(%4), %%edx\n"
23717 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23718 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23719 " movnti %%eax, 48(%3)\n"
23720 " movnti %%edx, 52(%3)\n"
23721 - "14: movl 56(%4), %%eax\n"
23722 - "91: movl 60(%4), %%edx\n"
23723 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23724 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23725 " movnti %%eax, 56(%3)\n"
23726 " movnti %%edx, 60(%3)\n"
23727 " addl $-64, %0\n"
23728 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23729 " shrl $2, %0\n"
23730 " andl $3, %%eax\n"
23731 " cld\n"
23732 - "6: rep; movsl\n"
23733 + "6: rep; "__copyuser_seg" movsl\n"
23734 " movl %%eax,%0\n"
23735 - "7: rep; movsb\n"
23736 + "7: rep; "__copyuser_seg" movsb\n"
23737 "8:\n"
23738 ".section .fixup,\"ax\"\n"
23739 "9: lea 0(%%eax,%0,4),%0\n"
23740 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23741 */
23742 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23743 unsigned long size);
23744 -unsigned long __copy_user_intel(void __user *to, const void *from,
23745 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23746 + unsigned long size);
23747 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23748 unsigned long size);
23749 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23750 const void __user *from, unsigned long size);
23751 #endif /* CONFIG_X86_INTEL_USERCOPY */
23752
23753 /* Generic arbitrary sized copy. */
23754 -#define __copy_user(to, from, size) \
23755 +#define __copy_user(to, from, size, prefix, set, restore) \
23756 do { \
23757 int __d0, __d1, __d2; \
23758 __asm__ __volatile__( \
23759 + set \
23760 " cmp $7,%0\n" \
23761 " jbe 1f\n" \
23762 " movl %1,%0\n" \
23763 " negl %0\n" \
23764 " andl $7,%0\n" \
23765 " subl %0,%3\n" \
23766 - "4: rep; movsb\n" \
23767 + "4: rep; "prefix"movsb\n" \
23768 " movl %3,%0\n" \
23769 " shrl $2,%0\n" \
23770 " andl $3,%3\n" \
23771 " .align 2,0x90\n" \
23772 - "0: rep; movsl\n" \
23773 + "0: rep; "prefix"movsl\n" \
23774 " movl %3,%0\n" \
23775 - "1: rep; movsb\n" \
23776 + "1: rep; "prefix"movsb\n" \
23777 "2:\n" \
23778 + restore \
23779 ".section .fixup,\"ax\"\n" \
23780 "5: addl %3,%0\n" \
23781 " jmp 2b\n" \
23782 @@ -682,14 +799,14 @@ do { \
23783 " negl %0\n" \
23784 " andl $7,%0\n" \
23785 " subl %0,%3\n" \
23786 - "4: rep; movsb\n" \
23787 + "4: rep; "__copyuser_seg"movsb\n" \
23788 " movl %3,%0\n" \
23789 " shrl $2,%0\n" \
23790 " andl $3,%3\n" \
23791 " .align 2,0x90\n" \
23792 - "0: rep; movsl\n" \
23793 + "0: rep; "__copyuser_seg"movsl\n" \
23794 " movl %3,%0\n" \
23795 - "1: rep; movsb\n" \
23796 + "1: rep; "__copyuser_seg"movsb\n" \
23797 "2:\n" \
23798 ".section .fixup,\"ax\"\n" \
23799 "5: addl %3,%0\n" \
23800 @@ -775,9 +892,9 @@ survive:
23801 }
23802 #endif
23803 if (movsl_is_ok(to, from, n))
23804 - __copy_user(to, from, n);
23805 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23806 else
23807 - n = __copy_user_intel(to, from, n);
23808 + n = __generic_copy_to_user_intel(to, from, n);
23809 return n;
23810 }
23811 EXPORT_SYMBOL(__copy_to_user_ll);
23812 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23813 unsigned long n)
23814 {
23815 if (movsl_is_ok(to, from, n))
23816 - __copy_user(to, from, n);
23817 + __copy_user(to, from, n, __copyuser_seg, "", "");
23818 else
23819 - n = __copy_user_intel((void __user *)to,
23820 - (const void *)from, n);
23821 + n = __generic_copy_from_user_intel(to, from, n);
23822 return n;
23823 }
23824 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23825 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23826 if (n > 64 && cpu_has_xmm2)
23827 n = __copy_user_intel_nocache(to, from, n);
23828 else
23829 - __copy_user(to, from, n);
23830 + __copy_user(to, from, n, __copyuser_seg, "", "");
23831 #else
23832 - __copy_user(to, from, n);
23833 + __copy_user(to, from, n, __copyuser_seg, "", "");
23834 #endif
23835 return n;
23836 }
23837 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23838
23839 -/**
23840 - * copy_to_user: - Copy a block of data into user space.
23841 - * @to: Destination address, in user space.
23842 - * @from: Source address, in kernel space.
23843 - * @n: Number of bytes to copy.
23844 - *
23845 - * Context: User context only. This function may sleep.
23846 - *
23847 - * Copy data from kernel space to user space.
23848 - *
23849 - * Returns number of bytes that could not be copied.
23850 - * On success, this will be zero.
23851 - */
23852 -unsigned long
23853 -copy_to_user(void __user *to, const void *from, unsigned long n)
23854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23855 +void __set_fs(mm_segment_t x)
23856 {
23857 - if (access_ok(VERIFY_WRITE, to, n))
23858 - n = __copy_to_user(to, from, n);
23859 - return n;
23860 + switch (x.seg) {
23861 + case 0:
23862 + loadsegment(gs, 0);
23863 + break;
23864 + case TASK_SIZE_MAX:
23865 + loadsegment(gs, __USER_DS);
23866 + break;
23867 + case -1UL:
23868 + loadsegment(gs, __KERNEL_DS);
23869 + break;
23870 + default:
23871 + BUG();
23872 + }
23873 + return;
23874 }
23875 -EXPORT_SYMBOL(copy_to_user);
23876 +EXPORT_SYMBOL(__set_fs);
23877
23878 -/**
23879 - * copy_from_user: - Copy a block of data from user space.
23880 - * @to: Destination address, in kernel space.
23881 - * @from: Source address, in user space.
23882 - * @n: Number of bytes to copy.
23883 - *
23884 - * Context: User context only. This function may sleep.
23885 - *
23886 - * Copy data from user space to kernel space.
23887 - *
23888 - * Returns number of bytes that could not be copied.
23889 - * On success, this will be zero.
23890 - *
23891 - * If some data could not be copied, this function will pad the copied
23892 - * data to the requested size using zero bytes.
23893 - */
23894 -unsigned long
23895 -copy_from_user(void *to, const void __user *from, unsigned long n)
23896 +void set_fs(mm_segment_t x)
23897 {
23898 - if (access_ok(VERIFY_READ, from, n))
23899 - n = __copy_from_user(to, from, n);
23900 - else
23901 - memset(to, 0, n);
23902 - return n;
23903 + current_thread_info()->addr_limit = x;
23904 + __set_fs(x);
23905 }
23906 -EXPORT_SYMBOL(copy_from_user);
23907 +EXPORT_SYMBOL(set_fs);
23908 +#endif
23909 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23910 index b7c2849..8633ad8 100644
23911 --- a/arch/x86/lib/usercopy_64.c
23912 +++ b/arch/x86/lib/usercopy_64.c
23913 @@ -42,6 +42,12 @@ long
23914 __strncpy_from_user(char *dst, const char __user *src, long count)
23915 {
23916 long res;
23917 +
23918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23919 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23920 + src += PAX_USER_SHADOW_BASE;
23921 +#endif
23922 +
23923 __do_strncpy_from_user(dst, src, count, res);
23924 return res;
23925 }
23926 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23927 {
23928 long __d0;
23929 might_fault();
23930 +
23931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23932 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23933 + addr += PAX_USER_SHADOW_BASE;
23934 +#endif
23935 +
23936 /* no memory constraint because it doesn't change any memory gcc knows
23937 about */
23938 asm volatile(
23939 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23940 }
23941 EXPORT_SYMBOL(strlen_user);
23942
23943 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23944 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23945 {
23946 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23947 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23948 - }
23949 - return len;
23950 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23951 +
23952 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23953 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23954 + to += PAX_USER_SHADOW_BASE;
23955 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23956 + from += PAX_USER_SHADOW_BASE;
23957 +#endif
23958 +
23959 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23960 + }
23961 + return len;
23962 }
23963 EXPORT_SYMBOL(copy_in_user);
23964
23965 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23966 * it is not necessary to optimize tail handling.
23967 */
23968 unsigned long
23969 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23970 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23971 {
23972 char c;
23973 unsigned zero_len;
23974 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23975 index 61b41ca..5fef66a 100644
23976 --- a/arch/x86/mm/extable.c
23977 +++ b/arch/x86/mm/extable.c
23978 @@ -1,14 +1,71 @@
23979 #include <linux/module.h>
23980 #include <linux/spinlock.h>
23981 +#include <linux/sort.h>
23982 #include <asm/uaccess.h>
23983 +#include <asm/pgtable.h>
23984
23985 +/*
23986 + * The exception table needs to be sorted so that the binary
23987 + * search that we use to find entries in it works properly.
23988 + * This is used both for the kernel exception table and for
23989 + * the exception tables of modules that get loaded.
23990 + */
23991 +static int cmp_ex(const void *a, const void *b)
23992 +{
23993 + const struct exception_table_entry *x = a, *y = b;
23994 +
23995 + /* avoid overflow */
23996 + if (x->insn > y->insn)
23997 + return 1;
23998 + if (x->insn < y->insn)
23999 + return -1;
24000 + return 0;
24001 +}
24002 +
24003 +static void swap_ex(void *a, void *b, int size)
24004 +{
24005 + struct exception_table_entry t, *x = a, *y = b;
24006 +
24007 + t = *x;
24008 +
24009 + pax_open_kernel();
24010 + *x = *y;
24011 + *y = t;
24012 + pax_close_kernel();
24013 +}
24014 +
24015 +void sort_extable(struct exception_table_entry *start,
24016 + struct exception_table_entry *finish)
24017 +{
24018 + sort(start, finish - start, sizeof(struct exception_table_entry),
24019 + cmp_ex, swap_ex);
24020 +}
24021 +
24022 +#ifdef CONFIG_MODULES
24023 +/*
24024 + * If the exception table is sorted, any referring to the module init
24025 + * will be at the beginning or the end.
24026 + */
24027 +void trim_init_extable(struct module *m)
24028 +{
24029 + /*trim the beginning*/
24030 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24031 + m->extable++;
24032 + m->num_exentries--;
24033 + }
24034 + /*trim the end*/
24035 + while (m->num_exentries &&
24036 + within_module_init(m->extable[m->num_exentries-1].insn, m))
24037 + m->num_exentries--;
24038 +}
24039 +#endif /* CONFIG_MODULES */
24040
24041 int fixup_exception(struct pt_regs *regs)
24042 {
24043 const struct exception_table_entry *fixup;
24044
24045 #ifdef CONFIG_PNPBIOS
24046 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24047 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24048 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24049 extern u32 pnp_bios_is_utter_crap;
24050 pnp_bios_is_utter_crap = 1;
24051 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24052 index 8ac0d76..ca501e2 100644
24053 --- a/arch/x86/mm/fault.c
24054 +++ b/arch/x86/mm/fault.c
24055 @@ -11,10 +11,19 @@
24056 #include <linux/kprobes.h> /* __kprobes, ... */
24057 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24058 #include <linux/perf_event.h> /* perf_sw_event */
24059 +#include <linux/unistd.h>
24060 +#include <linux/compiler.h>
24061
24062 #include <asm/traps.h> /* dotraplinkage, ... */
24063 #include <asm/pgalloc.h> /* pgd_*(), ... */
24064 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24065 +#include <asm/vsyscall.h>
24066 +#include <asm/tlbflush.h>
24067 +
24068 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24069 +#include <asm/stacktrace.h>
24070 +#include "../kernel/dumpstack.h"
24071 +#endif
24072
24073 /*
24074 * Page fault error code bits:
24075 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24076 int ret = 0;
24077
24078 /* kprobe_running() needs smp_processor_id() */
24079 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24080 + if (kprobes_built_in() && !user_mode(regs)) {
24081 preempt_disable();
24082 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24083 ret = 1;
24084 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24085 return !instr_lo || (instr_lo>>1) == 1;
24086 case 0x00:
24087 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24088 - if (probe_kernel_address(instr, opcode))
24089 + if (user_mode(regs)) {
24090 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24091 + return 0;
24092 + } else if (probe_kernel_address(instr, opcode))
24093 return 0;
24094
24095 *prefetch = (instr_lo == 0xF) &&
24096 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24097 while (instr < max_instr) {
24098 unsigned char opcode;
24099
24100 - if (probe_kernel_address(instr, opcode))
24101 + if (user_mode(regs)) {
24102 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24103 + break;
24104 + } else if (probe_kernel_address(instr, opcode))
24105 break;
24106
24107 instr++;
24108 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24109 force_sig_info(si_signo, &info, tsk);
24110 }
24111
24112 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24113 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24114 +#endif
24115 +
24116 +#ifdef CONFIG_PAX_EMUTRAMP
24117 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24118 +#endif
24119 +
24120 +#ifdef CONFIG_PAX_PAGEEXEC
24121 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24122 +{
24123 + pgd_t *pgd;
24124 + pud_t *pud;
24125 + pmd_t *pmd;
24126 +
24127 + pgd = pgd_offset(mm, address);
24128 + if (!pgd_present(*pgd))
24129 + return NULL;
24130 + pud = pud_offset(pgd, address);
24131 + if (!pud_present(*pud))
24132 + return NULL;
24133 + pmd = pmd_offset(pud, address);
24134 + if (!pmd_present(*pmd))
24135 + return NULL;
24136 + return pmd;
24137 +}
24138 +#endif
24139 +
24140 DEFINE_SPINLOCK(pgd_lock);
24141 LIST_HEAD(pgd_list);
24142
24143 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24144 address += PMD_SIZE) {
24145
24146 unsigned long flags;
24147 +
24148 +#ifdef CONFIG_PAX_PER_CPU_PGD
24149 + unsigned long cpu;
24150 +#else
24151 struct page *page;
24152 +#endif
24153
24154 spin_lock_irqsave(&pgd_lock, flags);
24155 +
24156 +#ifdef CONFIG_PAX_PER_CPU_PGD
24157 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24158 + pgd_t *pgd = get_cpu_pgd(cpu);
24159 +#else
24160 list_for_each_entry(page, &pgd_list, lru) {
24161 - if (!vmalloc_sync_one(page_address(page), address))
24162 + pgd_t *pgd = page_address(page);
24163 +#endif
24164 +
24165 + if (!vmalloc_sync_one(pgd, address))
24166 break;
24167 }
24168 spin_unlock_irqrestore(&pgd_lock, flags);
24169 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24170 * an interrupt in the middle of a task switch..
24171 */
24172 pgd_paddr = read_cr3();
24173 +
24174 +#ifdef CONFIG_PAX_PER_CPU_PGD
24175 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24176 +#endif
24177 +
24178 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24179 if (!pmd_k)
24180 return -1;
24181 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24182
24183 const pgd_t *pgd_ref = pgd_offset_k(address);
24184 unsigned long flags;
24185 +
24186 +#ifdef CONFIG_PAX_PER_CPU_PGD
24187 + unsigned long cpu;
24188 +#else
24189 struct page *page;
24190 +#endif
24191
24192 if (pgd_none(*pgd_ref))
24193 continue;
24194
24195 spin_lock_irqsave(&pgd_lock, flags);
24196 +
24197 +#ifdef CONFIG_PAX_PER_CPU_PGD
24198 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24199 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24200 +#else
24201 list_for_each_entry(page, &pgd_list, lru) {
24202 pgd_t *pgd;
24203 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24204 +#endif
24205 +
24206 if (pgd_none(*pgd))
24207 set_pgd(pgd, *pgd_ref);
24208 else
24209 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24210 * happen within a race in page table update. In the later
24211 * case just flush:
24212 */
24213 +
24214 +#ifdef CONFIG_PAX_PER_CPU_PGD
24215 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24216 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24217 +#else
24218 pgd = pgd_offset(current->active_mm, address);
24219 +#endif
24220 +
24221 pgd_ref = pgd_offset_k(address);
24222 if (pgd_none(*pgd_ref))
24223 return -1;
24224 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24225 static int is_errata100(struct pt_regs *regs, unsigned long address)
24226 {
24227 #ifdef CONFIG_X86_64
24228 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24229 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24230 return 1;
24231 #endif
24232 return 0;
24233 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24234 }
24235
24236 static const char nx_warning[] = KERN_CRIT
24237 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24238 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24239
24240 static void
24241 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24242 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24243 if (!oops_may_print())
24244 return;
24245
24246 - if (error_code & PF_INSTR) {
24247 + if (nx_enabled && (error_code & PF_INSTR)) {
24248 unsigned int level;
24249
24250 pte_t *pte = lookup_address(address, &level);
24251
24252 if (pte && pte_present(*pte) && !pte_exec(*pte))
24253 - printk(nx_warning, current_uid());
24254 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24255 }
24256
24257 +#ifdef CONFIG_PAX_KERNEXEC
24258 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24259 + if (current->signal->curr_ip)
24260 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24261 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24262 + else
24263 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24264 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24265 + }
24266 +#endif
24267 +
24268 printk(KERN_ALERT "BUG: unable to handle kernel ");
24269 if (address < PAGE_SIZE)
24270 printk(KERN_CONT "NULL pointer dereference");
24271 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24272 {
24273 struct task_struct *tsk = current;
24274
24275 +#ifdef CONFIG_X86_64
24276 + struct mm_struct *mm = tsk->mm;
24277 +
24278 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24279 + if (regs->ip == (unsigned long)vgettimeofday) {
24280 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24281 + return;
24282 + } else if (regs->ip == (unsigned long)vtime) {
24283 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24284 + return;
24285 + } else if (regs->ip == (unsigned long)vgetcpu) {
24286 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24287 + return;
24288 + }
24289 + }
24290 +#endif
24291 +
24292 /* User mode accesses just cause a SIGSEGV */
24293 if (error_code & PF_USER) {
24294 /*
24295 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24296 if (is_errata100(regs, address))
24297 return;
24298
24299 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24300 + if (pax_is_fetch_fault(regs, error_code, address)) {
24301 +
24302 +#ifdef CONFIG_PAX_EMUTRAMP
24303 + switch (pax_handle_fetch_fault(regs)) {
24304 + case 2:
24305 + return;
24306 + }
24307 +#endif
24308 +
24309 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24310 + do_group_exit(SIGKILL);
24311 + }
24312 +#endif
24313 +
24314 if (unlikely(show_unhandled_signals))
24315 show_signal_msg(regs, error_code, address, tsk);
24316
24317 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24318 if (fault & VM_FAULT_HWPOISON) {
24319 printk(KERN_ERR
24320 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24321 - tsk->comm, tsk->pid, address);
24322 + tsk->comm, task_pid_nr(tsk), address);
24323 code = BUS_MCEERR_AR;
24324 }
24325 #endif
24326 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24327 return 1;
24328 }
24329
24330 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24331 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24332 +{
24333 + pte_t *pte;
24334 + pmd_t *pmd;
24335 + spinlock_t *ptl;
24336 + unsigned char pte_mask;
24337 +
24338 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24339 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24340 + return 0;
24341 +
24342 + /* PaX: it's our fault, let's handle it if we can */
24343 +
24344 + /* PaX: take a look at read faults before acquiring any locks */
24345 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24346 + /* instruction fetch attempt from a protected page in user mode */
24347 + up_read(&mm->mmap_sem);
24348 +
24349 +#ifdef CONFIG_PAX_EMUTRAMP
24350 + switch (pax_handle_fetch_fault(regs)) {
24351 + case 2:
24352 + return 1;
24353 + }
24354 +#endif
24355 +
24356 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24357 + do_group_exit(SIGKILL);
24358 + }
24359 +
24360 + pmd = pax_get_pmd(mm, address);
24361 + if (unlikely(!pmd))
24362 + return 0;
24363 +
24364 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24365 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24366 + pte_unmap_unlock(pte, ptl);
24367 + return 0;
24368 + }
24369 +
24370 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24371 + /* write attempt to a protected page in user mode */
24372 + pte_unmap_unlock(pte, ptl);
24373 + return 0;
24374 + }
24375 +
24376 +#ifdef CONFIG_SMP
24377 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24378 +#else
24379 + if (likely(address > get_limit(regs->cs)))
24380 +#endif
24381 + {
24382 + set_pte(pte, pte_mkread(*pte));
24383 + __flush_tlb_one(address);
24384 + pte_unmap_unlock(pte, ptl);
24385 + up_read(&mm->mmap_sem);
24386 + return 1;
24387 + }
24388 +
24389 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24390 +
24391 + /*
24392 + * PaX: fill DTLB with user rights and retry
24393 + */
24394 + __asm__ __volatile__ (
24395 + "orb %2,(%1)\n"
24396 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24397 +/*
24398 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24399 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24400 + * page fault when examined during a TLB load attempt. this is true not only
24401 + * for PTEs holding a non-present entry but also present entries that will
24402 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24403 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24404 + * for our target pages since their PTEs are simply not in the TLBs at all.
24405 +
24406 + * the best thing in omitting it is that we gain around 15-20% speed in the
24407 + * fast path of the page fault handler and can get rid of tracing since we
24408 + * can no longer flush unintended entries.
24409 + */
24410 + "invlpg (%0)\n"
24411 +#endif
24412 + __copyuser_seg"testb $0,(%0)\n"
24413 + "xorb %3,(%1)\n"
24414 + :
24415 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24416 + : "memory", "cc");
24417 + pte_unmap_unlock(pte, ptl);
24418 + up_read(&mm->mmap_sem);
24419 + return 1;
24420 +}
24421 +#endif
24422 +
24423 /*
24424 * Handle a spurious fault caused by a stale TLB entry.
24425 *
24426 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24427 static inline int
24428 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24429 {
24430 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24431 + return 1;
24432 +
24433 if (write) {
24434 /* write, present and write, not present: */
24435 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24436 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24437 {
24438 struct vm_area_struct *vma;
24439 struct task_struct *tsk;
24440 - unsigned long address;
24441 struct mm_struct *mm;
24442 int write;
24443 int fault;
24444
24445 - tsk = current;
24446 - mm = tsk->mm;
24447 -
24448 /* Get the faulting address: */
24449 - address = read_cr2();
24450 + unsigned long address = read_cr2();
24451 +
24452 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24453 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24454 + if (!search_exception_tables(regs->ip)) {
24455 + bad_area_nosemaphore(regs, error_code, address);
24456 + return;
24457 + }
24458 + if (address < PAX_USER_SHADOW_BASE) {
24459 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24460 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24461 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24462 + } else
24463 + address -= PAX_USER_SHADOW_BASE;
24464 + }
24465 +#endif
24466 +
24467 + tsk = current;
24468 + mm = tsk->mm;
24469
24470 /*
24471 * Detect and handle instructions that would cause a page fault for
24472 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24473 * User-mode registers count as a user access even for any
24474 * potential system fault or CPU buglet:
24475 */
24476 - if (user_mode_vm(regs)) {
24477 + if (user_mode(regs)) {
24478 local_irq_enable();
24479 error_code |= PF_USER;
24480 } else {
24481 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24482 might_sleep();
24483 }
24484
24485 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24486 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24487 + return;
24488 +#endif
24489 +
24490 vma = find_vma(mm, address);
24491 if (unlikely(!vma)) {
24492 bad_area(regs, error_code, address);
24493 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24494 bad_area(regs, error_code, address);
24495 return;
24496 }
24497 - if (error_code & PF_USER) {
24498 - /*
24499 - * Accessing the stack below %sp is always a bug.
24500 - * The large cushion allows instructions like enter
24501 - * and pusha to work. ("enter $65535, $31" pushes
24502 - * 32 pointers and then decrements %sp by 65535.)
24503 - */
24504 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24505 - bad_area(regs, error_code, address);
24506 - return;
24507 - }
24508 + /*
24509 + * Accessing the stack below %sp is always a bug.
24510 + * The large cushion allows instructions like enter
24511 + * and pusha to work. ("enter $65535, $31" pushes
24512 + * 32 pointers and then decrements %sp by 65535.)
24513 + */
24514 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24515 + bad_area(regs, error_code, address);
24516 + return;
24517 }
24518 +
24519 +#ifdef CONFIG_PAX_SEGMEXEC
24520 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24521 + bad_area(regs, error_code, address);
24522 + return;
24523 + }
24524 +#endif
24525 +
24526 if (unlikely(expand_stack(vma, address))) {
24527 bad_area(regs, error_code, address);
24528 return;
24529 @@ -1146,3 +1390,292 @@ good_area:
24530
24531 up_read(&mm->mmap_sem);
24532 }
24533 +
24534 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24535 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24536 +{
24537 + struct mm_struct *mm = current->mm;
24538 + unsigned long ip = regs->ip;
24539 +
24540 + if (v8086_mode(regs))
24541 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24542 +
24543 +#ifdef CONFIG_PAX_PAGEEXEC
24544 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24545 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24546 + return true;
24547 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24548 + return true;
24549 + return false;
24550 + }
24551 +#endif
24552 +
24553 +#ifdef CONFIG_PAX_SEGMEXEC
24554 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24555 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24556 + return true;
24557 + return false;
24558 + }
24559 +#endif
24560 +
24561 + return false;
24562 +}
24563 +#endif
24564 +
24565 +#ifdef CONFIG_PAX_EMUTRAMP
24566 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24567 +{
24568 + int err;
24569 +
24570 + do { /* PaX: libffi trampoline emulation */
24571 + unsigned char mov, jmp;
24572 + unsigned int addr1, addr2;
24573 +
24574 +#ifdef CONFIG_X86_64
24575 + if ((regs->ip + 9) >> 32)
24576 + break;
24577 +#endif
24578 +
24579 + err = get_user(mov, (unsigned char __user *)regs->ip);
24580 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24581 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24582 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24583 +
24584 + if (err)
24585 + break;
24586 +
24587 + if (mov == 0xB8 && jmp == 0xE9) {
24588 + regs->ax = addr1;
24589 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24590 + return 2;
24591 + }
24592 + } while (0);
24593 +
24594 + do { /* PaX: gcc trampoline emulation #1 */
24595 + unsigned char mov1, mov2;
24596 + unsigned short jmp;
24597 + unsigned int addr1, addr2;
24598 +
24599 +#ifdef CONFIG_X86_64
24600 + if ((regs->ip + 11) >> 32)
24601 + break;
24602 +#endif
24603 +
24604 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24605 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24606 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24607 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24608 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24609 +
24610 + if (err)
24611 + break;
24612 +
24613 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24614 + regs->cx = addr1;
24615 + regs->ax = addr2;
24616 + regs->ip = addr2;
24617 + return 2;
24618 + }
24619 + } while (0);
24620 +
24621 + do { /* PaX: gcc trampoline emulation #2 */
24622 + unsigned char mov, jmp;
24623 + unsigned int addr1, addr2;
24624 +
24625 +#ifdef CONFIG_X86_64
24626 + if ((regs->ip + 9) >> 32)
24627 + break;
24628 +#endif
24629 +
24630 + err = get_user(mov, (unsigned char __user *)regs->ip);
24631 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24632 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24633 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24634 +
24635 + if (err)
24636 + break;
24637 +
24638 + if (mov == 0xB9 && jmp == 0xE9) {
24639 + regs->cx = addr1;
24640 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24641 + return 2;
24642 + }
24643 + } while (0);
24644 +
24645 + return 1; /* PaX in action */
24646 +}
24647 +
24648 +#ifdef CONFIG_X86_64
24649 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24650 +{
24651 + int err;
24652 +
24653 + do { /* PaX: libffi trampoline emulation */
24654 + unsigned short mov1, mov2, jmp1;
24655 + unsigned char stcclc, jmp2;
24656 + unsigned long addr1, addr2;
24657 +
24658 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24659 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24660 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24661 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24662 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24663 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24664 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24665 +
24666 + if (err)
24667 + break;
24668 +
24669 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24670 + regs->r11 = addr1;
24671 + regs->r10 = addr2;
24672 + if (stcclc == 0xF8)
24673 + regs->flags &= ~X86_EFLAGS_CF;
24674 + else
24675 + regs->flags |= X86_EFLAGS_CF;
24676 + regs->ip = addr1;
24677 + return 2;
24678 + }
24679 + } while (0);
24680 +
24681 + do { /* PaX: gcc trampoline emulation #1 */
24682 + unsigned short mov1, mov2, jmp1;
24683 + unsigned char jmp2;
24684 + unsigned int addr1;
24685 + unsigned long addr2;
24686 +
24687 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24688 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24689 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24690 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24691 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24692 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24693 +
24694 + if (err)
24695 + break;
24696 +
24697 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24698 + regs->r11 = addr1;
24699 + regs->r10 = addr2;
24700 + regs->ip = addr1;
24701 + return 2;
24702 + }
24703 + } while (0);
24704 +
24705 + do { /* PaX: gcc trampoline emulation #2 */
24706 + unsigned short mov1, mov2, jmp1;
24707 + unsigned char jmp2;
24708 + unsigned long addr1, addr2;
24709 +
24710 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24711 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24712 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24713 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24714 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24715 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24716 +
24717 + if (err)
24718 + break;
24719 +
24720 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24721 + regs->r11 = addr1;
24722 + regs->r10 = addr2;
24723 + regs->ip = addr1;
24724 + return 2;
24725 + }
24726 + } while (0);
24727 +
24728 + return 1; /* PaX in action */
24729 +}
24730 +#endif
24731 +
24732 +/*
24733 + * PaX: decide what to do with offenders (regs->ip = fault address)
24734 + *
24735 + * returns 1 when task should be killed
24736 + * 2 when gcc trampoline was detected
24737 + */
24738 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24739 +{
24740 + if (v8086_mode(regs))
24741 + return 1;
24742 +
24743 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24744 + return 1;
24745 +
24746 +#ifdef CONFIG_X86_32
24747 + return pax_handle_fetch_fault_32(regs);
24748 +#else
24749 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24750 + return pax_handle_fetch_fault_32(regs);
24751 + else
24752 + return pax_handle_fetch_fault_64(regs);
24753 +#endif
24754 +}
24755 +#endif
24756 +
24757 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24758 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24759 +{
24760 + long i;
24761 +
24762 + printk(KERN_ERR "PAX: bytes at PC: ");
24763 + for (i = 0; i < 20; i++) {
24764 + unsigned char c;
24765 + if (get_user(c, (unsigned char __force_user *)pc+i))
24766 + printk(KERN_CONT "?? ");
24767 + else
24768 + printk(KERN_CONT "%02x ", c);
24769 + }
24770 + printk("\n");
24771 +
24772 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24773 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24774 + unsigned long c;
24775 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24776 +#ifdef CONFIG_X86_32
24777 + printk(KERN_CONT "???????? ");
24778 +#else
24779 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24780 + printk(KERN_CONT "???????? ???????? ");
24781 + else
24782 + printk(KERN_CONT "???????????????? ");
24783 +#endif
24784 + } else {
24785 +#ifdef CONFIG_X86_64
24786 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24787 + printk(KERN_CONT "%08x ", (unsigned int)c);
24788 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24789 + } else
24790 +#endif
24791 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24792 + }
24793 + }
24794 + printk("\n");
24795 +}
24796 +#endif
24797 +
24798 +/**
24799 + * probe_kernel_write(): safely attempt to write to a location
24800 + * @dst: address to write to
24801 + * @src: pointer to the data that shall be written
24802 + * @size: size of the data chunk
24803 + *
24804 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24805 + * happens, handle that and return -EFAULT.
24806 + */
24807 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24808 +{
24809 + long ret;
24810 + mm_segment_t old_fs = get_fs();
24811 +
24812 + set_fs(KERNEL_DS);
24813 + pagefault_disable();
24814 + pax_open_kernel();
24815 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24816 + pax_close_kernel();
24817 + pagefault_enable();
24818 + set_fs(old_fs);
24819 +
24820 + return ret ? -EFAULT : 0;
24821 +}
24822 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24823 index 71da1bc..7a16bf4 100644
24824 --- a/arch/x86/mm/gup.c
24825 +++ b/arch/x86/mm/gup.c
24826 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24827 addr = start;
24828 len = (unsigned long) nr_pages << PAGE_SHIFT;
24829 end = start + len;
24830 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24831 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24832 (void __user *)start, len)))
24833 return 0;
24834
24835 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24836 index 63a6ba6..79abd7a 100644
24837 --- a/arch/x86/mm/highmem_32.c
24838 +++ b/arch/x86/mm/highmem_32.c
24839 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24840 idx = type + KM_TYPE_NR*smp_processor_id();
24841 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24842 BUG_ON(!pte_none(*(kmap_pte-idx)));
24843 +
24844 + pax_open_kernel();
24845 set_pte(kmap_pte-idx, mk_pte(page, prot));
24846 + pax_close_kernel();
24847
24848 return (void *)vaddr;
24849 }
24850 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24851 index f46c340..6ff9a26 100644
24852 --- a/arch/x86/mm/hugetlbpage.c
24853 +++ b/arch/x86/mm/hugetlbpage.c
24854 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24855 struct hstate *h = hstate_file(file);
24856 struct mm_struct *mm = current->mm;
24857 struct vm_area_struct *vma;
24858 - unsigned long start_addr;
24859 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24860 +
24861 +#ifdef CONFIG_PAX_SEGMEXEC
24862 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24863 + pax_task_size = SEGMEXEC_TASK_SIZE;
24864 +#endif
24865 +
24866 + pax_task_size -= PAGE_SIZE;
24867
24868 if (len > mm->cached_hole_size) {
24869 - start_addr = mm->free_area_cache;
24870 + start_addr = mm->free_area_cache;
24871 } else {
24872 - start_addr = TASK_UNMAPPED_BASE;
24873 - mm->cached_hole_size = 0;
24874 + start_addr = mm->mmap_base;
24875 + mm->cached_hole_size = 0;
24876 }
24877
24878 full_search:
24879 @@ -281,26 +288,27 @@ full_search:
24880
24881 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24882 /* At this point: (!vma || addr < vma->vm_end). */
24883 - if (TASK_SIZE - len < addr) {
24884 + if (pax_task_size - len < addr) {
24885 /*
24886 * Start a new search - just in case we missed
24887 * some holes.
24888 */
24889 - if (start_addr != TASK_UNMAPPED_BASE) {
24890 - start_addr = TASK_UNMAPPED_BASE;
24891 + if (start_addr != mm->mmap_base) {
24892 + start_addr = mm->mmap_base;
24893 mm->cached_hole_size = 0;
24894 goto full_search;
24895 }
24896 return -ENOMEM;
24897 }
24898 - if (!vma || addr + len <= vma->vm_start) {
24899 - mm->free_area_cache = addr + len;
24900 - return addr;
24901 - }
24902 + if (check_heap_stack_gap(vma, addr, len))
24903 + break;
24904 if (addr + mm->cached_hole_size < vma->vm_start)
24905 mm->cached_hole_size = vma->vm_start - addr;
24906 addr = ALIGN(vma->vm_end, huge_page_size(h));
24907 }
24908 +
24909 + mm->free_area_cache = addr + len;
24910 + return addr;
24911 }
24912
24913 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24914 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24915 {
24916 struct hstate *h = hstate_file(file);
24917 struct mm_struct *mm = current->mm;
24918 - struct vm_area_struct *vma, *prev_vma;
24919 - unsigned long base = mm->mmap_base, addr = addr0;
24920 + struct vm_area_struct *vma;
24921 + unsigned long base = mm->mmap_base, addr;
24922 unsigned long largest_hole = mm->cached_hole_size;
24923 - int first_time = 1;
24924
24925 /* don't allow allocations above current base */
24926 if (mm->free_area_cache > base)
24927 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24928 largest_hole = 0;
24929 mm->free_area_cache = base;
24930 }
24931 -try_again:
24932 +
24933 /* make sure it can fit in the remaining address space */
24934 if (mm->free_area_cache < len)
24935 goto fail;
24936
24937 /* either no address requested or cant fit in requested address hole */
24938 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24939 + addr = (mm->free_area_cache - len);
24940 do {
24941 + addr &= huge_page_mask(h);
24942 + vma = find_vma(mm, addr);
24943 /*
24944 * Lookup failure means no vma is above this address,
24945 * i.e. return with success:
24946 - */
24947 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24948 - return addr;
24949 -
24950 - /*
24951 * new region fits between prev_vma->vm_end and
24952 * vma->vm_start, use it:
24953 */
24954 - if (addr + len <= vma->vm_start &&
24955 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24956 + if (check_heap_stack_gap(vma, addr, len)) {
24957 /* remember the address as a hint for next time */
24958 - mm->cached_hole_size = largest_hole;
24959 - return (mm->free_area_cache = addr);
24960 - } else {
24961 - /* pull free_area_cache down to the first hole */
24962 - if (mm->free_area_cache == vma->vm_end) {
24963 - mm->free_area_cache = vma->vm_start;
24964 - mm->cached_hole_size = largest_hole;
24965 - }
24966 + mm->cached_hole_size = largest_hole;
24967 + return (mm->free_area_cache = addr);
24968 + }
24969 + /* pull free_area_cache down to the first hole */
24970 + if (mm->free_area_cache == vma->vm_end) {
24971 + mm->free_area_cache = vma->vm_start;
24972 + mm->cached_hole_size = largest_hole;
24973 }
24974
24975 /* remember the largest hole we saw so far */
24976 if (addr + largest_hole < vma->vm_start)
24977 - largest_hole = vma->vm_start - addr;
24978 + largest_hole = vma->vm_start - addr;
24979
24980 /* try just below the current vma->vm_start */
24981 - addr = (vma->vm_start - len) & huge_page_mask(h);
24982 - } while (len <= vma->vm_start);
24983 + addr = skip_heap_stack_gap(vma, len);
24984 + } while (!IS_ERR_VALUE(addr));
24985
24986 fail:
24987 /*
24988 - * if hint left us with no space for the requested
24989 - * mapping then try again:
24990 - */
24991 - if (first_time) {
24992 - mm->free_area_cache = base;
24993 - largest_hole = 0;
24994 - first_time = 0;
24995 - goto try_again;
24996 - }
24997 - /*
24998 * A failed mmap() very likely causes application failure,
24999 * so fall back to the bottom-up function here. This scenario
25000 * can happen with large stack limits and large mmap()
25001 * allocations.
25002 */
25003 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25004 +
25005 +#ifdef CONFIG_PAX_SEGMEXEC
25006 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25007 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25008 + else
25009 +#endif
25010 +
25011 + mm->mmap_base = TASK_UNMAPPED_BASE;
25012 +
25013 +#ifdef CONFIG_PAX_RANDMMAP
25014 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25015 + mm->mmap_base += mm->delta_mmap;
25016 +#endif
25017 +
25018 + mm->free_area_cache = mm->mmap_base;
25019 mm->cached_hole_size = ~0UL;
25020 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25021 len, pgoff, flags);
25022 @@ -387,6 +393,7 @@ fail:
25023 /*
25024 * Restore the topdown base:
25025 */
25026 + mm->mmap_base = base;
25027 mm->free_area_cache = base;
25028 mm->cached_hole_size = ~0UL;
25029
25030 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25031 struct hstate *h = hstate_file(file);
25032 struct mm_struct *mm = current->mm;
25033 struct vm_area_struct *vma;
25034 + unsigned long pax_task_size = TASK_SIZE;
25035
25036 if (len & ~huge_page_mask(h))
25037 return -EINVAL;
25038 - if (len > TASK_SIZE)
25039 +
25040 +#ifdef CONFIG_PAX_SEGMEXEC
25041 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25042 + pax_task_size = SEGMEXEC_TASK_SIZE;
25043 +#endif
25044 +
25045 + pax_task_size -= PAGE_SIZE;
25046 +
25047 + if (len > pax_task_size)
25048 return -ENOMEM;
25049
25050 if (flags & MAP_FIXED) {
25051 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25052 if (addr) {
25053 addr = ALIGN(addr, huge_page_size(h));
25054 vma = find_vma(mm, addr);
25055 - if (TASK_SIZE - len >= addr &&
25056 - (!vma || addr + len <= vma->vm_start))
25057 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25058 return addr;
25059 }
25060 if (mm->get_unmapped_area == arch_get_unmapped_area)
25061 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25062 index 73ffd55..f61c2a7 100644
25063 --- a/arch/x86/mm/init.c
25064 +++ b/arch/x86/mm/init.c
25065 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25066 * cause a hotspot and fill up ZONE_DMA. The page tables
25067 * need roughly 0.5KB per GB.
25068 */
25069 -#ifdef CONFIG_X86_32
25070 - start = 0x7000;
25071 -#else
25072 - start = 0x8000;
25073 -#endif
25074 + start = 0x100000;
25075 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25076 tables, PAGE_SIZE);
25077 if (e820_table_start == -1UL)
25078 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25079 #endif
25080
25081 set_nx();
25082 - if (nx_enabled)
25083 + if (nx_enabled && cpu_has_nx)
25084 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25085
25086 /* Enable PSE if available */
25087 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25088 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25089 * mmio resources as well as potential bios/acpi data regions.
25090 */
25091 +
25092 int devmem_is_allowed(unsigned long pagenr)
25093 {
25094 +#ifdef CONFIG_GRKERNSEC_KMEM
25095 + /* allow BDA */
25096 + if (!pagenr)
25097 + return 1;
25098 + /* allow EBDA */
25099 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25100 + return 1;
25101 + /* allow ISA/video mem */
25102 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25103 + return 1;
25104 + /* throw out everything else below 1MB */
25105 + if (pagenr <= 256)
25106 + return 0;
25107 +#else
25108 if (pagenr <= 256)
25109 return 1;
25110 +#endif
25111 +
25112 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25113 return 0;
25114 if (!page_is_ram(pagenr))
25115 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25116
25117 void free_initmem(void)
25118 {
25119 +
25120 +#ifdef CONFIG_PAX_KERNEXEC
25121 +#ifdef CONFIG_X86_32
25122 + /* PaX: limit KERNEL_CS to actual size */
25123 + unsigned long addr, limit;
25124 + struct desc_struct d;
25125 + int cpu;
25126 +
25127 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25128 + limit = (limit - 1UL) >> PAGE_SHIFT;
25129 +
25130 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25131 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25132 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25133 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25134 + }
25135 +
25136 + /* PaX: make KERNEL_CS read-only */
25137 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25138 + if (!paravirt_enabled())
25139 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25140 +/*
25141 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25142 + pgd = pgd_offset_k(addr);
25143 + pud = pud_offset(pgd, addr);
25144 + pmd = pmd_offset(pud, addr);
25145 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25146 + }
25147 +*/
25148 +#ifdef CONFIG_X86_PAE
25149 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25150 +/*
25151 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25152 + pgd = pgd_offset_k(addr);
25153 + pud = pud_offset(pgd, addr);
25154 + pmd = pmd_offset(pud, addr);
25155 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25156 + }
25157 +*/
25158 +#endif
25159 +
25160 +#ifdef CONFIG_MODULES
25161 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25162 +#endif
25163 +
25164 +#else
25165 + pgd_t *pgd;
25166 + pud_t *pud;
25167 + pmd_t *pmd;
25168 + unsigned long addr, end;
25169 +
25170 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25171 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25172 + pgd = pgd_offset_k(addr);
25173 + pud = pud_offset(pgd, addr);
25174 + pmd = pmd_offset(pud, addr);
25175 + if (!pmd_present(*pmd))
25176 + continue;
25177 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25178 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25179 + else
25180 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25181 + }
25182 +
25183 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25184 + end = addr + KERNEL_IMAGE_SIZE;
25185 + for (; addr < end; addr += PMD_SIZE) {
25186 + pgd = pgd_offset_k(addr);
25187 + pud = pud_offset(pgd, addr);
25188 + pmd = pmd_offset(pud, addr);
25189 + if (!pmd_present(*pmd))
25190 + continue;
25191 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25192 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25193 + }
25194 +#endif
25195 +
25196 + flush_tlb_all();
25197 +#endif
25198 +
25199 free_init_pages("unused kernel memory",
25200 (unsigned long)(&__init_begin),
25201 (unsigned long)(&__init_end));
25202 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25203 index 30938c1..bda3d5d 100644
25204 --- a/arch/x86/mm/init_32.c
25205 +++ b/arch/x86/mm/init_32.c
25206 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25207 }
25208
25209 /*
25210 - * Creates a middle page table and puts a pointer to it in the
25211 - * given global directory entry. This only returns the gd entry
25212 - * in non-PAE compilation mode, since the middle layer is folded.
25213 - */
25214 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25215 -{
25216 - pud_t *pud;
25217 - pmd_t *pmd_table;
25218 -
25219 -#ifdef CONFIG_X86_PAE
25220 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25221 - if (after_bootmem)
25222 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25223 - else
25224 - pmd_table = (pmd_t *)alloc_low_page();
25225 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25226 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25227 - pud = pud_offset(pgd, 0);
25228 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25229 -
25230 - return pmd_table;
25231 - }
25232 -#endif
25233 - pud = pud_offset(pgd, 0);
25234 - pmd_table = pmd_offset(pud, 0);
25235 -
25236 - return pmd_table;
25237 -}
25238 -
25239 -/*
25240 * Create a page table and place a pointer to it in a middle page
25241 * directory entry:
25242 */
25243 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25244 page_table = (pte_t *)alloc_low_page();
25245
25246 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25247 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25248 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25249 +#else
25250 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25251 +#endif
25252 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25253 }
25254
25255 return pte_offset_kernel(pmd, 0);
25256 }
25257
25258 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25259 +{
25260 + pud_t *pud;
25261 + pmd_t *pmd_table;
25262 +
25263 + pud = pud_offset(pgd, 0);
25264 + pmd_table = pmd_offset(pud, 0);
25265 +
25266 + return pmd_table;
25267 +}
25268 +
25269 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25270 {
25271 int pgd_idx = pgd_index(vaddr);
25272 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25273 int pgd_idx, pmd_idx;
25274 unsigned long vaddr;
25275 pgd_t *pgd;
25276 + pud_t *pud;
25277 pmd_t *pmd;
25278 pte_t *pte = NULL;
25279
25280 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25281 pgd = pgd_base + pgd_idx;
25282
25283 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25284 - pmd = one_md_table_init(pgd);
25285 - pmd = pmd + pmd_index(vaddr);
25286 + pud = pud_offset(pgd, vaddr);
25287 + pmd = pmd_offset(pud, vaddr);
25288 +
25289 +#ifdef CONFIG_X86_PAE
25290 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25291 +#endif
25292 +
25293 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25294 pmd++, pmd_idx++) {
25295 pte = page_table_kmap_check(one_page_table_init(pmd),
25296 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25297 }
25298 }
25299
25300 -static inline int is_kernel_text(unsigned long addr)
25301 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25302 {
25303 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25304 - return 1;
25305 - return 0;
25306 + if ((start > ktla_ktva((unsigned long)_etext) ||
25307 + end <= ktla_ktva((unsigned long)_stext)) &&
25308 + (start > ktla_ktva((unsigned long)_einittext) ||
25309 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25310 +
25311 +#ifdef CONFIG_ACPI_SLEEP
25312 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25313 +#endif
25314 +
25315 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25316 + return 0;
25317 + return 1;
25318 }
25319
25320 /*
25321 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25322 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25323 unsigned long start_pfn, end_pfn;
25324 pgd_t *pgd_base = swapper_pg_dir;
25325 - int pgd_idx, pmd_idx, pte_ofs;
25326 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25327 unsigned long pfn;
25328 pgd_t *pgd;
25329 + pud_t *pud;
25330 pmd_t *pmd;
25331 pte_t *pte;
25332 unsigned pages_2m, pages_4k;
25333 @@ -278,8 +279,13 @@ repeat:
25334 pfn = start_pfn;
25335 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25336 pgd = pgd_base + pgd_idx;
25337 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25338 - pmd = one_md_table_init(pgd);
25339 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25340 + pud = pud_offset(pgd, 0);
25341 + pmd = pmd_offset(pud, 0);
25342 +
25343 +#ifdef CONFIG_X86_PAE
25344 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25345 +#endif
25346
25347 if (pfn >= end_pfn)
25348 continue;
25349 @@ -291,14 +297,13 @@ repeat:
25350 #endif
25351 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25352 pmd++, pmd_idx++) {
25353 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25354 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25355
25356 /*
25357 * Map with big pages if possible, otherwise
25358 * create normal page tables:
25359 */
25360 if (use_pse) {
25361 - unsigned int addr2;
25362 pgprot_t prot = PAGE_KERNEL_LARGE;
25363 /*
25364 * first pass will use the same initial
25365 @@ -308,11 +313,7 @@ repeat:
25366 __pgprot(PTE_IDENT_ATTR |
25367 _PAGE_PSE);
25368
25369 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25370 - PAGE_OFFSET + PAGE_SIZE-1;
25371 -
25372 - if (is_kernel_text(addr) ||
25373 - is_kernel_text(addr2))
25374 + if (is_kernel_text(address, address + PMD_SIZE))
25375 prot = PAGE_KERNEL_LARGE_EXEC;
25376
25377 pages_2m++;
25378 @@ -329,7 +330,7 @@ repeat:
25379 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25380 pte += pte_ofs;
25381 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25382 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25383 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25384 pgprot_t prot = PAGE_KERNEL;
25385 /*
25386 * first pass will use the same initial
25387 @@ -337,7 +338,7 @@ repeat:
25388 */
25389 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25390
25391 - if (is_kernel_text(addr))
25392 + if (is_kernel_text(address, address + PAGE_SIZE))
25393 prot = PAGE_KERNEL_EXEC;
25394
25395 pages_4k++;
25396 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25397
25398 pud = pud_offset(pgd, va);
25399 pmd = pmd_offset(pud, va);
25400 - if (!pmd_present(*pmd))
25401 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25402 break;
25403
25404 pte = pte_offset_kernel(pmd, va);
25405 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25406
25407 static void __init pagetable_init(void)
25408 {
25409 - pgd_t *pgd_base = swapper_pg_dir;
25410 -
25411 - permanent_kmaps_init(pgd_base);
25412 + permanent_kmaps_init(swapper_pg_dir);
25413 }
25414
25415 #ifdef CONFIG_ACPI_SLEEP
25416 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25417 * ACPI suspend needs this for resume, because things like the intel-agp
25418 * driver might have split up a kernel 4MB mapping.
25419 */
25420 -char swsusp_pg_dir[PAGE_SIZE]
25421 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25422 __attribute__ ((aligned(PAGE_SIZE)));
25423
25424 static inline void save_pg_dir(void)
25425 {
25426 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25427 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25428 }
25429 #else /* !CONFIG_ACPI_SLEEP */
25430 static inline void save_pg_dir(void)
25431 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25432 flush_tlb_all();
25433 }
25434
25435 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25436 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25437 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25438
25439 /* user-defined highmem size */
25440 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25441 * Initialize the boot-time allocator (with low memory only):
25442 */
25443 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25444 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25445 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25446 PAGE_SIZE);
25447 if (bootmap == -1L)
25448 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25449 @@ -864,6 +863,12 @@ void __init mem_init(void)
25450
25451 pci_iommu_alloc();
25452
25453 +#ifdef CONFIG_PAX_PER_CPU_PGD
25454 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25455 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25456 + KERNEL_PGD_PTRS);
25457 +#endif
25458 +
25459 #ifdef CONFIG_FLATMEM
25460 BUG_ON(!mem_map);
25461 #endif
25462 @@ -881,7 +886,7 @@ void __init mem_init(void)
25463 set_highmem_pages_init();
25464
25465 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25466 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25467 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25468 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25469
25470 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25471 @@ -923,10 +928,10 @@ void __init mem_init(void)
25472 ((unsigned long)&__init_end -
25473 (unsigned long)&__init_begin) >> 10,
25474
25475 - (unsigned long)&_etext, (unsigned long)&_edata,
25476 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25477 + (unsigned long)&_sdata, (unsigned long)&_edata,
25478 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25479
25480 - (unsigned long)&_text, (unsigned long)&_etext,
25481 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25482 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25483
25484 /*
25485 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25486 if (!kernel_set_to_readonly)
25487 return;
25488
25489 + start = ktla_ktva(start);
25490 pr_debug("Set kernel text: %lx - %lx for read write\n",
25491 start, start+size);
25492
25493 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25494 if (!kernel_set_to_readonly)
25495 return;
25496
25497 + start = ktla_ktva(start);
25498 pr_debug("Set kernel text: %lx - %lx for read only\n",
25499 start, start+size);
25500
25501 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25502 unsigned long start = PFN_ALIGN(_text);
25503 unsigned long size = PFN_ALIGN(_etext) - start;
25504
25505 + start = ktla_ktva(start);
25506 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25507 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25508 size >> 10);
25509 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25510 index 7d095ad..25d2549 100644
25511 --- a/arch/x86/mm/init_64.c
25512 +++ b/arch/x86/mm/init_64.c
25513 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25514 pmd = fill_pmd(pud, vaddr);
25515 pte = fill_pte(pmd, vaddr);
25516
25517 + pax_open_kernel();
25518 set_pte(pte, new_pte);
25519 + pax_close_kernel();
25520
25521 /*
25522 * It's enough to flush this one mapping.
25523 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25524 pgd = pgd_offset_k((unsigned long)__va(phys));
25525 if (pgd_none(*pgd)) {
25526 pud = (pud_t *) spp_getpage();
25527 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25528 - _PAGE_USER));
25529 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25530 }
25531 pud = pud_offset(pgd, (unsigned long)__va(phys));
25532 if (pud_none(*pud)) {
25533 pmd = (pmd_t *) spp_getpage();
25534 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25535 - _PAGE_USER));
25536 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25537 }
25538 pmd = pmd_offset(pud, phys);
25539 BUG_ON(!pmd_none(*pmd));
25540 @@ -675,6 +675,12 @@ void __init mem_init(void)
25541
25542 pci_iommu_alloc();
25543
25544 +#ifdef CONFIG_PAX_PER_CPU_PGD
25545 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25546 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25547 + KERNEL_PGD_PTRS);
25548 +#endif
25549 +
25550 /* clear_bss() already clear the empty_zero_page */
25551
25552 reservedpages = 0;
25553 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25554 static struct vm_area_struct gate_vma = {
25555 .vm_start = VSYSCALL_START,
25556 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25557 - .vm_page_prot = PAGE_READONLY_EXEC,
25558 - .vm_flags = VM_READ | VM_EXEC
25559 + .vm_page_prot = PAGE_READONLY,
25560 + .vm_flags = VM_READ
25561 };
25562
25563 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25564 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25565
25566 const char *arch_vma_name(struct vm_area_struct *vma)
25567 {
25568 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25569 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25570 return "[vdso]";
25571 if (vma == &gate_vma)
25572 return "[vsyscall]";
25573 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25574 index 84e236c..69bd3f6 100644
25575 --- a/arch/x86/mm/iomap_32.c
25576 +++ b/arch/x86/mm/iomap_32.c
25577 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25578 debug_kmap_atomic(type);
25579 idx = type + KM_TYPE_NR * smp_processor_id();
25580 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25581 +
25582 + pax_open_kernel();
25583 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25584 + pax_close_kernel();
25585 +
25586 arch_flush_lazy_mmu_mode();
25587
25588 return (void *)vaddr;
25589 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25590 index 2feb9bd..ab91e7b 100644
25591 --- a/arch/x86/mm/ioremap.c
25592 +++ b/arch/x86/mm/ioremap.c
25593 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25594 * Second special case: Some BIOSen report the PC BIOS
25595 * area (640->1Mb) as ram even though it is not.
25596 */
25597 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25598 - pagenr < (BIOS_END >> PAGE_SHIFT))
25599 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25600 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25601 return 0;
25602
25603 for (i = 0; i < e820.nr_map; i++) {
25604 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25605 /*
25606 * Don't allow anybody to remap normal RAM that we're using..
25607 */
25608 - for (pfn = phys_addr >> PAGE_SHIFT;
25609 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25610 - pfn++) {
25611 -
25612 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25613 int is_ram = page_is_ram(pfn);
25614
25615 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25616 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25617 return NULL;
25618 WARN_ON_ONCE(is_ram);
25619 }
25620 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25621
25622 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25623 if (page_is_ram(start >> PAGE_SHIFT))
25624 +#ifdef CONFIG_HIGHMEM
25625 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25626 +#endif
25627 return __va(phys);
25628
25629 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25630 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25631 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25632
25633 static __initdata int after_paging_init;
25634 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25635 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25636
25637 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25638 {
25639 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25640 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25641
25642 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25643 - memset(bm_pte, 0, sizeof(bm_pte));
25644 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25645 + pmd_populate_user(&init_mm, pmd, bm_pte);
25646
25647 /*
25648 * The boot-ioremap range spans multiple pmds, for which
25649 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25650 index 8cc1833..1abbc5b 100644
25651 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25652 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25653 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25654 * memory (e.g. tracked pages)? For now, we need this to avoid
25655 * invoking kmemcheck for PnP BIOS calls.
25656 */
25657 - if (regs->flags & X86_VM_MASK)
25658 + if (v8086_mode(regs))
25659 return false;
25660 - if (regs->cs != __KERNEL_CS)
25661 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25662 return false;
25663
25664 pte = kmemcheck_pte_lookup(address);
25665 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25666 index c9e57af..07a321b 100644
25667 --- a/arch/x86/mm/mmap.c
25668 +++ b/arch/x86/mm/mmap.c
25669 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25670 * Leave an at least ~128 MB hole with possible stack randomization.
25671 */
25672 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25673 -#define MAX_GAP (TASK_SIZE/6*5)
25674 +#define MAX_GAP (pax_task_size/6*5)
25675
25676 /*
25677 * True on X86_32 or when emulating IA32 on X86_64
25678 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25679 return rnd << PAGE_SHIFT;
25680 }
25681
25682 -static unsigned long mmap_base(void)
25683 +static unsigned long mmap_base(struct mm_struct *mm)
25684 {
25685 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25686 + unsigned long pax_task_size = TASK_SIZE;
25687 +
25688 +#ifdef CONFIG_PAX_SEGMEXEC
25689 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25690 + pax_task_size = SEGMEXEC_TASK_SIZE;
25691 +#endif
25692
25693 if (gap < MIN_GAP)
25694 gap = MIN_GAP;
25695 else if (gap > MAX_GAP)
25696 gap = MAX_GAP;
25697
25698 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25699 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25700 }
25701
25702 /*
25703 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25704 * does, but not when emulating X86_32
25705 */
25706 -static unsigned long mmap_legacy_base(void)
25707 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25708 {
25709 - if (mmap_is_ia32())
25710 + if (mmap_is_ia32()) {
25711 +
25712 +#ifdef CONFIG_PAX_SEGMEXEC
25713 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25714 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25715 + else
25716 +#endif
25717 +
25718 return TASK_UNMAPPED_BASE;
25719 - else
25720 + } else
25721 return TASK_UNMAPPED_BASE + mmap_rnd();
25722 }
25723
25724 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25725 void arch_pick_mmap_layout(struct mm_struct *mm)
25726 {
25727 if (mmap_is_legacy()) {
25728 - mm->mmap_base = mmap_legacy_base();
25729 + mm->mmap_base = mmap_legacy_base(mm);
25730 +
25731 +#ifdef CONFIG_PAX_RANDMMAP
25732 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25733 + mm->mmap_base += mm->delta_mmap;
25734 +#endif
25735 +
25736 mm->get_unmapped_area = arch_get_unmapped_area;
25737 mm->unmap_area = arch_unmap_area;
25738 } else {
25739 - mm->mmap_base = mmap_base();
25740 + mm->mmap_base = mmap_base(mm);
25741 +
25742 +#ifdef CONFIG_PAX_RANDMMAP
25743 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25744 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25745 +#endif
25746 +
25747 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25748 mm->unmap_area = arch_unmap_area_topdown;
25749 }
25750 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25751 index 132772a..b961f11 100644
25752 --- a/arch/x86/mm/mmio-mod.c
25753 +++ b/arch/x86/mm/mmio-mod.c
25754 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25755 break;
25756 default:
25757 {
25758 - unsigned char *ip = (unsigned char *)instptr;
25759 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25760 my_trace->opcode = MMIO_UNKNOWN_OP;
25761 my_trace->width = 0;
25762 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25763 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25764 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25765 void __iomem *addr)
25766 {
25767 - static atomic_t next_id;
25768 + static atomic_unchecked_t next_id;
25769 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25770 /* These are page-unaligned. */
25771 struct mmiotrace_map map = {
25772 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25773 .private = trace
25774 },
25775 .phys = offset,
25776 - .id = atomic_inc_return(&next_id)
25777 + .id = atomic_inc_return_unchecked(&next_id)
25778 };
25779 map.map_id = trace->id;
25780
25781 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25782 index d253006..e56dd6a 100644
25783 --- a/arch/x86/mm/numa_32.c
25784 +++ b/arch/x86/mm/numa_32.c
25785 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25786 }
25787 #endif
25788
25789 -extern unsigned long find_max_low_pfn(void);
25790 extern unsigned long highend_pfn, highstart_pfn;
25791
25792 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25793 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25794 index e1d1069..2251ff3 100644
25795 --- a/arch/x86/mm/pageattr-test.c
25796 +++ b/arch/x86/mm/pageattr-test.c
25797 @@ -36,7 +36,7 @@ enum {
25798
25799 static int pte_testbit(pte_t pte)
25800 {
25801 - return pte_flags(pte) & _PAGE_UNUSED1;
25802 + return pte_flags(pte) & _PAGE_CPA_TEST;
25803 }
25804
25805 struct split_state {
25806 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25807 index dd38bfb..b72c63e 100644
25808 --- a/arch/x86/mm/pageattr.c
25809 +++ b/arch/x86/mm/pageattr.c
25810 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25811 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25812 */
25813 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25814 - pgprot_val(forbidden) |= _PAGE_NX;
25815 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25816
25817 /*
25818 * The kernel text needs to be executable for obvious reasons
25819 * Does not cover __inittext since that is gone later on. On
25820 * 64bit we do not enforce !NX on the low mapping
25821 */
25822 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25823 - pgprot_val(forbidden) |= _PAGE_NX;
25824 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25825 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25826
25827 +#ifdef CONFIG_DEBUG_RODATA
25828 /*
25829 * The .rodata section needs to be read-only. Using the pfn
25830 * catches all aliases.
25831 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25832 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25833 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25834 pgprot_val(forbidden) |= _PAGE_RW;
25835 +#endif
25836 +
25837 +#ifdef CONFIG_PAX_KERNEXEC
25838 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25839 + pgprot_val(forbidden) |= _PAGE_RW;
25840 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25841 + }
25842 +#endif
25843
25844 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25845
25846 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25847 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25848 {
25849 /* change init_mm */
25850 + pax_open_kernel();
25851 set_pte_atomic(kpte, pte);
25852 +
25853 #ifdef CONFIG_X86_32
25854 if (!SHARED_KERNEL_PMD) {
25855 +
25856 +#ifdef CONFIG_PAX_PER_CPU_PGD
25857 + unsigned long cpu;
25858 +#else
25859 struct page *page;
25860 +#endif
25861
25862 +#ifdef CONFIG_PAX_PER_CPU_PGD
25863 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25864 + pgd_t *pgd = get_cpu_pgd(cpu);
25865 +#else
25866 list_for_each_entry(page, &pgd_list, lru) {
25867 - pgd_t *pgd;
25868 + pgd_t *pgd = (pgd_t *)page_address(page);
25869 +#endif
25870 +
25871 pud_t *pud;
25872 pmd_t *pmd;
25873
25874 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25875 + pgd += pgd_index(address);
25876 pud = pud_offset(pgd, address);
25877 pmd = pmd_offset(pud, address);
25878 set_pte_atomic((pte_t *)pmd, pte);
25879 }
25880 }
25881 #endif
25882 + pax_close_kernel();
25883 }
25884
25885 static int
25886 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25887 index e78cd0e..de0a817 100644
25888 --- a/arch/x86/mm/pat.c
25889 +++ b/arch/x86/mm/pat.c
25890 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25891
25892 conflict:
25893 printk(KERN_INFO "%s:%d conflicting memory types "
25894 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25895 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25896 new->end, cattr_name(new->type), cattr_name(entry->type));
25897 return -EBUSY;
25898 }
25899 @@ -559,7 +559,7 @@ unlock_ret:
25900
25901 if (err) {
25902 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25903 - current->comm, current->pid, start, end);
25904 + current->comm, task_pid_nr(current), start, end);
25905 }
25906
25907 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25908 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25909 while (cursor < to) {
25910 if (!devmem_is_allowed(pfn)) {
25911 printk(KERN_INFO
25912 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25913 - current->comm, from, to);
25914 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25915 + current->comm, from, to, cursor);
25916 return 0;
25917 }
25918 cursor += PAGE_SIZE;
25919 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25920 printk(KERN_INFO
25921 "%s:%d ioremap_change_attr failed %s "
25922 "for %Lx-%Lx\n",
25923 - current->comm, current->pid,
25924 + current->comm, task_pid_nr(current),
25925 cattr_name(flags),
25926 base, (unsigned long long)(base + size));
25927 return -EINVAL;
25928 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25929 free_memtype(paddr, paddr + size);
25930 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25931 " for %Lx-%Lx, got %s\n",
25932 - current->comm, current->pid,
25933 + current->comm, task_pid_nr(current),
25934 cattr_name(want_flags),
25935 (unsigned long long)paddr,
25936 (unsigned long long)(paddr + size),
25937 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25938 index df3d5c8..c2223e1 100644
25939 --- a/arch/x86/mm/pf_in.c
25940 +++ b/arch/x86/mm/pf_in.c
25941 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25942 int i;
25943 enum reason_type rv = OTHERS;
25944
25945 - p = (unsigned char *)ins_addr;
25946 + p = (unsigned char *)ktla_ktva(ins_addr);
25947 p += skip_prefix(p, &prf);
25948 p += get_opcode(p, &opcode);
25949
25950 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25951 struct prefix_bits prf;
25952 int i;
25953
25954 - p = (unsigned char *)ins_addr;
25955 + p = (unsigned char *)ktla_ktva(ins_addr);
25956 p += skip_prefix(p, &prf);
25957 p += get_opcode(p, &opcode);
25958
25959 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25960 struct prefix_bits prf;
25961 int i;
25962
25963 - p = (unsigned char *)ins_addr;
25964 + p = (unsigned char *)ktla_ktva(ins_addr);
25965 p += skip_prefix(p, &prf);
25966 p += get_opcode(p, &opcode);
25967
25968 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25969 int i;
25970 unsigned long rv;
25971
25972 - p = (unsigned char *)ins_addr;
25973 + p = (unsigned char *)ktla_ktva(ins_addr);
25974 p += skip_prefix(p, &prf);
25975 p += get_opcode(p, &opcode);
25976 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25977 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25978 int i;
25979 unsigned long rv;
25980
25981 - p = (unsigned char *)ins_addr;
25982 + p = (unsigned char *)ktla_ktva(ins_addr);
25983 p += skip_prefix(p, &prf);
25984 p += get_opcode(p, &opcode);
25985 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25986 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25987 index e0e6fad..c56b495 100644
25988 --- a/arch/x86/mm/pgtable.c
25989 +++ b/arch/x86/mm/pgtable.c
25990 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25991 list_del(&page->lru);
25992 }
25993
25994 -#define UNSHARED_PTRS_PER_PGD \
25995 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25996 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25997 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25998
25999 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26000 +{
26001 + while (count--)
26002 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26003 +}
26004 +#endif
26005 +
26006 +#ifdef CONFIG_PAX_PER_CPU_PGD
26007 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26008 +{
26009 + while (count--)
26010 +
26011 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26012 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26013 +#else
26014 + *dst++ = *src++;
26015 +#endif
26016 +
26017 +}
26018 +#endif
26019 +
26020 +#ifdef CONFIG_X86_64
26021 +#define pxd_t pud_t
26022 +#define pyd_t pgd_t
26023 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26024 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26025 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26026 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26027 +#define PYD_SIZE PGDIR_SIZE
26028 +#else
26029 +#define pxd_t pmd_t
26030 +#define pyd_t pud_t
26031 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26032 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26033 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26034 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26035 +#define PYD_SIZE PUD_SIZE
26036 +#endif
26037 +
26038 +#ifdef CONFIG_PAX_PER_CPU_PGD
26039 +static inline void pgd_ctor(pgd_t *pgd) {}
26040 +static inline void pgd_dtor(pgd_t *pgd) {}
26041 +#else
26042 static void pgd_ctor(pgd_t *pgd)
26043 {
26044 /* If the pgd points to a shared pagetable level (either the
26045 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26046 pgd_list_del(pgd);
26047 spin_unlock_irqrestore(&pgd_lock, flags);
26048 }
26049 +#endif
26050
26051 /*
26052 * List of all pgd's needed for non-PAE so it can invalidate entries
26053 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26054 * -- wli
26055 */
26056
26057 -#ifdef CONFIG_X86_PAE
26058 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26059 /*
26060 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26061 * updating the top-level pagetable entries to guarantee the
26062 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26063 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26064 * and initialize the kernel pmds here.
26065 */
26066 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26067 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26068
26069 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26070 {
26071 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26072 */
26073 flush_tlb_mm(mm);
26074 }
26075 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26076 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26077 #else /* !CONFIG_X86_PAE */
26078
26079 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26080 -#define PREALLOCATED_PMDS 0
26081 +#define PREALLOCATED_PXDS 0
26082
26083 #endif /* CONFIG_X86_PAE */
26084
26085 -static void free_pmds(pmd_t *pmds[])
26086 +static void free_pxds(pxd_t *pxds[])
26087 {
26088 int i;
26089
26090 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26091 - if (pmds[i])
26092 - free_page((unsigned long)pmds[i]);
26093 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26094 + if (pxds[i])
26095 + free_page((unsigned long)pxds[i]);
26096 }
26097
26098 -static int preallocate_pmds(pmd_t *pmds[])
26099 +static int preallocate_pxds(pxd_t *pxds[])
26100 {
26101 int i;
26102 bool failed = false;
26103
26104 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26105 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26106 - if (pmd == NULL)
26107 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26108 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26109 + if (pxd == NULL)
26110 failed = true;
26111 - pmds[i] = pmd;
26112 + pxds[i] = pxd;
26113 }
26114
26115 if (failed) {
26116 - free_pmds(pmds);
26117 + free_pxds(pxds);
26118 return -ENOMEM;
26119 }
26120
26121 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26122 * preallocate which never got a corresponding vma will need to be
26123 * freed manually.
26124 */
26125 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26126 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26127 {
26128 int i;
26129
26130 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26131 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26132 pgd_t pgd = pgdp[i];
26133
26134 if (pgd_val(pgd) != 0) {
26135 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26136 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26137
26138 - pgdp[i] = native_make_pgd(0);
26139 + set_pgd(pgdp + i, native_make_pgd(0));
26140
26141 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26142 - pmd_free(mm, pmd);
26143 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26144 + pxd_free(mm, pxd);
26145 }
26146 }
26147 }
26148
26149 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26150 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26151 {
26152 - pud_t *pud;
26153 + pyd_t *pyd;
26154 unsigned long addr;
26155 int i;
26156
26157 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26158 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26159 return;
26160
26161 - pud = pud_offset(pgd, 0);
26162 +#ifdef CONFIG_X86_64
26163 + pyd = pyd_offset(mm, 0L);
26164 +#else
26165 + pyd = pyd_offset(pgd, 0L);
26166 +#endif
26167
26168 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26169 - i++, pud++, addr += PUD_SIZE) {
26170 - pmd_t *pmd = pmds[i];
26171 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26172 + i++, pyd++, addr += PYD_SIZE) {
26173 + pxd_t *pxd = pxds[i];
26174
26175 if (i >= KERNEL_PGD_BOUNDARY)
26176 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26177 - sizeof(pmd_t) * PTRS_PER_PMD);
26178 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26179 + sizeof(pxd_t) * PTRS_PER_PMD);
26180
26181 - pud_populate(mm, pud, pmd);
26182 + pyd_populate(mm, pyd, pxd);
26183 }
26184 }
26185
26186 pgd_t *pgd_alloc(struct mm_struct *mm)
26187 {
26188 pgd_t *pgd;
26189 - pmd_t *pmds[PREALLOCATED_PMDS];
26190 + pxd_t *pxds[PREALLOCATED_PXDS];
26191 +
26192 unsigned long flags;
26193
26194 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26195 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26196
26197 mm->pgd = pgd;
26198
26199 - if (preallocate_pmds(pmds) != 0)
26200 + if (preallocate_pxds(pxds) != 0)
26201 goto out_free_pgd;
26202
26203 if (paravirt_pgd_alloc(mm) != 0)
26204 - goto out_free_pmds;
26205 + goto out_free_pxds;
26206
26207 /*
26208 * Make sure that pre-populating the pmds is atomic with
26209 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26210 spin_lock_irqsave(&pgd_lock, flags);
26211
26212 pgd_ctor(pgd);
26213 - pgd_prepopulate_pmd(mm, pgd, pmds);
26214 + pgd_prepopulate_pxd(mm, pgd, pxds);
26215
26216 spin_unlock_irqrestore(&pgd_lock, flags);
26217
26218 return pgd;
26219
26220 -out_free_pmds:
26221 - free_pmds(pmds);
26222 +out_free_pxds:
26223 + free_pxds(pxds);
26224 out_free_pgd:
26225 free_page((unsigned long)pgd);
26226 out:
26227 @@ -287,7 +338,7 @@ out:
26228
26229 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26230 {
26231 - pgd_mop_up_pmds(mm, pgd);
26232 + pgd_mop_up_pxds(mm, pgd);
26233 pgd_dtor(pgd);
26234 paravirt_pgd_free(mm, pgd);
26235 free_page((unsigned long)pgd);
26236 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26237 index 46c8834..fcab43d 100644
26238 --- a/arch/x86/mm/pgtable_32.c
26239 +++ b/arch/x86/mm/pgtable_32.c
26240 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26241 return;
26242 }
26243 pte = pte_offset_kernel(pmd, vaddr);
26244 +
26245 + pax_open_kernel();
26246 if (pte_val(pteval))
26247 set_pte_at(&init_mm, vaddr, pte, pteval);
26248 else
26249 pte_clear(&init_mm, vaddr, pte);
26250 + pax_close_kernel();
26251
26252 /*
26253 * It's enough to flush this one mapping.
26254 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26255 index 513d8ed..978c161 100644
26256 --- a/arch/x86/mm/setup_nx.c
26257 +++ b/arch/x86/mm/setup_nx.c
26258 @@ -4,11 +4,10 @@
26259
26260 #include <asm/pgtable.h>
26261
26262 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26263 int nx_enabled;
26264
26265 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26266 -static int disable_nx __cpuinitdata;
26267 -
26268 +#ifndef CONFIG_PAX_PAGEEXEC
26269 /*
26270 * noexec = on|off
26271 *
26272 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26273 if (!str)
26274 return -EINVAL;
26275 if (!strncmp(str, "on", 2)) {
26276 - __supported_pte_mask |= _PAGE_NX;
26277 - disable_nx = 0;
26278 + nx_enabled = 1;
26279 } else if (!strncmp(str, "off", 3)) {
26280 - disable_nx = 1;
26281 - __supported_pte_mask &= ~_PAGE_NX;
26282 + nx_enabled = 0;
26283 }
26284 return 0;
26285 }
26286 early_param("noexec", noexec_setup);
26287 #endif
26288 +#endif
26289
26290 #ifdef CONFIG_X86_PAE
26291 void __init set_nx(void)
26292 {
26293 - unsigned int v[4], l, h;
26294 + if (!nx_enabled && cpu_has_nx) {
26295 + unsigned l, h;
26296
26297 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26298 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26299 -
26300 - if ((v[3] & (1 << 20)) && !disable_nx) {
26301 - rdmsr(MSR_EFER, l, h);
26302 - l |= EFER_NX;
26303 - wrmsr(MSR_EFER, l, h);
26304 - nx_enabled = 1;
26305 - __supported_pte_mask |= _PAGE_NX;
26306 - }
26307 + __supported_pte_mask &= ~_PAGE_NX;
26308 + rdmsr(MSR_EFER, l, h);
26309 + l &= ~EFER_NX;
26310 + wrmsr(MSR_EFER, l, h);
26311 }
26312 }
26313 #else
26314 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26315 unsigned long efer;
26316
26317 rdmsrl(MSR_EFER, efer);
26318 - if (!(efer & EFER_NX) || disable_nx)
26319 + if (!(efer & EFER_NX) || !nx_enabled)
26320 __supported_pte_mask &= ~_PAGE_NX;
26321 }
26322 #endif
26323 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26324 index 36fe08e..b123d3a 100644
26325 --- a/arch/x86/mm/tlb.c
26326 +++ b/arch/x86/mm/tlb.c
26327 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26328 BUG();
26329 cpumask_clear_cpu(cpu,
26330 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26331 +
26332 +#ifndef CONFIG_PAX_PER_CPU_PGD
26333 load_cr3(swapper_pg_dir);
26334 +#endif
26335 +
26336 }
26337 EXPORT_SYMBOL_GPL(leave_mm);
26338
26339 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26340 index 829edf0..672adb3 100644
26341 --- a/arch/x86/oprofile/backtrace.c
26342 +++ b/arch/x86/oprofile/backtrace.c
26343 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26344 {
26345 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26346
26347 - if (!user_mode_vm(regs)) {
26348 + if (!user_mode(regs)) {
26349 unsigned long stack = kernel_stack_pointer(regs);
26350 if (depth)
26351 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26352 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26353 index e6a160a..36deff6 100644
26354 --- a/arch/x86/oprofile/op_model_p4.c
26355 +++ b/arch/x86/oprofile/op_model_p4.c
26356 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26357 #endif
26358 }
26359
26360 -static int inline addr_increment(void)
26361 +static inline int addr_increment(void)
26362 {
26363 #ifdef CONFIG_SMP
26364 return smp_num_siblings == 2 ? 2 : 1;
26365 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26366 index 1331fcf..03901b2 100644
26367 --- a/arch/x86/pci/common.c
26368 +++ b/arch/x86/pci/common.c
26369 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26370 int pcibios_last_bus = -1;
26371 unsigned long pirq_table_addr;
26372 struct pci_bus *pci_root_bus;
26373 -struct pci_raw_ops *raw_pci_ops;
26374 -struct pci_raw_ops *raw_pci_ext_ops;
26375 +const struct pci_raw_ops *raw_pci_ops;
26376 +const struct pci_raw_ops *raw_pci_ext_ops;
26377
26378 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26379 int reg, int len, u32 *val)
26380 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26381 index 347d882..4baf6b6 100644
26382 --- a/arch/x86/pci/direct.c
26383 +++ b/arch/x86/pci/direct.c
26384 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26385
26386 #undef PCI_CONF1_ADDRESS
26387
26388 -struct pci_raw_ops pci_direct_conf1 = {
26389 +const struct pci_raw_ops pci_direct_conf1 = {
26390 .read = pci_conf1_read,
26391 .write = pci_conf1_write,
26392 };
26393 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26394
26395 #undef PCI_CONF2_ADDRESS
26396
26397 -struct pci_raw_ops pci_direct_conf2 = {
26398 +const struct pci_raw_ops pci_direct_conf2 = {
26399 .read = pci_conf2_read,
26400 .write = pci_conf2_write,
26401 };
26402 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26403 * This should be close to trivial, but it isn't, because there are buggy
26404 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26405 */
26406 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26407 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26408 {
26409 u32 x = 0;
26410 int year, devfn;
26411 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26412 index f10a7e9..0425342 100644
26413 --- a/arch/x86/pci/mmconfig_32.c
26414 +++ b/arch/x86/pci/mmconfig_32.c
26415 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26416 return 0;
26417 }
26418
26419 -static struct pci_raw_ops pci_mmcfg = {
26420 +static const struct pci_raw_ops pci_mmcfg = {
26421 .read = pci_mmcfg_read,
26422 .write = pci_mmcfg_write,
26423 };
26424 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26425 index 94349f8..41600a7 100644
26426 --- a/arch/x86/pci/mmconfig_64.c
26427 +++ b/arch/x86/pci/mmconfig_64.c
26428 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26429 return 0;
26430 }
26431
26432 -static struct pci_raw_ops pci_mmcfg = {
26433 +static const struct pci_raw_ops pci_mmcfg = {
26434 .read = pci_mmcfg_read,
26435 .write = pci_mmcfg_write,
26436 };
26437 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26438 index 8eb295e..86bd657 100644
26439 --- a/arch/x86/pci/numaq_32.c
26440 +++ b/arch/x86/pci/numaq_32.c
26441 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26442
26443 #undef PCI_CONF1_MQ_ADDRESS
26444
26445 -static struct pci_raw_ops pci_direct_conf1_mq = {
26446 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26447 .read = pci_conf1_mq_read,
26448 .write = pci_conf1_mq_write
26449 };
26450 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26451 index b889d82..5a58a0a 100644
26452 --- a/arch/x86/pci/olpc.c
26453 +++ b/arch/x86/pci/olpc.c
26454 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26455 return 0;
26456 }
26457
26458 -static struct pci_raw_ops pci_olpc_conf = {
26459 +static const struct pci_raw_ops pci_olpc_conf = {
26460 .read = pci_olpc_read,
26461 .write = pci_olpc_write,
26462 };
26463 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26464 index 1c975cc..b8e16c2 100644
26465 --- a/arch/x86/pci/pcbios.c
26466 +++ b/arch/x86/pci/pcbios.c
26467 @@ -56,50 +56,93 @@ union bios32 {
26468 static struct {
26469 unsigned long address;
26470 unsigned short segment;
26471 -} bios32_indirect = { 0, __KERNEL_CS };
26472 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26473
26474 /*
26475 * Returns the entry point for the given service, NULL on error
26476 */
26477
26478 -static unsigned long bios32_service(unsigned long service)
26479 +static unsigned long __devinit bios32_service(unsigned long service)
26480 {
26481 unsigned char return_code; /* %al */
26482 unsigned long address; /* %ebx */
26483 unsigned long length; /* %ecx */
26484 unsigned long entry; /* %edx */
26485 unsigned long flags;
26486 + struct desc_struct d, *gdt;
26487
26488 local_irq_save(flags);
26489 - __asm__("lcall *(%%edi); cld"
26490 +
26491 + gdt = get_cpu_gdt_table(smp_processor_id());
26492 +
26493 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26494 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26495 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26496 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26497 +
26498 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26499 : "=a" (return_code),
26500 "=b" (address),
26501 "=c" (length),
26502 "=d" (entry)
26503 : "0" (service),
26504 "1" (0),
26505 - "D" (&bios32_indirect));
26506 + "D" (&bios32_indirect),
26507 + "r"(__PCIBIOS_DS)
26508 + : "memory");
26509 +
26510 + pax_open_kernel();
26511 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26512 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26513 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26514 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26515 + pax_close_kernel();
26516 +
26517 local_irq_restore(flags);
26518
26519 switch (return_code) {
26520 - case 0:
26521 - return address + entry;
26522 - case 0x80: /* Not present */
26523 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26524 - return 0;
26525 - default: /* Shouldn't happen */
26526 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26527 - service, return_code);
26528 + case 0: {
26529 + int cpu;
26530 + unsigned char flags;
26531 +
26532 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26533 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26534 + printk(KERN_WARNING "bios32_service: not valid\n");
26535 return 0;
26536 + }
26537 + address = address + PAGE_OFFSET;
26538 + length += 16UL; /* some BIOSs underreport this... */
26539 + flags = 4;
26540 + if (length >= 64*1024*1024) {
26541 + length >>= PAGE_SHIFT;
26542 + flags |= 8;
26543 + }
26544 +
26545 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26546 + gdt = get_cpu_gdt_table(cpu);
26547 + pack_descriptor(&d, address, length, 0x9b, flags);
26548 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26549 + pack_descriptor(&d, address, length, 0x93, flags);
26550 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26551 + }
26552 + return entry;
26553 + }
26554 + case 0x80: /* Not present */
26555 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26556 + return 0;
26557 + default: /* Shouldn't happen */
26558 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26559 + service, return_code);
26560 + return 0;
26561 }
26562 }
26563
26564 static struct {
26565 unsigned long address;
26566 unsigned short segment;
26567 -} pci_indirect = { 0, __KERNEL_CS };
26568 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26569
26570 -static int pci_bios_present;
26571 +static int pci_bios_present __read_only;
26572
26573 static int __devinit check_pcibios(void)
26574 {
26575 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26576 unsigned long flags, pcibios_entry;
26577
26578 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26579 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26580 + pci_indirect.address = pcibios_entry;
26581
26582 local_irq_save(flags);
26583 - __asm__(
26584 - "lcall *(%%edi); cld\n\t"
26585 + __asm__("movw %w6, %%ds\n\t"
26586 + "lcall *%%ss:(%%edi); cld\n\t"
26587 + "push %%ss\n\t"
26588 + "pop %%ds\n\t"
26589 "jc 1f\n\t"
26590 "xor %%ah, %%ah\n"
26591 "1:"
26592 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26593 "=b" (ebx),
26594 "=c" (ecx)
26595 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26596 - "D" (&pci_indirect)
26597 + "D" (&pci_indirect),
26598 + "r" (__PCIBIOS_DS)
26599 : "memory");
26600 local_irq_restore(flags);
26601
26602 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26603
26604 switch (len) {
26605 case 1:
26606 - __asm__("lcall *(%%esi); cld\n\t"
26607 + __asm__("movw %w6, %%ds\n\t"
26608 + "lcall *%%ss:(%%esi); cld\n\t"
26609 + "push %%ss\n\t"
26610 + "pop %%ds\n\t"
26611 "jc 1f\n\t"
26612 "xor %%ah, %%ah\n"
26613 "1:"
26614 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26615 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26616 "b" (bx),
26617 "D" ((long)reg),
26618 - "S" (&pci_indirect));
26619 + "S" (&pci_indirect),
26620 + "r" (__PCIBIOS_DS));
26621 /*
26622 * Zero-extend the result beyond 8 bits, do not trust the
26623 * BIOS having done it:
26624 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26625 *value &= 0xff;
26626 break;
26627 case 2:
26628 - __asm__("lcall *(%%esi); cld\n\t"
26629 + __asm__("movw %w6, %%ds\n\t"
26630 + "lcall *%%ss:(%%esi); cld\n\t"
26631 + "push %%ss\n\t"
26632 + "pop %%ds\n\t"
26633 "jc 1f\n\t"
26634 "xor %%ah, %%ah\n"
26635 "1:"
26636 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26637 : "1" (PCIBIOS_READ_CONFIG_WORD),
26638 "b" (bx),
26639 "D" ((long)reg),
26640 - "S" (&pci_indirect));
26641 + "S" (&pci_indirect),
26642 + "r" (__PCIBIOS_DS));
26643 /*
26644 * Zero-extend the result beyond 16 bits, do not trust the
26645 * BIOS having done it:
26646 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26647 *value &= 0xffff;
26648 break;
26649 case 4:
26650 - __asm__("lcall *(%%esi); cld\n\t"
26651 + __asm__("movw %w6, %%ds\n\t"
26652 + "lcall *%%ss:(%%esi); cld\n\t"
26653 + "push %%ss\n\t"
26654 + "pop %%ds\n\t"
26655 "jc 1f\n\t"
26656 "xor %%ah, %%ah\n"
26657 "1:"
26658 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26659 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26660 "b" (bx),
26661 "D" ((long)reg),
26662 - "S" (&pci_indirect));
26663 + "S" (&pci_indirect),
26664 + "r" (__PCIBIOS_DS));
26665 break;
26666 }
26667
26668 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26669
26670 switch (len) {
26671 case 1:
26672 - __asm__("lcall *(%%esi); cld\n\t"
26673 + __asm__("movw %w6, %%ds\n\t"
26674 + "lcall *%%ss:(%%esi); cld\n\t"
26675 + "push %%ss\n\t"
26676 + "pop %%ds\n\t"
26677 "jc 1f\n\t"
26678 "xor %%ah, %%ah\n"
26679 "1:"
26680 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26681 "c" (value),
26682 "b" (bx),
26683 "D" ((long)reg),
26684 - "S" (&pci_indirect));
26685 + "S" (&pci_indirect),
26686 + "r" (__PCIBIOS_DS));
26687 break;
26688 case 2:
26689 - __asm__("lcall *(%%esi); cld\n\t"
26690 + __asm__("movw %w6, %%ds\n\t"
26691 + "lcall *%%ss:(%%esi); cld\n\t"
26692 + "push %%ss\n\t"
26693 + "pop %%ds\n\t"
26694 "jc 1f\n\t"
26695 "xor %%ah, %%ah\n"
26696 "1:"
26697 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26698 "c" (value),
26699 "b" (bx),
26700 "D" ((long)reg),
26701 - "S" (&pci_indirect));
26702 + "S" (&pci_indirect),
26703 + "r" (__PCIBIOS_DS));
26704 break;
26705 case 4:
26706 - __asm__("lcall *(%%esi); cld\n\t"
26707 + __asm__("movw %w6, %%ds\n\t"
26708 + "lcall *%%ss:(%%esi); cld\n\t"
26709 + "push %%ss\n\t"
26710 + "pop %%ds\n\t"
26711 "jc 1f\n\t"
26712 "xor %%ah, %%ah\n"
26713 "1:"
26714 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26715 "c" (value),
26716 "b" (bx),
26717 "D" ((long)reg),
26718 - "S" (&pci_indirect));
26719 + "S" (&pci_indirect),
26720 + "r" (__PCIBIOS_DS));
26721 break;
26722 }
26723
26724 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26725 * Function table for BIOS32 access
26726 */
26727
26728 -static struct pci_raw_ops pci_bios_access = {
26729 +static const struct pci_raw_ops pci_bios_access = {
26730 .read = pci_bios_read,
26731 .write = pci_bios_write
26732 };
26733 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26734 * Try to find PCI BIOS.
26735 */
26736
26737 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26738 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26739 {
26740 union bios32 *check;
26741 unsigned char sum;
26742 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26743
26744 DBG("PCI: Fetching IRQ routing table... ");
26745 __asm__("push %%es\n\t"
26746 + "movw %w8, %%ds\n\t"
26747 "push %%ds\n\t"
26748 "pop %%es\n\t"
26749 - "lcall *(%%esi); cld\n\t"
26750 + "lcall *%%ss:(%%esi); cld\n\t"
26751 "pop %%es\n\t"
26752 + "push %%ss\n\t"
26753 + "pop %%ds\n"
26754 "jc 1f\n\t"
26755 "xor %%ah, %%ah\n"
26756 "1:"
26757 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26758 "1" (0),
26759 "D" ((long) &opt),
26760 "S" (&pci_indirect),
26761 - "m" (opt)
26762 + "m" (opt),
26763 + "r" (__PCIBIOS_DS)
26764 : "memory");
26765 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26766 if (ret & 0xff00)
26767 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26768 {
26769 int ret;
26770
26771 - __asm__("lcall *(%%esi); cld\n\t"
26772 + __asm__("movw %w5, %%ds\n\t"
26773 + "lcall *%%ss:(%%esi); cld\n\t"
26774 + "push %%ss\n\t"
26775 + "pop %%ds\n"
26776 "jc 1f\n\t"
26777 "xor %%ah, %%ah\n"
26778 "1:"
26779 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26780 : "0" (PCIBIOS_SET_PCI_HW_INT),
26781 "b" ((dev->bus->number << 8) | dev->devfn),
26782 "c" ((irq << 8) | (pin + 10)),
26783 - "S" (&pci_indirect));
26784 + "S" (&pci_indirect),
26785 + "r" (__PCIBIOS_DS));
26786 return !(ret & 0xff00);
26787 }
26788 EXPORT_SYMBOL(pcibios_set_irq_routing);
26789 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26790 index fa0f651..9d8f3d9 100644
26791 --- a/arch/x86/power/cpu.c
26792 +++ b/arch/x86/power/cpu.c
26793 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26794 static void fix_processor_context(void)
26795 {
26796 int cpu = smp_processor_id();
26797 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26798 + struct tss_struct *t = init_tss + cpu;
26799
26800 set_tss_desc(cpu, t); /*
26801 * This just modifies memory; should not be
26802 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26803 */
26804
26805 #ifdef CONFIG_X86_64
26806 + pax_open_kernel();
26807 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26808 + pax_close_kernel();
26809
26810 syscall_init(); /* This sets MSR_*STAR and related */
26811 #endif
26812 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813 index dd78ef6..f9d928d 100644
26814 --- a/arch/x86/vdso/Makefile
26815 +++ b/arch/x86/vdso/Makefile
26816 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26817 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26818 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26819
26820 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26826 index ee55754..0013b2e 100644
26827 --- a/arch/x86/vdso/vclock_gettime.c
26828 +++ b/arch/x86/vdso/vclock_gettime.c
26829 @@ -22,24 +22,48 @@
26830 #include <asm/hpet.h>
26831 #include <asm/unistd.h>
26832 #include <asm/io.h>
26833 +#include <asm/fixmap.h>
26834 #include "vextern.h"
26835
26836 #define gtod vdso_vsyscall_gtod_data
26837
26838 +notrace noinline long __vdso_fallback_time(long *t)
26839 +{
26840 + long secs;
26841 + asm volatile("syscall"
26842 + : "=a" (secs)
26843 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26844 + return secs;
26845 +}
26846 +
26847 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26848 {
26849 long ret;
26850 asm("syscall" : "=a" (ret) :
26851 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26852 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26853 return ret;
26854 }
26855
26856 +notrace static inline cycle_t __vdso_vread_hpet(void)
26857 +{
26858 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26859 +}
26860 +
26861 +notrace static inline cycle_t __vdso_vread_tsc(void)
26862 +{
26863 + cycle_t ret = (cycle_t)vget_cycles();
26864 +
26865 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26866 +}
26867 +
26868 notrace static inline long vgetns(void)
26869 {
26870 long v;
26871 - cycles_t (*vread)(void);
26872 - vread = gtod->clock.vread;
26873 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26874 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26875 + v = __vdso_vread_tsc();
26876 + else
26877 + v = __vdso_vread_hpet();
26878 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26879 return (v * gtod->clock.mult) >> gtod->clock.shift;
26880 }
26881
26882 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26883
26884 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26885 {
26886 - if (likely(gtod->sysctl_enabled))
26887 + if (likely(gtod->sysctl_enabled &&
26888 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26889 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26890 switch (clock) {
26891 case CLOCK_REALTIME:
26892 if (likely(gtod->clock.vread))
26893 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26894 int clock_gettime(clockid_t, struct timespec *)
26895 __attribute__((weak, alias("__vdso_clock_gettime")));
26896
26897 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26898 +{
26899 + long ret;
26900 + asm("syscall" : "=a" (ret) :
26901 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26902 + return ret;
26903 +}
26904 +
26905 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26906 {
26907 - long ret;
26908 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26909 + if (likely(gtod->sysctl_enabled &&
26910 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26911 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26912 + {
26913 if (likely(tv != NULL)) {
26914 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26915 offsetof(struct timespec, tv_nsec) ||
26916 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26917 }
26918 return 0;
26919 }
26920 - asm("syscall" : "=a" (ret) :
26921 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26922 - return ret;
26923 + return __vdso_fallback_gettimeofday(tv, tz);
26924 }
26925 int gettimeofday(struct timeval *, struct timezone *)
26926 __attribute__((weak, alias("__vdso_gettimeofday")));
26927 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26928 index 4e5dd3b..00ba15e 100644
26929 --- a/arch/x86/vdso/vdso.lds.S
26930 +++ b/arch/x86/vdso/vdso.lds.S
26931 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26932 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26933 #include "vextern.h"
26934 #undef VEXTERN
26935 +
26936 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26937 +VEXTERN(fallback_gettimeofday)
26938 +VEXTERN(fallback_time)
26939 +VEXTERN(getcpu)
26940 +#undef VEXTERN
26941 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26942 index 58bc00f..d53fb48 100644
26943 --- a/arch/x86/vdso/vdso32-setup.c
26944 +++ b/arch/x86/vdso/vdso32-setup.c
26945 @@ -25,6 +25,7 @@
26946 #include <asm/tlbflush.h>
26947 #include <asm/vdso.h>
26948 #include <asm/proto.h>
26949 +#include <asm/mman.h>
26950
26951 enum {
26952 VDSO_DISABLED = 0,
26953 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26954 void enable_sep_cpu(void)
26955 {
26956 int cpu = get_cpu();
26957 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26958 + struct tss_struct *tss = init_tss + cpu;
26959
26960 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26961 put_cpu();
26962 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26963 gate_vma.vm_start = FIXADDR_USER_START;
26964 gate_vma.vm_end = FIXADDR_USER_END;
26965 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26966 - gate_vma.vm_page_prot = __P101;
26967 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26968 /*
26969 * Make sure the vDSO gets into every core dump.
26970 * Dumping its contents makes post-mortem fully interpretable later
26971 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26972 if (compat)
26973 addr = VDSO_HIGH_BASE;
26974 else {
26975 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26976 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26977 if (IS_ERR_VALUE(addr)) {
26978 ret = addr;
26979 goto up_fail;
26980 }
26981 }
26982
26983 - current->mm->context.vdso = (void *)addr;
26984 + current->mm->context.vdso = addr;
26985
26986 if (compat_uses_vma || !compat) {
26987 /*
26988 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26989 }
26990
26991 current_thread_info()->sysenter_return =
26992 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26993 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26994
26995 up_fail:
26996 if (ret)
26997 - current->mm->context.vdso = NULL;
26998 + current->mm->context.vdso = 0;
26999
27000 up_write(&mm->mmap_sem);
27001
27002 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27003
27004 const char *arch_vma_name(struct vm_area_struct *vma)
27005 {
27006 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27007 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27008 return "[vdso]";
27009 +
27010 +#ifdef CONFIG_PAX_SEGMEXEC
27011 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27012 + return "[vdso]";
27013 +#endif
27014 +
27015 return NULL;
27016 }
27017
27018 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27019 struct mm_struct *mm = tsk->mm;
27020
27021 /* Check to see if this task was created in compat vdso mode */
27022 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27023 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27024 return &gate_vma;
27025 return NULL;
27026 }
27027 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27028 index 1683ba2..48d07f3 100644
27029 --- a/arch/x86/vdso/vextern.h
27030 +++ b/arch/x86/vdso/vextern.h
27031 @@ -11,6 +11,5 @@
27032 put into vextern.h and be referenced as a pointer with vdso prefix.
27033 The main kernel later fills in the values. */
27034
27035 -VEXTERN(jiffies)
27036 VEXTERN(vgetcpu_mode)
27037 VEXTERN(vsyscall_gtod_data)
27038 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27039 index 21e1aeb..2c0b3c4 100644
27040 --- a/arch/x86/vdso/vma.c
27041 +++ b/arch/x86/vdso/vma.c
27042 @@ -17,8 +17,6 @@
27043 #include "vextern.h" /* Just for VMAGIC. */
27044 #undef VEXTERN
27045
27046 -unsigned int __read_mostly vdso_enabled = 1;
27047 -
27048 extern char vdso_start[], vdso_end[];
27049 extern unsigned short vdso_sync_cpuid;
27050
27051 @@ -27,10 +25,8 @@ static unsigned vdso_size;
27052
27053 static inline void *var_ref(void *p, char *name)
27054 {
27055 - if (*(void **)p != (void *)VMAGIC) {
27056 - printk("VDSO: variable %s broken\n", name);
27057 - vdso_enabled = 0;
27058 - }
27059 + if (*(void **)p != (void *)VMAGIC)
27060 + panic("VDSO: variable %s broken\n", name);
27061 return p;
27062 }
27063
27064 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27065 if (!vbase)
27066 goto oom;
27067
27068 - if (memcmp(vbase, "\177ELF", 4)) {
27069 - printk("VDSO: I'm broken; not ELF\n");
27070 - vdso_enabled = 0;
27071 - }
27072 + if (memcmp(vbase, ELFMAG, SELFMAG))
27073 + panic("VDSO: I'm broken; not ELF\n");
27074
27075 #define VEXTERN(x) \
27076 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27077 #include "vextern.h"
27078 #undef VEXTERN
27079 + vunmap(vbase);
27080 return 0;
27081
27082 oom:
27083 - printk("Cannot allocate vdso\n");
27084 - vdso_enabled = 0;
27085 - return -ENOMEM;
27086 + panic("Cannot allocate vdso\n");
27087 }
27088 __initcall(init_vdso_vars);
27089
27090 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27091 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27092 {
27093 struct mm_struct *mm = current->mm;
27094 - unsigned long addr;
27095 + unsigned long addr = 0;
27096 int ret;
27097
27098 - if (!vdso_enabled)
27099 - return 0;
27100 -
27101 down_write(&mm->mmap_sem);
27102 +
27103 +#ifdef CONFIG_PAX_RANDMMAP
27104 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27105 +#endif
27106 +
27107 addr = vdso_addr(mm->start_stack, vdso_size);
27108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27109 if (IS_ERR_VALUE(addr)) {
27110 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 goto up_fail;
27112 }
27113
27114 - current->mm->context.vdso = (void *)addr;
27115 + current->mm->context.vdso = addr;
27116
27117 ret = install_special_mapping(mm, addr, vdso_size,
27118 VM_READ|VM_EXEC|
27119 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27120 VM_ALWAYSDUMP,
27121 vdso_pages);
27122 if (ret) {
27123 - current->mm->context.vdso = NULL;
27124 + current->mm->context.vdso = 0;
27125 goto up_fail;
27126 }
27127
27128 @@ -132,10 +127,3 @@ up_fail:
27129 up_write(&mm->mmap_sem);
27130 return ret;
27131 }
27132 -
27133 -static __init int vdso_setup(char *s)
27134 -{
27135 - vdso_enabled = simple_strtoul(s, NULL, 0);
27136 - return 0;
27137 -}
27138 -__setup("vdso=", vdso_setup);
27139 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27140 index 0087b00..eecb34f 100644
27141 --- a/arch/x86/xen/enlighten.c
27142 +++ b/arch/x86/xen/enlighten.c
27143 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27144
27145 struct shared_info xen_dummy_shared_info;
27146
27147 -void *xen_initial_gdt;
27148 -
27149 /*
27150 * Point at some empty memory to start with. We map the real shared_info
27151 * page as soon as fixmap is up and running.
27152 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27153
27154 preempt_disable();
27155
27156 - start = __get_cpu_var(idt_desc).address;
27157 + start = (unsigned long)__get_cpu_var(idt_desc).address;
27158 end = start + __get_cpu_var(idt_desc).size + 1;
27159
27160 xen_mc_flush();
27161 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27162 #endif
27163 };
27164
27165 -static void xen_reboot(int reason)
27166 +static __noreturn void xen_reboot(int reason)
27167 {
27168 struct sched_shutdown r = { .reason = reason };
27169
27170 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27171 BUG();
27172 }
27173
27174 -static void xen_restart(char *msg)
27175 +static __noreturn void xen_restart(char *msg)
27176 {
27177 xen_reboot(SHUTDOWN_reboot);
27178 }
27179
27180 -static void xen_emergency_restart(void)
27181 +static __noreturn void xen_emergency_restart(void)
27182 {
27183 xen_reboot(SHUTDOWN_reboot);
27184 }
27185
27186 -static void xen_machine_halt(void)
27187 +static __noreturn void xen_machine_halt(void)
27188 {
27189 xen_reboot(SHUTDOWN_poweroff);
27190 }
27191 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27192 */
27193 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27194
27195 -#ifdef CONFIG_X86_64
27196 /* Work out if we support NX */
27197 - check_efer();
27198 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27199 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27200 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27201 + unsigned l, h;
27202 +
27203 +#ifdef CONFIG_X86_PAE
27204 + nx_enabled = 1;
27205 +#endif
27206 + __supported_pte_mask |= _PAGE_NX;
27207 + rdmsr(MSR_EFER, l, h);
27208 + l |= EFER_NX;
27209 + wrmsr(MSR_EFER, l, h);
27210 + }
27211 #endif
27212
27213 xen_setup_features();
27214 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27215
27216 machine_ops = xen_machine_ops;
27217
27218 - /*
27219 - * The only reliable way to retain the initial address of the
27220 - * percpu gdt_page is to remember it here, so we can go and
27221 - * mark it RW later, when the initial percpu area is freed.
27222 - */
27223 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27224 -
27225 xen_smp_init();
27226
27227 pgd = (pgd_t *)xen_start_info->pt_base;
27228 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27229 index 3f90a2c..2c2ad84 100644
27230 --- a/arch/x86/xen/mmu.c
27231 +++ b/arch/x86/xen/mmu.c
27232 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27233 convert_pfn_mfn(init_level4_pgt);
27234 convert_pfn_mfn(level3_ident_pgt);
27235 convert_pfn_mfn(level3_kernel_pgt);
27236 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27237 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27238 + convert_pfn_mfn(level3_vmemmap_pgt);
27239
27240 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27241 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27242 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27243 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27244 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27245 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27246 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27247 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27248 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27249 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27250 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27251 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27252 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27253
27254 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27255 pv_mmu_ops.set_pud = xen_set_pud;
27256 #if PAGETABLE_LEVELS == 4
27257 pv_mmu_ops.set_pgd = xen_set_pgd;
27258 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27259 #endif
27260
27261 /* This will work as long as patching hasn't happened yet
27262 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27263 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27264 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27265 .set_pgd = xen_set_pgd_hyper,
27266 + .set_pgd_batched = xen_set_pgd_hyper,
27267
27268 .alloc_pud = xen_alloc_pmd_init,
27269 .release_pud = xen_release_pmd_init,
27270 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27271 index a96204a..fca9b8e 100644
27272 --- a/arch/x86/xen/smp.c
27273 +++ b/arch/x86/xen/smp.c
27274 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27275 {
27276 BUG_ON(smp_processor_id() != 0);
27277 native_smp_prepare_boot_cpu();
27278 -
27279 - /* We've switched to the "real" per-cpu gdt, so make sure the
27280 - old memory can be recycled */
27281 - make_lowmem_page_readwrite(xen_initial_gdt);
27282 -
27283 xen_setup_vcpu_info_placement();
27284 }
27285
27286 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27287 gdt = get_cpu_gdt_table(cpu);
27288
27289 ctxt->flags = VGCF_IN_KERNEL;
27290 - ctxt->user_regs.ds = __USER_DS;
27291 - ctxt->user_regs.es = __USER_DS;
27292 + ctxt->user_regs.ds = __KERNEL_DS;
27293 + ctxt->user_regs.es = __KERNEL_DS;
27294 ctxt->user_regs.ss = __KERNEL_DS;
27295 #ifdef CONFIG_X86_32
27296 ctxt->user_regs.fs = __KERNEL_PERCPU;
27297 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27298 + savesegment(gs, ctxt->user_regs.gs);
27299 #else
27300 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27301 #endif
27302 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27303 int rc;
27304
27305 per_cpu(current_task, cpu) = idle;
27306 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27307 #ifdef CONFIG_X86_32
27308 irq_ctx_init(cpu);
27309 #else
27310 clear_tsk_thread_flag(idle, TIF_FORK);
27311 - per_cpu(kernel_stack, cpu) =
27312 - (unsigned long)task_stack_page(idle) -
27313 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27314 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27315 #endif
27316 xen_setup_runstate_info(cpu);
27317 xen_setup_timer(cpu);
27318 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27319 index 9a95a9c..4f39e774 100644
27320 --- a/arch/x86/xen/xen-asm_32.S
27321 +++ b/arch/x86/xen/xen-asm_32.S
27322 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27323 ESP_OFFSET=4 # bytes pushed onto stack
27324
27325 /*
27326 - * Store vcpu_info pointer for easy access. Do it this way to
27327 - * avoid having to reload %fs
27328 + * Store vcpu_info pointer for easy access.
27329 */
27330 #ifdef CONFIG_SMP
27331 - GET_THREAD_INFO(%eax)
27332 - movl TI_cpu(%eax), %eax
27333 - movl __per_cpu_offset(,%eax,4), %eax
27334 - mov per_cpu__xen_vcpu(%eax), %eax
27335 + push %fs
27336 + mov $(__KERNEL_PERCPU), %eax
27337 + mov %eax, %fs
27338 + mov PER_CPU_VAR(xen_vcpu), %eax
27339 + pop %fs
27340 #else
27341 movl per_cpu__xen_vcpu, %eax
27342 #endif
27343 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27344 index 1a5ff24..a187d40 100644
27345 --- a/arch/x86/xen/xen-head.S
27346 +++ b/arch/x86/xen/xen-head.S
27347 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27348 #ifdef CONFIG_X86_32
27349 mov %esi,xen_start_info
27350 mov $init_thread_union+THREAD_SIZE,%esp
27351 +#ifdef CONFIG_SMP
27352 + movl $cpu_gdt_table,%edi
27353 + movl $__per_cpu_load,%eax
27354 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27355 + rorl $16,%eax
27356 + movb %al,__KERNEL_PERCPU + 4(%edi)
27357 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27358 + movl $__per_cpu_end - 1,%eax
27359 + subl $__per_cpu_start,%eax
27360 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27361 +#endif
27362 #else
27363 mov %rsi,xen_start_info
27364 mov $init_thread_union+THREAD_SIZE,%rsp
27365 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27366 index f9153a3..51eab3d 100644
27367 --- a/arch/x86/xen/xen-ops.h
27368 +++ b/arch/x86/xen/xen-ops.h
27369 @@ -10,8 +10,6 @@
27370 extern const char xen_hypervisor_callback[];
27371 extern const char xen_failsafe_callback[];
27372
27373 -extern void *xen_initial_gdt;
27374 -
27375 struct trap_info;
27376 void xen_copy_trap_info(struct trap_info *traps);
27377
27378 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27379 index 15c6308..96e83c2 100644
27380 --- a/block/blk-integrity.c
27381 +++ b/block/blk-integrity.c
27382 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27383 NULL,
27384 };
27385
27386 -static struct sysfs_ops integrity_ops = {
27387 +static const struct sysfs_ops integrity_ops = {
27388 .show = &integrity_attr_show,
27389 .store = &integrity_attr_store,
27390 };
27391 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27392 index ca56420..f2fc409 100644
27393 --- a/block/blk-iopoll.c
27394 +++ b/block/blk-iopoll.c
27395 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27396 }
27397 EXPORT_SYMBOL(blk_iopoll_complete);
27398
27399 -static void blk_iopoll_softirq(struct softirq_action *h)
27400 +static void blk_iopoll_softirq(void)
27401 {
27402 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27403 int rearm = 0, budget = blk_iopoll_budget;
27404 diff --git a/block/blk-map.c b/block/blk-map.c
27405 index 30a7e51..0aeec6a 100644
27406 --- a/block/blk-map.c
27407 +++ b/block/blk-map.c
27408 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27409 * direct dma. else, set up kernel bounce buffers
27410 */
27411 uaddr = (unsigned long) ubuf;
27412 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27413 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27414 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27415 else
27416 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27417 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27418 for (i = 0; i < iov_count; i++) {
27419 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27420
27421 + if (!iov[i].iov_len)
27422 + return -EINVAL;
27423 +
27424 if (uaddr & queue_dma_alignment(q)) {
27425 unaligned = 1;
27426 break;
27427 }
27428 - if (!iov[i].iov_len)
27429 - return -EINVAL;
27430 }
27431
27432 if (unaligned || (q->dma_pad_mask & len) || map_data)
27433 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27438 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443 index ee9c216..58d410a 100644
27444 --- a/block/blk-softirq.c
27445 +++ b/block/blk-softirq.c
27446 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450 -static void blk_done_softirq(struct softirq_action *h)
27451 +static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27456 index bb9c5ea..5330d48 100644
27457 --- a/block/blk-sysfs.c
27458 +++ b/block/blk-sysfs.c
27459 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27460 kmem_cache_free(blk_requestq_cachep, q);
27461 }
27462
27463 -static struct sysfs_ops queue_sysfs_ops = {
27464 +static const struct sysfs_ops queue_sysfs_ops = {
27465 .show = queue_attr_show,
27466 .store = queue_attr_store,
27467 };
27468 diff --git a/block/bsg.c b/block/bsg.c
27469 index 7154a7a..08ac2f0 100644
27470 --- a/block/bsg.c
27471 +++ b/block/bsg.c
27472 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27473 struct sg_io_v4 *hdr, struct bsg_device *bd,
27474 fmode_t has_write_perm)
27475 {
27476 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27477 + unsigned char *cmdptr;
27478 +
27479 if (hdr->request_len > BLK_MAX_CDB) {
27480 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27481 if (!rq->cmd)
27482 return -ENOMEM;
27483 - }
27484 + cmdptr = rq->cmd;
27485 + } else
27486 + cmdptr = tmpcmd;
27487
27488 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27489 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27490 hdr->request_len))
27491 return -EFAULT;
27492
27493 + if (cmdptr != rq->cmd)
27494 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27495 +
27496 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27497 if (blk_verify_command(rq->cmd, has_write_perm))
27498 return -EPERM;
27499 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27500 rq->next_rq = next_rq;
27501 next_rq->cmd_type = rq->cmd_type;
27502
27503 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27504 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27505 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27506 hdr->din_xfer_len, GFP_KERNEL);
27507 if (ret)
27508 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27509
27510 if (hdr->dout_xfer_len) {
27511 dxfer_len = hdr->dout_xfer_len;
27512 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27513 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27514 } else if (hdr->din_xfer_len) {
27515 dxfer_len = hdr->din_xfer_len;
27516 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27517 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27518 } else
27519 dxfer_len = 0;
27520
27521 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27522 int len = min_t(unsigned int, hdr->max_response_len,
27523 rq->sense_len);
27524
27525 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27526 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27527 rq->sense, len);
27528 if (!ret)
27529 hdr->response_len = len;
27530 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27531 index 9bd086c..ca1fc22 100644
27532 --- a/block/compat_ioctl.c
27533 +++ b/block/compat_ioctl.c
27534 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27535 err |= __get_user(f->spec1, &uf->spec1);
27536 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27537 err |= __get_user(name, &uf->name);
27538 - f->name = compat_ptr(name);
27539 + f->name = (void __force_kernel *)compat_ptr(name);
27540 if (err) {
27541 err = -EFAULT;
27542 goto out;
27543 diff --git a/block/elevator.c b/block/elevator.c
27544 index a847046..75a1746 100644
27545 --- a/block/elevator.c
27546 +++ b/block/elevator.c
27547 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27548 return error;
27549 }
27550
27551 -static struct sysfs_ops elv_sysfs_ops = {
27552 +static const struct sysfs_ops elv_sysfs_ops = {
27553 .show = elv_attr_show,
27554 .store = elv_attr_store,
27555 };
27556 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27557 index 2be0a97..bded3fd 100644
27558 --- a/block/scsi_ioctl.c
27559 +++ b/block/scsi_ioctl.c
27560 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27561 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27562 struct sg_io_hdr *hdr, fmode_t mode)
27563 {
27564 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27565 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27566 + unsigned char *cmdptr;
27567 +
27568 + if (rq->cmd != rq->__cmd)
27569 + cmdptr = rq->cmd;
27570 + else
27571 + cmdptr = tmpcmd;
27572 +
27573 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27574 return -EFAULT;
27575 +
27576 + if (cmdptr != rq->cmd)
27577 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27578 +
27579 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27580 return -EPERM;
27581
27582 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27583 int err;
27584 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27585 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27586 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27587 + unsigned char *cmdptr;
27588
27589 if (!sic)
27590 return -EINVAL;
27591 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27592 */
27593 err = -EFAULT;
27594 rq->cmd_len = cmdlen;
27595 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27596 +
27597 + if (rq->cmd != rq->__cmd)
27598 + cmdptr = rq->cmd;
27599 + else
27600 + cmdptr = tmpcmd;
27601 +
27602 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27603 goto error;
27604
27605 + if (rq->cmd != cmdptr)
27606 + memcpy(rq->cmd, cmdptr, cmdlen);
27607 +
27608 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27609 goto error;
27610
27611 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27612 index 3533582..f143117 100644
27613 --- a/crypto/cryptd.c
27614 +++ b/crypto/cryptd.c
27615 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27616
27617 struct cryptd_blkcipher_request_ctx {
27618 crypto_completion_t complete;
27619 -};
27620 +} __no_const;
27621
27622 struct cryptd_hash_ctx {
27623 struct crypto_shash *child;
27624 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27625 index a90d260..7a9765e 100644
27626 --- a/crypto/gf128mul.c
27627 +++ b/crypto/gf128mul.c
27628 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27629 for (i = 0; i < 7; ++i)
27630 gf128mul_x_lle(&p[i + 1], &p[i]);
27631
27632 - memset(r, 0, sizeof(r));
27633 + memset(r, 0, sizeof(*r));
27634 for (i = 0;;) {
27635 u8 ch = ((u8 *)b)[15 - i];
27636
27637 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27638 for (i = 0; i < 7; ++i)
27639 gf128mul_x_bbe(&p[i + 1], &p[i]);
27640
27641 - memset(r, 0, sizeof(r));
27642 + memset(r, 0, sizeof(*r));
27643 for (i = 0;;) {
27644 u8 ch = ((u8 *)b)[i];
27645
27646 diff --git a/crypto/serpent.c b/crypto/serpent.c
27647 index b651a55..023297d 100644
27648 --- a/crypto/serpent.c
27649 +++ b/crypto/serpent.c
27650 @@ -21,6 +21,7 @@
27651 #include <asm/byteorder.h>
27652 #include <linux/crypto.h>
27653 #include <linux/types.h>
27654 +#include <linux/sched.h>
27655
27656 /* Key is padded to the maximum of 256 bits before round key generation.
27657 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27658 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27659 u32 r0,r1,r2,r3,r4;
27660 int i;
27661
27662 + pax_track_stack();
27663 +
27664 /* Copy key, add padding */
27665
27666 for (i = 0; i < keylen; ++i)
27667 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27668 index 0d2cdb8..d8de48d 100644
27669 --- a/drivers/acpi/acpi_pad.c
27670 +++ b/drivers/acpi/acpi_pad.c
27671 @@ -30,7 +30,7 @@
27672 #include <acpi/acpi_bus.h>
27673 #include <acpi/acpi_drivers.h>
27674
27675 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27676 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27677 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27678 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27679 static DEFINE_MUTEX(isolated_cpus_lock);
27680 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27681 index 3f4602b..2e41d36 100644
27682 --- a/drivers/acpi/battery.c
27683 +++ b/drivers/acpi/battery.c
27684 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27685 }
27686
27687 static struct battery_file {
27688 - struct file_operations ops;
27689 + const struct file_operations ops;
27690 mode_t mode;
27691 const char *name;
27692 } acpi_battery_file[] = {
27693 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27694 index 7338b6a..82f0257 100644
27695 --- a/drivers/acpi/dock.c
27696 +++ b/drivers/acpi/dock.c
27697 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27698 struct list_head list;
27699 struct list_head hotplug_list;
27700 acpi_handle handle;
27701 - struct acpi_dock_ops *ops;
27702 + const struct acpi_dock_ops *ops;
27703 void *context;
27704 };
27705
27706 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27707 * the dock driver after _DCK is executed.
27708 */
27709 int
27710 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27711 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27712 void *context)
27713 {
27714 struct dock_dependent_device *dd;
27715 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27716 index 7c1c59e..2993595 100644
27717 --- a/drivers/acpi/osl.c
27718 +++ b/drivers/acpi/osl.c
27719 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27720 void __iomem *virt_addr;
27721
27722 virt_addr = ioremap(phys_addr, width);
27723 + if (!virt_addr)
27724 + return AE_NO_MEMORY;
27725 if (!value)
27726 value = &dummy;
27727
27728 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27729 void __iomem *virt_addr;
27730
27731 virt_addr = ioremap(phys_addr, width);
27732 + if (!virt_addr)
27733 + return AE_NO_MEMORY;
27734
27735 switch (width) {
27736 case 8:
27737 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27738 index c216062..eec10d2 100644
27739 --- a/drivers/acpi/power_meter.c
27740 +++ b/drivers/acpi/power_meter.c
27741 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27742 return res;
27743
27744 temp /= 1000;
27745 - if (temp < 0)
27746 - return -EINVAL;
27747
27748 mutex_lock(&resource->lock);
27749 resource->trip[attr->index - 7] = temp;
27750 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27751 index d0d25e2..961643d 100644
27752 --- a/drivers/acpi/proc.c
27753 +++ b/drivers/acpi/proc.c
27754 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27755 size_t count, loff_t * ppos)
27756 {
27757 struct list_head *node, *next;
27758 - char strbuf[5];
27759 - char str[5] = "";
27760 - unsigned int len = count;
27761 + char strbuf[5] = {0};
27762 struct acpi_device *found_dev = NULL;
27763
27764 - if (len > 4)
27765 - len = 4;
27766 - if (len < 0)
27767 - return -EFAULT;
27768 + if (count > 4)
27769 + count = 4;
27770
27771 - if (copy_from_user(strbuf, buffer, len))
27772 + if (copy_from_user(strbuf, buffer, count))
27773 return -EFAULT;
27774 - strbuf[len] = '\0';
27775 - sscanf(strbuf, "%s", str);
27776 + strbuf[count] = '\0';
27777
27778 mutex_lock(&acpi_device_lock);
27779 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27780 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27781 if (!dev->wakeup.flags.valid)
27782 continue;
27783
27784 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27785 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27786 dev->wakeup.state.enabled =
27787 dev->wakeup.state.enabled ? 0 : 1;
27788 found_dev = dev;
27789 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27790 index 7102474..de8ad22 100644
27791 --- a/drivers/acpi/processor_core.c
27792 +++ b/drivers/acpi/processor_core.c
27793 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27794 return 0;
27795 }
27796
27797 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27798 + BUG_ON(pr->id >= nr_cpu_ids);
27799
27800 /*
27801 * Buggy BIOS check
27802 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27803 index d933980..5761f13 100644
27804 --- a/drivers/acpi/sbshc.c
27805 +++ b/drivers/acpi/sbshc.c
27806 @@ -17,7 +17,7 @@
27807
27808 #define PREFIX "ACPI: "
27809
27810 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27811 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27812 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27813
27814 struct acpi_smb_hc {
27815 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27816 index 0458094..6978e7b 100644
27817 --- a/drivers/acpi/sleep.c
27818 +++ b/drivers/acpi/sleep.c
27819 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27820 }
27821 }
27822
27823 -static struct platform_suspend_ops acpi_suspend_ops = {
27824 +static const struct platform_suspend_ops acpi_suspend_ops = {
27825 .valid = acpi_suspend_state_valid,
27826 .begin = acpi_suspend_begin,
27827 .prepare_late = acpi_pm_prepare,
27828 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27829 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27830 * been requested.
27831 */
27832 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27833 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27834 .valid = acpi_suspend_state_valid,
27835 .begin = acpi_suspend_begin_old,
27836 .prepare_late = acpi_pm_disable_gpes,
27837 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27838 acpi_enable_all_runtime_gpes();
27839 }
27840
27841 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27842 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27843 .begin = acpi_hibernation_begin,
27844 .end = acpi_pm_end,
27845 .pre_snapshot = acpi_hibernation_pre_snapshot,
27846 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27847 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27848 * been requested.
27849 */
27850 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27851 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27852 .begin = acpi_hibernation_begin_old,
27853 .end = acpi_pm_end,
27854 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27855 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27856 index 05dff63..b662ab7 100644
27857 --- a/drivers/acpi/video.c
27858 +++ b/drivers/acpi/video.c
27859 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27860 vd->brightness->levels[request_level]);
27861 }
27862
27863 -static struct backlight_ops acpi_backlight_ops = {
27864 +static const struct backlight_ops acpi_backlight_ops = {
27865 .get_brightness = acpi_video_get_brightness,
27866 .update_status = acpi_video_set_brightness,
27867 };
27868 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27869 index 6787aab..23ffb0e 100644
27870 --- a/drivers/ata/ahci.c
27871 +++ b/drivers/ata/ahci.c
27872 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27873 .sdev_attrs = ahci_sdev_attrs,
27874 };
27875
27876 -static struct ata_port_operations ahci_ops = {
27877 +static const struct ata_port_operations ahci_ops = {
27878 .inherits = &sata_pmp_port_ops,
27879
27880 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27881 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27882 .port_stop = ahci_port_stop,
27883 };
27884
27885 -static struct ata_port_operations ahci_vt8251_ops = {
27886 +static const struct ata_port_operations ahci_vt8251_ops = {
27887 .inherits = &ahci_ops,
27888 .hardreset = ahci_vt8251_hardreset,
27889 };
27890
27891 -static struct ata_port_operations ahci_p5wdh_ops = {
27892 +static const struct ata_port_operations ahci_p5wdh_ops = {
27893 .inherits = &ahci_ops,
27894 .hardreset = ahci_p5wdh_hardreset,
27895 };
27896
27897 -static struct ata_port_operations ahci_sb600_ops = {
27898 +static const struct ata_port_operations ahci_sb600_ops = {
27899 .inherits = &ahci_ops,
27900 .softreset = ahci_sb600_softreset,
27901 .pmp_softreset = ahci_sb600_softreset,
27902 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27903 index 99e7196..4968c77 100644
27904 --- a/drivers/ata/ata_generic.c
27905 +++ b/drivers/ata/ata_generic.c
27906 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27907 ATA_BMDMA_SHT(DRV_NAME),
27908 };
27909
27910 -static struct ata_port_operations generic_port_ops = {
27911 +static const struct ata_port_operations generic_port_ops = {
27912 .inherits = &ata_bmdma_port_ops,
27913 .cable_detect = ata_cable_unknown,
27914 .set_mode = generic_set_mode,
27915 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27916 index c33591d..000c121 100644
27917 --- a/drivers/ata/ata_piix.c
27918 +++ b/drivers/ata/ata_piix.c
27919 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27920 ATA_BMDMA_SHT(DRV_NAME),
27921 };
27922
27923 -static struct ata_port_operations piix_pata_ops = {
27924 +static const struct ata_port_operations piix_pata_ops = {
27925 .inherits = &ata_bmdma32_port_ops,
27926 .cable_detect = ata_cable_40wire,
27927 .set_piomode = piix_set_piomode,
27928 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27929 .prereset = piix_pata_prereset,
27930 };
27931
27932 -static struct ata_port_operations piix_vmw_ops = {
27933 +static const struct ata_port_operations piix_vmw_ops = {
27934 .inherits = &piix_pata_ops,
27935 .bmdma_status = piix_vmw_bmdma_status,
27936 };
27937
27938 -static struct ata_port_operations ich_pata_ops = {
27939 +static const struct ata_port_operations ich_pata_ops = {
27940 .inherits = &piix_pata_ops,
27941 .cable_detect = ich_pata_cable_detect,
27942 .set_dmamode = ich_set_dmamode,
27943 };
27944
27945 -static struct ata_port_operations piix_sata_ops = {
27946 +static const struct ata_port_operations piix_sata_ops = {
27947 .inherits = &ata_bmdma_port_ops,
27948 };
27949
27950 -static struct ata_port_operations piix_sidpr_sata_ops = {
27951 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27952 .inherits = &piix_sata_ops,
27953 .hardreset = sata_std_hardreset,
27954 .scr_read = piix_sidpr_scr_read,
27955 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27956 index b0882cd..c295d65 100644
27957 --- a/drivers/ata/libata-acpi.c
27958 +++ b/drivers/ata/libata-acpi.c
27959 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27960 ata_acpi_uevent(dev->link->ap, dev, event);
27961 }
27962
27963 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27964 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27965 .handler = ata_acpi_dev_notify_dock,
27966 .uevent = ata_acpi_dev_uevent,
27967 };
27968
27969 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27970 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27971 .handler = ata_acpi_ap_notify_dock,
27972 .uevent = ata_acpi_ap_uevent,
27973 };
27974 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27975 index d4f7f99..94f603e 100644
27976 --- a/drivers/ata/libata-core.c
27977 +++ b/drivers/ata/libata-core.c
27978 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27979 struct ata_port *ap;
27980 unsigned int tag;
27981
27982 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27983 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27984 ap = qc->ap;
27985
27986 qc->flags = 0;
27987 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27988 struct ata_port *ap;
27989 struct ata_link *link;
27990
27991 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27992 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27993 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27994 ap = qc->ap;
27995 link = qc->dev->link;
27996 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27997 * LOCKING:
27998 * None.
27999 */
28000 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
28001 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28002 {
28003 static DEFINE_SPINLOCK(lock);
28004 const struct ata_port_operations *cur;
28005 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28006 return;
28007
28008 spin_lock(&lock);
28009 + pax_open_kernel();
28010
28011 for (cur = ops->inherits; cur; cur = cur->inherits) {
28012 void **inherit = (void **)cur;
28013 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28014 if (IS_ERR(*pp))
28015 *pp = NULL;
28016
28017 - ops->inherits = NULL;
28018 + *(struct ata_port_operations **)&ops->inherits = NULL;
28019
28020 + pax_close_kernel();
28021 spin_unlock(&lock);
28022 }
28023
28024 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28025 */
28026 /* KILLME - the only user left is ipr */
28027 void ata_host_init(struct ata_host *host, struct device *dev,
28028 - unsigned long flags, struct ata_port_operations *ops)
28029 + unsigned long flags, const struct ata_port_operations *ops)
28030 {
28031 spin_lock_init(&host->lock);
28032 host->dev = dev;
28033 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28034 /* truly dummy */
28035 }
28036
28037 -struct ata_port_operations ata_dummy_port_ops = {
28038 +const struct ata_port_operations ata_dummy_port_ops = {
28039 .qc_prep = ata_noop_qc_prep,
28040 .qc_issue = ata_dummy_qc_issue,
28041 .error_handler = ata_dummy_error_handler,
28042 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28043 index e5bdb9b..45a8e72 100644
28044 --- a/drivers/ata/libata-eh.c
28045 +++ b/drivers/ata/libata-eh.c
28046 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28047 {
28048 struct ata_link *link;
28049
28050 + pax_track_stack();
28051 +
28052 ata_for_each_link(link, ap, HOST_FIRST)
28053 ata_eh_link_report(link);
28054 }
28055 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28056 */
28057 void ata_std_error_handler(struct ata_port *ap)
28058 {
28059 - struct ata_port_operations *ops = ap->ops;
28060 + const struct ata_port_operations *ops = ap->ops;
28061 ata_reset_fn_t hardreset = ops->hardreset;
28062
28063 /* ignore built-in hardreset if SCR access is not available */
28064 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28065 index 51f0ffb..19ce3e3 100644
28066 --- a/drivers/ata/libata-pmp.c
28067 +++ b/drivers/ata/libata-pmp.c
28068 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28069 */
28070 static int sata_pmp_eh_recover(struct ata_port *ap)
28071 {
28072 - struct ata_port_operations *ops = ap->ops;
28073 + const struct ata_port_operations *ops = ap->ops;
28074 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28075 struct ata_link *pmp_link = &ap->link;
28076 struct ata_device *pmp_dev = pmp_link->device;
28077 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28078 index d8f35fe..288180a 100644
28079 --- a/drivers/ata/pata_acpi.c
28080 +++ b/drivers/ata/pata_acpi.c
28081 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28082 ATA_BMDMA_SHT(DRV_NAME),
28083 };
28084
28085 -static struct ata_port_operations pacpi_ops = {
28086 +static const struct ata_port_operations pacpi_ops = {
28087 .inherits = &ata_bmdma_port_ops,
28088 .qc_issue = pacpi_qc_issue,
28089 .cable_detect = pacpi_cable_detect,
28090 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28091 index 9434114..1f2f364 100644
28092 --- a/drivers/ata/pata_ali.c
28093 +++ b/drivers/ata/pata_ali.c
28094 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28095 * Port operations for PIO only ALi
28096 */
28097
28098 -static struct ata_port_operations ali_early_port_ops = {
28099 +static const struct ata_port_operations ali_early_port_ops = {
28100 .inherits = &ata_sff_port_ops,
28101 .cable_detect = ata_cable_40wire,
28102 .set_piomode = ali_set_piomode,
28103 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28104 * Port operations for DMA capable ALi without cable
28105 * detect
28106 */
28107 -static struct ata_port_operations ali_20_port_ops = {
28108 +static const struct ata_port_operations ali_20_port_ops = {
28109 .inherits = &ali_dma_base_ops,
28110 .cable_detect = ata_cable_40wire,
28111 .mode_filter = ali_20_filter,
28112 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28113 /*
28114 * Port operations for DMA capable ALi with cable detect
28115 */
28116 -static struct ata_port_operations ali_c2_port_ops = {
28117 +static const struct ata_port_operations ali_c2_port_ops = {
28118 .inherits = &ali_dma_base_ops,
28119 .check_atapi_dma = ali_check_atapi_dma,
28120 .cable_detect = ali_c2_cable_detect,
28121 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28122 /*
28123 * Port operations for DMA capable ALi with cable detect
28124 */
28125 -static struct ata_port_operations ali_c4_port_ops = {
28126 +static const struct ata_port_operations ali_c4_port_ops = {
28127 .inherits = &ali_dma_base_ops,
28128 .check_atapi_dma = ali_check_atapi_dma,
28129 .cable_detect = ali_c2_cable_detect,
28130 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28131 /*
28132 * Port operations for DMA capable ALi with cable detect and LBA48
28133 */
28134 -static struct ata_port_operations ali_c5_port_ops = {
28135 +static const struct ata_port_operations ali_c5_port_ops = {
28136 .inherits = &ali_dma_base_ops,
28137 .check_atapi_dma = ali_check_atapi_dma,
28138 .dev_config = ali_warn_atapi_dma,
28139 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28140 index 567f3f7..c8ee0da 100644
28141 --- a/drivers/ata/pata_amd.c
28142 +++ b/drivers/ata/pata_amd.c
28143 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28144 .prereset = amd_pre_reset,
28145 };
28146
28147 -static struct ata_port_operations amd33_port_ops = {
28148 +static const struct ata_port_operations amd33_port_ops = {
28149 .inherits = &amd_base_port_ops,
28150 .cable_detect = ata_cable_40wire,
28151 .set_piomode = amd33_set_piomode,
28152 .set_dmamode = amd33_set_dmamode,
28153 };
28154
28155 -static struct ata_port_operations amd66_port_ops = {
28156 +static const struct ata_port_operations amd66_port_ops = {
28157 .inherits = &amd_base_port_ops,
28158 .cable_detect = ata_cable_unknown,
28159 .set_piomode = amd66_set_piomode,
28160 .set_dmamode = amd66_set_dmamode,
28161 };
28162
28163 -static struct ata_port_operations amd100_port_ops = {
28164 +static const struct ata_port_operations amd100_port_ops = {
28165 .inherits = &amd_base_port_ops,
28166 .cable_detect = ata_cable_unknown,
28167 .set_piomode = amd100_set_piomode,
28168 .set_dmamode = amd100_set_dmamode,
28169 };
28170
28171 -static struct ata_port_operations amd133_port_ops = {
28172 +static const struct ata_port_operations amd133_port_ops = {
28173 .inherits = &amd_base_port_ops,
28174 .cable_detect = amd_cable_detect,
28175 .set_piomode = amd133_set_piomode,
28176 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28177 .host_stop = nv_host_stop,
28178 };
28179
28180 -static struct ata_port_operations nv100_port_ops = {
28181 +static const struct ata_port_operations nv100_port_ops = {
28182 .inherits = &nv_base_port_ops,
28183 .set_piomode = nv100_set_piomode,
28184 .set_dmamode = nv100_set_dmamode,
28185 };
28186
28187 -static struct ata_port_operations nv133_port_ops = {
28188 +static const struct ata_port_operations nv133_port_ops = {
28189 .inherits = &nv_base_port_ops,
28190 .set_piomode = nv133_set_piomode,
28191 .set_dmamode = nv133_set_dmamode,
28192 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28193 index d332cfd..4b7eaae 100644
28194 --- a/drivers/ata/pata_artop.c
28195 +++ b/drivers/ata/pata_artop.c
28196 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28197 ATA_BMDMA_SHT(DRV_NAME),
28198 };
28199
28200 -static struct ata_port_operations artop6210_ops = {
28201 +static const struct ata_port_operations artop6210_ops = {
28202 .inherits = &ata_bmdma_port_ops,
28203 .cable_detect = ata_cable_40wire,
28204 .set_piomode = artop6210_set_piomode,
28205 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28206 .qc_defer = artop6210_qc_defer,
28207 };
28208
28209 -static struct ata_port_operations artop6260_ops = {
28210 +static const struct ata_port_operations artop6260_ops = {
28211 .inherits = &ata_bmdma_port_ops,
28212 .cable_detect = artop6260_cable_detect,
28213 .set_piomode = artop6260_set_piomode,
28214 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28215 index 5c129f9..7bb7ccb 100644
28216 --- a/drivers/ata/pata_at32.c
28217 +++ b/drivers/ata/pata_at32.c
28218 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28219 ATA_PIO_SHT(DRV_NAME),
28220 };
28221
28222 -static struct ata_port_operations at32_port_ops = {
28223 +static const struct ata_port_operations at32_port_ops = {
28224 .inherits = &ata_sff_port_ops,
28225 .cable_detect = ata_cable_40wire,
28226 .set_piomode = pata_at32_set_piomode,
28227 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28228 index 41c94b1..829006d 100644
28229 --- a/drivers/ata/pata_at91.c
28230 +++ b/drivers/ata/pata_at91.c
28231 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28232 ATA_PIO_SHT(DRV_NAME),
28233 };
28234
28235 -static struct ata_port_operations pata_at91_port_ops = {
28236 +static const struct ata_port_operations pata_at91_port_ops = {
28237 .inherits = &ata_sff_port_ops,
28238
28239 .sff_data_xfer = pata_at91_data_xfer_noirq,
28240 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28241 index ae4454d..d391eb4 100644
28242 --- a/drivers/ata/pata_atiixp.c
28243 +++ b/drivers/ata/pata_atiixp.c
28244 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28245 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28246 };
28247
28248 -static struct ata_port_operations atiixp_port_ops = {
28249 +static const struct ata_port_operations atiixp_port_ops = {
28250 .inherits = &ata_bmdma_port_ops,
28251
28252 .qc_prep = ata_sff_dumb_qc_prep,
28253 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28254 index 6fe7ded..2a425dc 100644
28255 --- a/drivers/ata/pata_atp867x.c
28256 +++ b/drivers/ata/pata_atp867x.c
28257 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28258 ATA_BMDMA_SHT(DRV_NAME),
28259 };
28260
28261 -static struct ata_port_operations atp867x_ops = {
28262 +static const struct ata_port_operations atp867x_ops = {
28263 .inherits = &ata_bmdma_port_ops,
28264 .cable_detect = atp867x_cable_detect,
28265 .set_piomode = atp867x_set_piomode,
28266 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28267 index c4b47a3..b27a367 100644
28268 --- a/drivers/ata/pata_bf54x.c
28269 +++ b/drivers/ata/pata_bf54x.c
28270 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28271 .dma_boundary = ATA_DMA_BOUNDARY,
28272 };
28273
28274 -static struct ata_port_operations bfin_pata_ops = {
28275 +static const struct ata_port_operations bfin_pata_ops = {
28276 .inherits = &ata_sff_port_ops,
28277
28278 .set_piomode = bfin_set_piomode,
28279 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28280 index 5acf9fa..84248be 100644
28281 --- a/drivers/ata/pata_cmd640.c
28282 +++ b/drivers/ata/pata_cmd640.c
28283 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28284 ATA_BMDMA_SHT(DRV_NAME),
28285 };
28286
28287 -static struct ata_port_operations cmd640_port_ops = {
28288 +static const struct ata_port_operations cmd640_port_ops = {
28289 .inherits = &ata_bmdma_port_ops,
28290 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28291 .sff_data_xfer = ata_sff_data_xfer_noirq,
28292 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28293 index ccd2694..c869c3d 100644
28294 --- a/drivers/ata/pata_cmd64x.c
28295 +++ b/drivers/ata/pata_cmd64x.c
28296 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28297 .set_dmamode = cmd64x_set_dmamode,
28298 };
28299
28300 -static struct ata_port_operations cmd64x_port_ops = {
28301 +static const struct ata_port_operations cmd64x_port_ops = {
28302 .inherits = &cmd64x_base_ops,
28303 .cable_detect = ata_cable_40wire,
28304 };
28305
28306 -static struct ata_port_operations cmd646r1_port_ops = {
28307 +static const struct ata_port_operations cmd646r1_port_ops = {
28308 .inherits = &cmd64x_base_ops,
28309 .bmdma_stop = cmd646r1_bmdma_stop,
28310 .cable_detect = ata_cable_40wire,
28311 };
28312
28313 -static struct ata_port_operations cmd648_port_ops = {
28314 +static const struct ata_port_operations cmd648_port_ops = {
28315 .inherits = &cmd64x_base_ops,
28316 .bmdma_stop = cmd648_bmdma_stop,
28317 .cable_detect = cmd648_cable_detect,
28318 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28319 index 0df83cf..d7595b0 100644
28320 --- a/drivers/ata/pata_cs5520.c
28321 +++ b/drivers/ata/pata_cs5520.c
28322 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28323 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28324 };
28325
28326 -static struct ata_port_operations cs5520_port_ops = {
28327 +static const struct ata_port_operations cs5520_port_ops = {
28328 .inherits = &ata_bmdma_port_ops,
28329 .qc_prep = ata_sff_dumb_qc_prep,
28330 .cable_detect = ata_cable_40wire,
28331 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28332 index c974b05..6d26b11 100644
28333 --- a/drivers/ata/pata_cs5530.c
28334 +++ b/drivers/ata/pata_cs5530.c
28335 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28336 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28337 };
28338
28339 -static struct ata_port_operations cs5530_port_ops = {
28340 +static const struct ata_port_operations cs5530_port_ops = {
28341 .inherits = &ata_bmdma_port_ops,
28342
28343 .qc_prep = ata_sff_dumb_qc_prep,
28344 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28345 index 403f561..aacd26b 100644
28346 --- a/drivers/ata/pata_cs5535.c
28347 +++ b/drivers/ata/pata_cs5535.c
28348 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28349 ATA_BMDMA_SHT(DRV_NAME),
28350 };
28351
28352 -static struct ata_port_operations cs5535_port_ops = {
28353 +static const struct ata_port_operations cs5535_port_ops = {
28354 .inherits = &ata_bmdma_port_ops,
28355 .cable_detect = cs5535_cable_detect,
28356 .set_piomode = cs5535_set_piomode,
28357 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28358 index 6da4cb4..de24a25 100644
28359 --- a/drivers/ata/pata_cs5536.c
28360 +++ b/drivers/ata/pata_cs5536.c
28361 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28362 ATA_BMDMA_SHT(DRV_NAME),
28363 };
28364
28365 -static struct ata_port_operations cs5536_port_ops = {
28366 +static const struct ata_port_operations cs5536_port_ops = {
28367 .inherits = &ata_bmdma_port_ops,
28368 .cable_detect = cs5536_cable_detect,
28369 .set_piomode = cs5536_set_piomode,
28370 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28371 index 8fb040b..b16a9c9 100644
28372 --- a/drivers/ata/pata_cypress.c
28373 +++ b/drivers/ata/pata_cypress.c
28374 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28375 ATA_BMDMA_SHT(DRV_NAME),
28376 };
28377
28378 -static struct ata_port_operations cy82c693_port_ops = {
28379 +static const struct ata_port_operations cy82c693_port_ops = {
28380 .inherits = &ata_bmdma_port_ops,
28381 .cable_detect = ata_cable_40wire,
28382 .set_piomode = cy82c693_set_piomode,
28383 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28384 index 2a6412f..555ee11 100644
28385 --- a/drivers/ata/pata_efar.c
28386 +++ b/drivers/ata/pata_efar.c
28387 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28388 ATA_BMDMA_SHT(DRV_NAME),
28389 };
28390
28391 -static struct ata_port_operations efar_ops = {
28392 +static const struct ata_port_operations efar_ops = {
28393 .inherits = &ata_bmdma_port_ops,
28394 .cable_detect = efar_cable_detect,
28395 .set_piomode = efar_set_piomode,
28396 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28397 index b9d8836..0b92030 100644
28398 --- a/drivers/ata/pata_hpt366.c
28399 +++ b/drivers/ata/pata_hpt366.c
28400 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28401 * Configuration for HPT366/68
28402 */
28403
28404 -static struct ata_port_operations hpt366_port_ops = {
28405 +static const struct ata_port_operations hpt366_port_ops = {
28406 .inherits = &ata_bmdma_port_ops,
28407 .cable_detect = hpt36x_cable_detect,
28408 .mode_filter = hpt366_filter,
28409 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28410 index 5af7f19..00c4980 100644
28411 --- a/drivers/ata/pata_hpt37x.c
28412 +++ b/drivers/ata/pata_hpt37x.c
28413 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28414 * Configuration for HPT370
28415 */
28416
28417 -static struct ata_port_operations hpt370_port_ops = {
28418 +static const struct ata_port_operations hpt370_port_ops = {
28419 .inherits = &ata_bmdma_port_ops,
28420
28421 .bmdma_stop = hpt370_bmdma_stop,
28422 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28423 * Configuration for HPT370A. Close to 370 but less filters
28424 */
28425
28426 -static struct ata_port_operations hpt370a_port_ops = {
28427 +static const struct ata_port_operations hpt370a_port_ops = {
28428 .inherits = &hpt370_port_ops,
28429 .mode_filter = hpt370a_filter,
28430 };
28431 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28432 * and DMA mode setting functionality.
28433 */
28434
28435 -static struct ata_port_operations hpt372_port_ops = {
28436 +static const struct ata_port_operations hpt372_port_ops = {
28437 .inherits = &ata_bmdma_port_ops,
28438
28439 .bmdma_stop = hpt37x_bmdma_stop,
28440 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28441 * but we have a different cable detection procedure for function 1.
28442 */
28443
28444 -static struct ata_port_operations hpt374_fn1_port_ops = {
28445 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28446 .inherits = &hpt372_port_ops,
28447 .prereset = hpt374_fn1_pre_reset,
28448 };
28449 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28450 index 100f227..2e39382 100644
28451 --- a/drivers/ata/pata_hpt3x2n.c
28452 +++ b/drivers/ata/pata_hpt3x2n.c
28453 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28454 * Configuration for HPT3x2n.
28455 */
28456
28457 -static struct ata_port_operations hpt3x2n_port_ops = {
28458 +static const struct ata_port_operations hpt3x2n_port_ops = {
28459 .inherits = &ata_bmdma_port_ops,
28460
28461 .bmdma_stop = hpt3x2n_bmdma_stop,
28462 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28463 index 7e31025..6fca8f4 100644
28464 --- a/drivers/ata/pata_hpt3x3.c
28465 +++ b/drivers/ata/pata_hpt3x3.c
28466 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28467 ATA_BMDMA_SHT(DRV_NAME),
28468 };
28469
28470 -static struct ata_port_operations hpt3x3_port_ops = {
28471 +static const struct ata_port_operations hpt3x3_port_ops = {
28472 .inherits = &ata_bmdma_port_ops,
28473 .cable_detect = ata_cable_40wire,
28474 .set_piomode = hpt3x3_set_piomode,
28475 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28476 index b663b7f..9a26c2a 100644
28477 --- a/drivers/ata/pata_icside.c
28478 +++ b/drivers/ata/pata_icside.c
28479 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28480 }
28481 }
28482
28483 -static struct ata_port_operations pata_icside_port_ops = {
28484 +static const struct ata_port_operations pata_icside_port_ops = {
28485 .inherits = &ata_sff_port_ops,
28486 /* no need to build any PRD tables for DMA */
28487 .qc_prep = ata_noop_qc_prep,
28488 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28489 index 4bceb88..457dfb6 100644
28490 --- a/drivers/ata/pata_isapnp.c
28491 +++ b/drivers/ata/pata_isapnp.c
28492 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28493 ATA_PIO_SHT(DRV_NAME),
28494 };
28495
28496 -static struct ata_port_operations isapnp_port_ops = {
28497 +static const struct ata_port_operations isapnp_port_ops = {
28498 .inherits = &ata_sff_port_ops,
28499 .cable_detect = ata_cable_40wire,
28500 };
28501
28502 -static struct ata_port_operations isapnp_noalt_port_ops = {
28503 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28504 .inherits = &ata_sff_port_ops,
28505 .cable_detect = ata_cable_40wire,
28506 /* No altstatus so we don't want to use the lost interrupt poll */
28507 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28508 index f156da8..24976e2 100644
28509 --- a/drivers/ata/pata_it8213.c
28510 +++ b/drivers/ata/pata_it8213.c
28511 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28512 };
28513
28514
28515 -static struct ata_port_operations it8213_ops = {
28516 +static const struct ata_port_operations it8213_ops = {
28517 .inherits = &ata_bmdma_port_ops,
28518 .cable_detect = it8213_cable_detect,
28519 .set_piomode = it8213_set_piomode,
28520 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28521 index 188bc2f..ca9e785 100644
28522 --- a/drivers/ata/pata_it821x.c
28523 +++ b/drivers/ata/pata_it821x.c
28524 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28525 ATA_BMDMA_SHT(DRV_NAME),
28526 };
28527
28528 -static struct ata_port_operations it821x_smart_port_ops = {
28529 +static const struct ata_port_operations it821x_smart_port_ops = {
28530 .inherits = &ata_bmdma_port_ops,
28531
28532 .check_atapi_dma= it821x_check_atapi_dma,
28533 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28534 .port_start = it821x_port_start,
28535 };
28536
28537 -static struct ata_port_operations it821x_passthru_port_ops = {
28538 +static const struct ata_port_operations it821x_passthru_port_ops = {
28539 .inherits = &ata_bmdma_port_ops,
28540
28541 .check_atapi_dma= it821x_check_atapi_dma,
28542 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28543 .port_start = it821x_port_start,
28544 };
28545
28546 -static struct ata_port_operations it821x_rdc_port_ops = {
28547 +static const struct ata_port_operations it821x_rdc_port_ops = {
28548 .inherits = &ata_bmdma_port_ops,
28549
28550 .check_atapi_dma= it821x_check_atapi_dma,
28551 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28552 index ba54b08..4b952b7 100644
28553 --- a/drivers/ata/pata_ixp4xx_cf.c
28554 +++ b/drivers/ata/pata_ixp4xx_cf.c
28555 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28556 ATA_PIO_SHT(DRV_NAME),
28557 };
28558
28559 -static struct ata_port_operations ixp4xx_port_ops = {
28560 +static const struct ata_port_operations ixp4xx_port_ops = {
28561 .inherits = &ata_sff_port_ops,
28562 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28563 .cable_detect = ata_cable_40wire,
28564 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28565 index 3a1474a..434b0ff 100644
28566 --- a/drivers/ata/pata_jmicron.c
28567 +++ b/drivers/ata/pata_jmicron.c
28568 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28569 ATA_BMDMA_SHT(DRV_NAME),
28570 };
28571
28572 -static struct ata_port_operations jmicron_ops = {
28573 +static const struct ata_port_operations jmicron_ops = {
28574 .inherits = &ata_bmdma_port_ops,
28575 .prereset = jmicron_pre_reset,
28576 };
28577 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28578 index 6932e56..220e71d 100644
28579 --- a/drivers/ata/pata_legacy.c
28580 +++ b/drivers/ata/pata_legacy.c
28581 @@ -106,7 +106,7 @@ struct legacy_probe {
28582
28583 struct legacy_controller {
28584 const char *name;
28585 - struct ata_port_operations *ops;
28586 + const struct ata_port_operations *ops;
28587 unsigned int pio_mask;
28588 unsigned int flags;
28589 unsigned int pflags;
28590 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28591 * pio_mask as well.
28592 */
28593
28594 -static struct ata_port_operations simple_port_ops = {
28595 +static const struct ata_port_operations simple_port_ops = {
28596 .inherits = &legacy_base_port_ops,
28597 .sff_data_xfer = ata_sff_data_xfer_noirq,
28598 };
28599
28600 -static struct ata_port_operations legacy_port_ops = {
28601 +static const struct ata_port_operations legacy_port_ops = {
28602 .inherits = &legacy_base_port_ops,
28603 .sff_data_xfer = ata_sff_data_xfer_noirq,
28604 .set_mode = legacy_set_mode,
28605 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28606 return buflen;
28607 }
28608
28609 -static struct ata_port_operations pdc20230_port_ops = {
28610 +static const struct ata_port_operations pdc20230_port_ops = {
28611 .inherits = &legacy_base_port_ops,
28612 .set_piomode = pdc20230_set_piomode,
28613 .sff_data_xfer = pdc_data_xfer_vlb,
28614 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28615 ioread8(ap->ioaddr.status_addr);
28616 }
28617
28618 -static struct ata_port_operations ht6560a_port_ops = {
28619 +static const struct ata_port_operations ht6560a_port_ops = {
28620 .inherits = &legacy_base_port_ops,
28621 .set_piomode = ht6560a_set_piomode,
28622 };
28623 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28624 ioread8(ap->ioaddr.status_addr);
28625 }
28626
28627 -static struct ata_port_operations ht6560b_port_ops = {
28628 +static const struct ata_port_operations ht6560b_port_ops = {
28629 .inherits = &legacy_base_port_ops,
28630 .set_piomode = ht6560b_set_piomode,
28631 };
28632 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28633 }
28634
28635
28636 -static struct ata_port_operations opti82c611a_port_ops = {
28637 +static const struct ata_port_operations opti82c611a_port_ops = {
28638 .inherits = &legacy_base_port_ops,
28639 .set_piomode = opti82c611a_set_piomode,
28640 };
28641 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28642 return ata_sff_qc_issue(qc);
28643 }
28644
28645 -static struct ata_port_operations opti82c46x_port_ops = {
28646 +static const struct ata_port_operations opti82c46x_port_ops = {
28647 .inherits = &legacy_base_port_ops,
28648 .set_piomode = opti82c46x_set_piomode,
28649 .qc_issue = opti82c46x_qc_issue,
28650 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28651 return 0;
28652 }
28653
28654 -static struct ata_port_operations qdi6500_port_ops = {
28655 +static const struct ata_port_operations qdi6500_port_ops = {
28656 .inherits = &legacy_base_port_ops,
28657 .set_piomode = qdi6500_set_piomode,
28658 .qc_issue = qdi_qc_issue,
28659 .sff_data_xfer = vlb32_data_xfer,
28660 };
28661
28662 -static struct ata_port_operations qdi6580_port_ops = {
28663 +static const struct ata_port_operations qdi6580_port_ops = {
28664 .inherits = &legacy_base_port_ops,
28665 .set_piomode = qdi6580_set_piomode,
28666 .sff_data_xfer = vlb32_data_xfer,
28667 };
28668
28669 -static struct ata_port_operations qdi6580dp_port_ops = {
28670 +static const struct ata_port_operations qdi6580dp_port_ops = {
28671 .inherits = &legacy_base_port_ops,
28672 .set_piomode = qdi6580dp_set_piomode,
28673 .sff_data_xfer = vlb32_data_xfer,
28674 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28675 return 0;
28676 }
28677
28678 -static struct ata_port_operations winbond_port_ops = {
28679 +static const struct ata_port_operations winbond_port_ops = {
28680 .inherits = &legacy_base_port_ops,
28681 .set_piomode = winbond_set_piomode,
28682 .sff_data_xfer = vlb32_data_xfer,
28683 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28684 int pio_modes = controller->pio_mask;
28685 unsigned long io = probe->port;
28686 u32 mask = (1 << probe->slot);
28687 - struct ata_port_operations *ops = controller->ops;
28688 + const struct ata_port_operations *ops = controller->ops;
28689 struct legacy_data *ld = &legacy_data[probe->slot];
28690 struct ata_host *host = NULL;
28691 struct ata_port *ap;
28692 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28693 index 2096fb7..4d090fc 100644
28694 --- a/drivers/ata/pata_marvell.c
28695 +++ b/drivers/ata/pata_marvell.c
28696 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28697 ATA_BMDMA_SHT(DRV_NAME),
28698 };
28699
28700 -static struct ata_port_operations marvell_ops = {
28701 +static const struct ata_port_operations marvell_ops = {
28702 .inherits = &ata_bmdma_port_ops,
28703 .cable_detect = marvell_cable_detect,
28704 .prereset = marvell_pre_reset,
28705 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28706 index 99d41be..7d56aa8 100644
28707 --- a/drivers/ata/pata_mpc52xx.c
28708 +++ b/drivers/ata/pata_mpc52xx.c
28709 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28710 ATA_PIO_SHT(DRV_NAME),
28711 };
28712
28713 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28714 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28715 .inherits = &ata_bmdma_port_ops,
28716 .sff_dev_select = mpc52xx_ata_dev_select,
28717 .set_piomode = mpc52xx_ata_set_piomode,
28718 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28719 index b21f002..0a27e7f 100644
28720 --- a/drivers/ata/pata_mpiix.c
28721 +++ b/drivers/ata/pata_mpiix.c
28722 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28723 ATA_PIO_SHT(DRV_NAME),
28724 };
28725
28726 -static struct ata_port_operations mpiix_port_ops = {
28727 +static const struct ata_port_operations mpiix_port_ops = {
28728 .inherits = &ata_sff_port_ops,
28729 .qc_issue = mpiix_qc_issue,
28730 .cable_detect = ata_cable_40wire,
28731 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28732 index f0d52f7..89c3be3 100644
28733 --- a/drivers/ata/pata_netcell.c
28734 +++ b/drivers/ata/pata_netcell.c
28735 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28736 ATA_BMDMA_SHT(DRV_NAME),
28737 };
28738
28739 -static struct ata_port_operations netcell_ops = {
28740 +static const struct ata_port_operations netcell_ops = {
28741 .inherits = &ata_bmdma_port_ops,
28742 .cable_detect = ata_cable_80wire,
28743 .read_id = netcell_read_id,
28744 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28745 index dd53a66..a3f4317 100644
28746 --- a/drivers/ata/pata_ninja32.c
28747 +++ b/drivers/ata/pata_ninja32.c
28748 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28749 ATA_BMDMA_SHT(DRV_NAME),
28750 };
28751
28752 -static struct ata_port_operations ninja32_port_ops = {
28753 +static const struct ata_port_operations ninja32_port_ops = {
28754 .inherits = &ata_bmdma_port_ops,
28755 .sff_dev_select = ninja32_dev_select,
28756 .cable_detect = ata_cable_40wire,
28757 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28758 index ca53fac..9aa93ef 100644
28759 --- a/drivers/ata/pata_ns87410.c
28760 +++ b/drivers/ata/pata_ns87410.c
28761 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28762 ATA_PIO_SHT(DRV_NAME),
28763 };
28764
28765 -static struct ata_port_operations ns87410_port_ops = {
28766 +static const struct ata_port_operations ns87410_port_ops = {
28767 .inherits = &ata_sff_port_ops,
28768 .qc_issue = ns87410_qc_issue,
28769 .cable_detect = ata_cable_40wire,
28770 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28771 index 773b159..55f454e 100644
28772 --- a/drivers/ata/pata_ns87415.c
28773 +++ b/drivers/ata/pata_ns87415.c
28774 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28775 }
28776 #endif /* 87560 SuperIO Support */
28777
28778 -static struct ata_port_operations ns87415_pata_ops = {
28779 +static const struct ata_port_operations ns87415_pata_ops = {
28780 .inherits = &ata_bmdma_port_ops,
28781
28782 .check_atapi_dma = ns87415_check_atapi_dma,
28783 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28784 };
28785
28786 #if defined(CONFIG_SUPERIO)
28787 -static struct ata_port_operations ns87560_pata_ops = {
28788 +static const struct ata_port_operations ns87560_pata_ops = {
28789 .inherits = &ns87415_pata_ops,
28790 .sff_tf_read = ns87560_tf_read,
28791 .sff_check_status = ns87560_check_status,
28792 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28793 index d6f6956..639295b 100644
28794 --- a/drivers/ata/pata_octeon_cf.c
28795 +++ b/drivers/ata/pata_octeon_cf.c
28796 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28797 return 0;
28798 }
28799
28800 +/* cannot be const */
28801 static struct ata_port_operations octeon_cf_ops = {
28802 .inherits = &ata_sff_port_ops,
28803 .check_atapi_dma = octeon_cf_check_atapi_dma,
28804 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28805 index 84ac503..adee1cd 100644
28806 --- a/drivers/ata/pata_oldpiix.c
28807 +++ b/drivers/ata/pata_oldpiix.c
28808 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28809 ATA_BMDMA_SHT(DRV_NAME),
28810 };
28811
28812 -static struct ata_port_operations oldpiix_pata_ops = {
28813 +static const struct ata_port_operations oldpiix_pata_ops = {
28814 .inherits = &ata_bmdma_port_ops,
28815 .qc_issue = oldpiix_qc_issue,
28816 .cable_detect = ata_cable_40wire,
28817 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28818 index 99eddda..3a4c0aa 100644
28819 --- a/drivers/ata/pata_opti.c
28820 +++ b/drivers/ata/pata_opti.c
28821 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28822 ATA_PIO_SHT(DRV_NAME),
28823 };
28824
28825 -static struct ata_port_operations opti_port_ops = {
28826 +static const struct ata_port_operations opti_port_ops = {
28827 .inherits = &ata_sff_port_ops,
28828 .cable_detect = ata_cable_40wire,
28829 .set_piomode = opti_set_piomode,
28830 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28831 index 86885a4..8e9968d 100644
28832 --- a/drivers/ata/pata_optidma.c
28833 +++ b/drivers/ata/pata_optidma.c
28834 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28835 ATA_BMDMA_SHT(DRV_NAME),
28836 };
28837
28838 -static struct ata_port_operations optidma_port_ops = {
28839 +static const struct ata_port_operations optidma_port_ops = {
28840 .inherits = &ata_bmdma_port_ops,
28841 .cable_detect = ata_cable_40wire,
28842 .set_piomode = optidma_set_pio_mode,
28843 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28844 .prereset = optidma_pre_reset,
28845 };
28846
28847 -static struct ata_port_operations optiplus_port_ops = {
28848 +static const struct ata_port_operations optiplus_port_ops = {
28849 .inherits = &optidma_port_ops,
28850 .set_piomode = optiplus_set_pio_mode,
28851 .set_dmamode = optiplus_set_dma_mode,
28852 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28853 index 11fb4cc..1a14022 100644
28854 --- a/drivers/ata/pata_palmld.c
28855 +++ b/drivers/ata/pata_palmld.c
28856 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28857 ATA_PIO_SHT(DRV_NAME),
28858 };
28859
28860 -static struct ata_port_operations palmld_port_ops = {
28861 +static const struct ata_port_operations palmld_port_ops = {
28862 .inherits = &ata_sff_port_ops,
28863 .sff_data_xfer = ata_sff_data_xfer_noirq,
28864 .cable_detect = ata_cable_40wire,
28865 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28866 index dc99e26..7f4b1e4 100644
28867 --- a/drivers/ata/pata_pcmcia.c
28868 +++ b/drivers/ata/pata_pcmcia.c
28869 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28870 ATA_PIO_SHT(DRV_NAME),
28871 };
28872
28873 -static struct ata_port_operations pcmcia_port_ops = {
28874 +static const struct ata_port_operations pcmcia_port_ops = {
28875 .inherits = &ata_sff_port_ops,
28876 .sff_data_xfer = ata_sff_data_xfer_noirq,
28877 .cable_detect = ata_cable_40wire,
28878 .set_mode = pcmcia_set_mode,
28879 };
28880
28881 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28882 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28883 .inherits = &ata_sff_port_ops,
28884 .sff_data_xfer = ata_data_xfer_8bit,
28885 .cable_detect = ata_cable_40wire,
28886 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28887 unsigned long io_base, ctl_base;
28888 void __iomem *io_addr, *ctl_addr;
28889 int n_ports = 1;
28890 - struct ata_port_operations *ops = &pcmcia_port_ops;
28891 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28892
28893 info = kzalloc(sizeof(*info), GFP_KERNEL);
28894 if (info == NULL)
28895 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28896 index ca5cad0..3a1f125 100644
28897 --- a/drivers/ata/pata_pdc2027x.c
28898 +++ b/drivers/ata/pata_pdc2027x.c
28899 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28900 ATA_BMDMA_SHT(DRV_NAME),
28901 };
28902
28903 -static struct ata_port_operations pdc2027x_pata100_ops = {
28904 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28905 .inherits = &ata_bmdma_port_ops,
28906 .check_atapi_dma = pdc2027x_check_atapi_dma,
28907 .cable_detect = pdc2027x_cable_detect,
28908 .prereset = pdc2027x_prereset,
28909 };
28910
28911 -static struct ata_port_operations pdc2027x_pata133_ops = {
28912 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28913 .inherits = &pdc2027x_pata100_ops,
28914 .mode_filter = pdc2027x_mode_filter,
28915 .set_piomode = pdc2027x_set_piomode,
28916 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28917 index 2911120..4bf62aa 100644
28918 --- a/drivers/ata/pata_pdc202xx_old.c
28919 +++ b/drivers/ata/pata_pdc202xx_old.c
28920 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28921 ATA_BMDMA_SHT(DRV_NAME),
28922 };
28923
28924 -static struct ata_port_operations pdc2024x_port_ops = {
28925 +static const struct ata_port_operations pdc2024x_port_ops = {
28926 .inherits = &ata_bmdma_port_ops,
28927
28928 .cable_detect = ata_cable_40wire,
28929 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28930 .sff_exec_command = pdc202xx_exec_command,
28931 };
28932
28933 -static struct ata_port_operations pdc2026x_port_ops = {
28934 +static const struct ata_port_operations pdc2026x_port_ops = {
28935 .inherits = &pdc2024x_port_ops,
28936
28937 .check_atapi_dma = pdc2026x_check_atapi_dma,
28938 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28939 index 3f6ebc6..a18c358 100644
28940 --- a/drivers/ata/pata_platform.c
28941 +++ b/drivers/ata/pata_platform.c
28942 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28943 ATA_PIO_SHT(DRV_NAME),
28944 };
28945
28946 -static struct ata_port_operations pata_platform_port_ops = {
28947 +static const struct ata_port_operations pata_platform_port_ops = {
28948 .inherits = &ata_sff_port_ops,
28949 .sff_data_xfer = ata_sff_data_xfer_noirq,
28950 .cable_detect = ata_cable_unknown,
28951 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28952 index 45879dc..165a9f9 100644
28953 --- a/drivers/ata/pata_qdi.c
28954 +++ b/drivers/ata/pata_qdi.c
28955 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28956 ATA_PIO_SHT(DRV_NAME),
28957 };
28958
28959 -static struct ata_port_operations qdi6500_port_ops = {
28960 +static const struct ata_port_operations qdi6500_port_ops = {
28961 .inherits = &ata_sff_port_ops,
28962 .qc_issue = qdi_qc_issue,
28963 .sff_data_xfer = qdi_data_xfer,
28964 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28965 .set_piomode = qdi6500_set_piomode,
28966 };
28967
28968 -static struct ata_port_operations qdi6580_port_ops = {
28969 +static const struct ata_port_operations qdi6580_port_ops = {
28970 .inherits = &qdi6500_port_ops,
28971 .set_piomode = qdi6580_set_piomode,
28972 };
28973 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28974 index 4401b33..716c5cc 100644
28975 --- a/drivers/ata/pata_radisys.c
28976 +++ b/drivers/ata/pata_radisys.c
28977 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28978 ATA_BMDMA_SHT(DRV_NAME),
28979 };
28980
28981 -static struct ata_port_operations radisys_pata_ops = {
28982 +static const struct ata_port_operations radisys_pata_ops = {
28983 .inherits = &ata_bmdma_port_ops,
28984 .qc_issue = radisys_qc_issue,
28985 .cable_detect = ata_cable_unknown,
28986 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28987 index 45f1e10..fab6bca 100644
28988 --- a/drivers/ata/pata_rb532_cf.c
28989 +++ b/drivers/ata/pata_rb532_cf.c
28990 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28991 return IRQ_HANDLED;
28992 }
28993
28994 -static struct ata_port_operations rb532_pata_port_ops = {
28995 +static const struct ata_port_operations rb532_pata_port_ops = {
28996 .inherits = &ata_sff_port_ops,
28997 .sff_data_xfer = ata_sff_data_xfer32,
28998 };
28999 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29000 index c843a1e..b5853c3 100644
29001 --- a/drivers/ata/pata_rdc.c
29002 +++ b/drivers/ata/pata_rdc.c
29003 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29004 pci_write_config_byte(dev, 0x48, udma_enable);
29005 }
29006
29007 -static struct ata_port_operations rdc_pata_ops = {
29008 +static const struct ata_port_operations rdc_pata_ops = {
29009 .inherits = &ata_bmdma32_port_ops,
29010 .cable_detect = rdc_pata_cable_detect,
29011 .set_piomode = rdc_set_piomode,
29012 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29013 index a5e4dfe..080c8c9 100644
29014 --- a/drivers/ata/pata_rz1000.c
29015 +++ b/drivers/ata/pata_rz1000.c
29016 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29017 ATA_PIO_SHT(DRV_NAME),
29018 };
29019
29020 -static struct ata_port_operations rz1000_port_ops = {
29021 +static const struct ata_port_operations rz1000_port_ops = {
29022 .inherits = &ata_sff_port_ops,
29023 .cable_detect = ata_cable_40wire,
29024 .set_mode = rz1000_set_mode,
29025 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29026 index 3bbed83..e309daf 100644
29027 --- a/drivers/ata/pata_sc1200.c
29028 +++ b/drivers/ata/pata_sc1200.c
29029 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29030 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29031 };
29032
29033 -static struct ata_port_operations sc1200_port_ops = {
29034 +static const struct ata_port_operations sc1200_port_ops = {
29035 .inherits = &ata_bmdma_port_ops,
29036 .qc_prep = ata_sff_dumb_qc_prep,
29037 .qc_issue = sc1200_qc_issue,
29038 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29039 index 4257d6b..4c1d9d5 100644
29040 --- a/drivers/ata/pata_scc.c
29041 +++ b/drivers/ata/pata_scc.c
29042 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29043 ATA_BMDMA_SHT(DRV_NAME),
29044 };
29045
29046 -static struct ata_port_operations scc_pata_ops = {
29047 +static const struct ata_port_operations scc_pata_ops = {
29048 .inherits = &ata_bmdma_port_ops,
29049
29050 .set_piomode = scc_set_piomode,
29051 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29052 index 99cceb4..e2e0a87 100644
29053 --- a/drivers/ata/pata_sch.c
29054 +++ b/drivers/ata/pata_sch.c
29055 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29056 ATA_BMDMA_SHT(DRV_NAME),
29057 };
29058
29059 -static struct ata_port_operations sch_pata_ops = {
29060 +static const struct ata_port_operations sch_pata_ops = {
29061 .inherits = &ata_bmdma_port_ops,
29062 .cable_detect = ata_cable_unknown,
29063 .set_piomode = sch_set_piomode,
29064 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29065 index beaed12..39969f1 100644
29066 --- a/drivers/ata/pata_serverworks.c
29067 +++ b/drivers/ata/pata_serverworks.c
29068 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29069 ATA_BMDMA_SHT(DRV_NAME),
29070 };
29071
29072 -static struct ata_port_operations serverworks_osb4_port_ops = {
29073 +static const struct ata_port_operations serverworks_osb4_port_ops = {
29074 .inherits = &ata_bmdma_port_ops,
29075 .cable_detect = serverworks_cable_detect,
29076 .mode_filter = serverworks_osb4_filter,
29077 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29078 .set_dmamode = serverworks_set_dmamode,
29079 };
29080
29081 -static struct ata_port_operations serverworks_csb_port_ops = {
29082 +static const struct ata_port_operations serverworks_csb_port_ops = {
29083 .inherits = &serverworks_osb4_port_ops,
29084 .mode_filter = serverworks_csb_filter,
29085 };
29086 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29087 index a2ace48..0463b44 100644
29088 --- a/drivers/ata/pata_sil680.c
29089 +++ b/drivers/ata/pata_sil680.c
29090 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29091 ATA_BMDMA_SHT(DRV_NAME),
29092 };
29093
29094 -static struct ata_port_operations sil680_port_ops = {
29095 +static const struct ata_port_operations sil680_port_ops = {
29096 .inherits = &ata_bmdma32_port_ops,
29097 .cable_detect = sil680_cable_detect,
29098 .set_piomode = sil680_set_piomode,
29099 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29100 index 488e77b..b3724d5 100644
29101 --- a/drivers/ata/pata_sis.c
29102 +++ b/drivers/ata/pata_sis.c
29103 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29104 ATA_BMDMA_SHT(DRV_NAME),
29105 };
29106
29107 -static struct ata_port_operations sis_133_for_sata_ops = {
29108 +static const struct ata_port_operations sis_133_for_sata_ops = {
29109 .inherits = &ata_bmdma_port_ops,
29110 .set_piomode = sis_133_set_piomode,
29111 .set_dmamode = sis_133_set_dmamode,
29112 .cable_detect = sis_133_cable_detect,
29113 };
29114
29115 -static struct ata_port_operations sis_base_ops = {
29116 +static const struct ata_port_operations sis_base_ops = {
29117 .inherits = &ata_bmdma_port_ops,
29118 .prereset = sis_pre_reset,
29119 };
29120
29121 -static struct ata_port_operations sis_133_ops = {
29122 +static const struct ata_port_operations sis_133_ops = {
29123 .inherits = &sis_base_ops,
29124 .set_piomode = sis_133_set_piomode,
29125 .set_dmamode = sis_133_set_dmamode,
29126 .cable_detect = sis_133_cable_detect,
29127 };
29128
29129 -static struct ata_port_operations sis_133_early_ops = {
29130 +static const struct ata_port_operations sis_133_early_ops = {
29131 .inherits = &sis_base_ops,
29132 .set_piomode = sis_100_set_piomode,
29133 .set_dmamode = sis_133_early_set_dmamode,
29134 .cable_detect = sis_66_cable_detect,
29135 };
29136
29137 -static struct ata_port_operations sis_100_ops = {
29138 +static const struct ata_port_operations sis_100_ops = {
29139 .inherits = &sis_base_ops,
29140 .set_piomode = sis_100_set_piomode,
29141 .set_dmamode = sis_100_set_dmamode,
29142 .cable_detect = sis_66_cable_detect,
29143 };
29144
29145 -static struct ata_port_operations sis_66_ops = {
29146 +static const struct ata_port_operations sis_66_ops = {
29147 .inherits = &sis_base_ops,
29148 .set_piomode = sis_old_set_piomode,
29149 .set_dmamode = sis_66_set_dmamode,
29150 .cable_detect = sis_66_cable_detect,
29151 };
29152
29153 -static struct ata_port_operations sis_old_ops = {
29154 +static const struct ata_port_operations sis_old_ops = {
29155 .inherits = &sis_base_ops,
29156 .set_piomode = sis_old_set_piomode,
29157 .set_dmamode = sis_old_set_dmamode,
29158 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29159 index 29f733c..43e9ca0 100644
29160 --- a/drivers/ata/pata_sl82c105.c
29161 +++ b/drivers/ata/pata_sl82c105.c
29162 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29163 ATA_BMDMA_SHT(DRV_NAME),
29164 };
29165
29166 -static struct ata_port_operations sl82c105_port_ops = {
29167 +static const struct ata_port_operations sl82c105_port_ops = {
29168 .inherits = &ata_bmdma_port_ops,
29169 .qc_defer = sl82c105_qc_defer,
29170 .bmdma_start = sl82c105_bmdma_start,
29171 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29172 index f1f13ff..df39e99 100644
29173 --- a/drivers/ata/pata_triflex.c
29174 +++ b/drivers/ata/pata_triflex.c
29175 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29176 ATA_BMDMA_SHT(DRV_NAME),
29177 };
29178
29179 -static struct ata_port_operations triflex_port_ops = {
29180 +static const struct ata_port_operations triflex_port_ops = {
29181 .inherits = &ata_bmdma_port_ops,
29182 .bmdma_start = triflex_bmdma_start,
29183 .bmdma_stop = triflex_bmdma_stop,
29184 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29185 index 1d73b8d..98a4b29 100644
29186 --- a/drivers/ata/pata_via.c
29187 +++ b/drivers/ata/pata_via.c
29188 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29189 ATA_BMDMA_SHT(DRV_NAME),
29190 };
29191
29192 -static struct ata_port_operations via_port_ops = {
29193 +static const struct ata_port_operations via_port_ops = {
29194 .inherits = &ata_bmdma_port_ops,
29195 .cable_detect = via_cable_detect,
29196 .set_piomode = via_set_piomode,
29197 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29198 .port_start = via_port_start,
29199 };
29200
29201 -static struct ata_port_operations via_port_ops_noirq = {
29202 +static const struct ata_port_operations via_port_ops_noirq = {
29203 .inherits = &via_port_ops,
29204 .sff_data_xfer = ata_sff_data_xfer_noirq,
29205 };
29206 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29207 index 6d8619b..ad511c4 100644
29208 --- a/drivers/ata/pata_winbond.c
29209 +++ b/drivers/ata/pata_winbond.c
29210 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29211 ATA_PIO_SHT(DRV_NAME),
29212 };
29213
29214 -static struct ata_port_operations winbond_port_ops = {
29215 +static const struct ata_port_operations winbond_port_ops = {
29216 .inherits = &ata_sff_port_ops,
29217 .sff_data_xfer = winbond_data_xfer,
29218 .cable_detect = ata_cable_40wire,
29219 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29220 index 6c65b07..f996ec7 100644
29221 --- a/drivers/ata/pdc_adma.c
29222 +++ b/drivers/ata/pdc_adma.c
29223 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29224 .dma_boundary = ADMA_DMA_BOUNDARY,
29225 };
29226
29227 -static struct ata_port_operations adma_ata_ops = {
29228 +static const struct ata_port_operations adma_ata_ops = {
29229 .inherits = &ata_sff_port_ops,
29230
29231 .lost_interrupt = ATA_OP_NULL,
29232 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29233 index 172b57e..c49bc1e 100644
29234 --- a/drivers/ata/sata_fsl.c
29235 +++ b/drivers/ata/sata_fsl.c
29236 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29237 .dma_boundary = ATA_DMA_BOUNDARY,
29238 };
29239
29240 -static struct ata_port_operations sata_fsl_ops = {
29241 +static const struct ata_port_operations sata_fsl_ops = {
29242 .inherits = &sata_pmp_port_ops,
29243
29244 .qc_defer = ata_std_qc_defer,
29245 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29246 index 4406902..60603ef 100644
29247 --- a/drivers/ata/sata_inic162x.c
29248 +++ b/drivers/ata/sata_inic162x.c
29249 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29250 return 0;
29251 }
29252
29253 -static struct ata_port_operations inic_port_ops = {
29254 +static const struct ata_port_operations inic_port_ops = {
29255 .inherits = &sata_port_ops,
29256
29257 .check_atapi_dma = inic_check_atapi_dma,
29258 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29259 index cf41126..8107be6 100644
29260 --- a/drivers/ata/sata_mv.c
29261 +++ b/drivers/ata/sata_mv.c
29262 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29263 .dma_boundary = MV_DMA_BOUNDARY,
29264 };
29265
29266 -static struct ata_port_operations mv5_ops = {
29267 +static const struct ata_port_operations mv5_ops = {
29268 .inherits = &ata_sff_port_ops,
29269
29270 .lost_interrupt = ATA_OP_NULL,
29271 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29272 .port_stop = mv_port_stop,
29273 };
29274
29275 -static struct ata_port_operations mv6_ops = {
29276 +static const struct ata_port_operations mv6_ops = {
29277 .inherits = &mv5_ops,
29278 .dev_config = mv6_dev_config,
29279 .scr_read = mv_scr_read,
29280 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29281 .bmdma_status = mv_bmdma_status,
29282 };
29283
29284 -static struct ata_port_operations mv_iie_ops = {
29285 +static const struct ata_port_operations mv_iie_ops = {
29286 .inherits = &mv6_ops,
29287 .dev_config = ATA_OP_NULL,
29288 .qc_prep = mv_qc_prep_iie,
29289 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29290 index ae2297c..d5c9c33 100644
29291 --- a/drivers/ata/sata_nv.c
29292 +++ b/drivers/ata/sata_nv.c
29293 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29294 * cases. Define nv_hardreset() which only kicks in for post-boot
29295 * probing and use it for all variants.
29296 */
29297 -static struct ata_port_operations nv_generic_ops = {
29298 +static const struct ata_port_operations nv_generic_ops = {
29299 .inherits = &ata_bmdma_port_ops,
29300 .lost_interrupt = ATA_OP_NULL,
29301 .scr_read = nv_scr_read,
29302 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29303 .hardreset = nv_hardreset,
29304 };
29305
29306 -static struct ata_port_operations nv_nf2_ops = {
29307 +static const struct ata_port_operations nv_nf2_ops = {
29308 .inherits = &nv_generic_ops,
29309 .freeze = nv_nf2_freeze,
29310 .thaw = nv_nf2_thaw,
29311 };
29312
29313 -static struct ata_port_operations nv_ck804_ops = {
29314 +static const struct ata_port_operations nv_ck804_ops = {
29315 .inherits = &nv_generic_ops,
29316 .freeze = nv_ck804_freeze,
29317 .thaw = nv_ck804_thaw,
29318 .host_stop = nv_ck804_host_stop,
29319 };
29320
29321 -static struct ata_port_operations nv_adma_ops = {
29322 +static const struct ata_port_operations nv_adma_ops = {
29323 .inherits = &nv_ck804_ops,
29324
29325 .check_atapi_dma = nv_adma_check_atapi_dma,
29326 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29327 .host_stop = nv_adma_host_stop,
29328 };
29329
29330 -static struct ata_port_operations nv_swncq_ops = {
29331 +static const struct ata_port_operations nv_swncq_ops = {
29332 .inherits = &nv_generic_ops,
29333
29334 .qc_defer = ata_std_qc_defer,
29335 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29336 index 07d8d00..6cc70bb 100644
29337 --- a/drivers/ata/sata_promise.c
29338 +++ b/drivers/ata/sata_promise.c
29339 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29340 .error_handler = pdc_error_handler,
29341 };
29342
29343 -static struct ata_port_operations pdc_sata_ops = {
29344 +static const struct ata_port_operations pdc_sata_ops = {
29345 .inherits = &pdc_common_ops,
29346 .cable_detect = pdc_sata_cable_detect,
29347 .freeze = pdc_sata_freeze,
29348 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29349
29350 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29351 and ->freeze/thaw that ignore the hotplug controls. */
29352 -static struct ata_port_operations pdc_old_sata_ops = {
29353 +static const struct ata_port_operations pdc_old_sata_ops = {
29354 .inherits = &pdc_sata_ops,
29355 .freeze = pdc_freeze,
29356 .thaw = pdc_thaw,
29357 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29358 };
29359
29360 -static struct ata_port_operations pdc_pata_ops = {
29361 +static const struct ata_port_operations pdc_pata_ops = {
29362 .inherits = &pdc_common_ops,
29363 .cable_detect = pdc_pata_cable_detect,
29364 .freeze = pdc_freeze,
29365 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29366 index 326c0cf..36ecebe 100644
29367 --- a/drivers/ata/sata_qstor.c
29368 +++ b/drivers/ata/sata_qstor.c
29369 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29370 .dma_boundary = QS_DMA_BOUNDARY,
29371 };
29372
29373 -static struct ata_port_operations qs_ata_ops = {
29374 +static const struct ata_port_operations qs_ata_ops = {
29375 .inherits = &ata_sff_port_ops,
29376
29377 .check_atapi_dma = qs_check_atapi_dma,
29378 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29379 index 3cb69d5..0871d3c 100644
29380 --- a/drivers/ata/sata_sil.c
29381 +++ b/drivers/ata/sata_sil.c
29382 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29383 .sg_tablesize = ATA_MAX_PRD
29384 };
29385
29386 -static struct ata_port_operations sil_ops = {
29387 +static const struct ata_port_operations sil_ops = {
29388 .inherits = &ata_bmdma32_port_ops,
29389 .dev_config = sil_dev_config,
29390 .set_mode = sil_set_mode,
29391 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29392 index e6946fc..eddb794 100644
29393 --- a/drivers/ata/sata_sil24.c
29394 +++ b/drivers/ata/sata_sil24.c
29395 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29396 .dma_boundary = ATA_DMA_BOUNDARY,
29397 };
29398
29399 -static struct ata_port_operations sil24_ops = {
29400 +static const struct ata_port_operations sil24_ops = {
29401 .inherits = &sata_pmp_port_ops,
29402
29403 .qc_defer = sil24_qc_defer,
29404 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29405 index f8a91bf..9cb06b6 100644
29406 --- a/drivers/ata/sata_sis.c
29407 +++ b/drivers/ata/sata_sis.c
29408 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29409 ATA_BMDMA_SHT(DRV_NAME),
29410 };
29411
29412 -static struct ata_port_operations sis_ops = {
29413 +static const struct ata_port_operations sis_ops = {
29414 .inherits = &ata_bmdma_port_ops,
29415 .scr_read = sis_scr_read,
29416 .scr_write = sis_scr_write,
29417 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29418 index 7257f2d..d04c6f5 100644
29419 --- a/drivers/ata/sata_svw.c
29420 +++ b/drivers/ata/sata_svw.c
29421 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29422 };
29423
29424
29425 -static struct ata_port_operations k2_sata_ops = {
29426 +static const struct ata_port_operations k2_sata_ops = {
29427 .inherits = &ata_bmdma_port_ops,
29428 .sff_tf_load = k2_sata_tf_load,
29429 .sff_tf_read = k2_sata_tf_read,
29430 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29431 index bbcf970..cd0df0d 100644
29432 --- a/drivers/ata/sata_sx4.c
29433 +++ b/drivers/ata/sata_sx4.c
29434 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29435 };
29436
29437 /* TODO: inherit from base port_ops after converting to new EH */
29438 -static struct ata_port_operations pdc_20621_ops = {
29439 +static const struct ata_port_operations pdc_20621_ops = {
29440 .inherits = &ata_sff_port_ops,
29441
29442 .check_atapi_dma = pdc_check_atapi_dma,
29443 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29444 index e5bff47..089d859 100644
29445 --- a/drivers/ata/sata_uli.c
29446 +++ b/drivers/ata/sata_uli.c
29447 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29448 ATA_BMDMA_SHT(DRV_NAME),
29449 };
29450
29451 -static struct ata_port_operations uli_ops = {
29452 +static const struct ata_port_operations uli_ops = {
29453 .inherits = &ata_bmdma_port_ops,
29454 .scr_read = uli_scr_read,
29455 .scr_write = uli_scr_write,
29456 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29457 index f5dcca7..77b94eb 100644
29458 --- a/drivers/ata/sata_via.c
29459 +++ b/drivers/ata/sata_via.c
29460 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29461 ATA_BMDMA_SHT(DRV_NAME),
29462 };
29463
29464 -static struct ata_port_operations svia_base_ops = {
29465 +static const struct ata_port_operations svia_base_ops = {
29466 .inherits = &ata_bmdma_port_ops,
29467 .sff_tf_load = svia_tf_load,
29468 };
29469
29470 -static struct ata_port_operations vt6420_sata_ops = {
29471 +static const struct ata_port_operations vt6420_sata_ops = {
29472 .inherits = &svia_base_ops,
29473 .freeze = svia_noop_freeze,
29474 .prereset = vt6420_prereset,
29475 .bmdma_start = vt6420_bmdma_start,
29476 };
29477
29478 -static struct ata_port_operations vt6421_pata_ops = {
29479 +static const struct ata_port_operations vt6421_pata_ops = {
29480 .inherits = &svia_base_ops,
29481 .cable_detect = vt6421_pata_cable_detect,
29482 .set_piomode = vt6421_set_pio_mode,
29483 .set_dmamode = vt6421_set_dma_mode,
29484 };
29485
29486 -static struct ata_port_operations vt6421_sata_ops = {
29487 +static const struct ata_port_operations vt6421_sata_ops = {
29488 .inherits = &svia_base_ops,
29489 .scr_read = svia_scr_read,
29490 .scr_write = svia_scr_write,
29491 };
29492
29493 -static struct ata_port_operations vt8251_ops = {
29494 +static const struct ata_port_operations vt8251_ops = {
29495 .inherits = &svia_base_ops,
29496 .hardreset = sata_std_hardreset,
29497 .scr_read = vt8251_scr_read,
29498 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29499 index 8b2a278..51e65d3 100644
29500 --- a/drivers/ata/sata_vsc.c
29501 +++ b/drivers/ata/sata_vsc.c
29502 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29503 };
29504
29505
29506 -static struct ata_port_operations vsc_sata_ops = {
29507 +static const struct ata_port_operations vsc_sata_ops = {
29508 .inherits = &ata_bmdma_port_ops,
29509 /* The IRQ handling is not quite standard SFF behaviour so we
29510 cannot use the default lost interrupt handler */
29511 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29512 index 5effec6..7e4019a 100644
29513 --- a/drivers/atm/adummy.c
29514 +++ b/drivers/atm/adummy.c
29515 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29516 vcc->pop(vcc, skb);
29517 else
29518 dev_kfree_skb_any(skb);
29519 - atomic_inc(&vcc->stats->tx);
29520 + atomic_inc_unchecked(&vcc->stats->tx);
29521
29522 return 0;
29523 }
29524 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29525 index 66e1813..26a27c6 100644
29526 --- a/drivers/atm/ambassador.c
29527 +++ b/drivers/atm/ambassador.c
29528 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29529 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29530
29531 // VC layer stats
29532 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29533 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29534
29535 // free the descriptor
29536 kfree (tx_descr);
29537 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29538 dump_skb ("<<<", vc, skb);
29539
29540 // VC layer stats
29541 - atomic_inc(&atm_vcc->stats->rx);
29542 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29543 __net_timestamp(skb);
29544 // end of our responsability
29545 atm_vcc->push (atm_vcc, skb);
29546 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29547 } else {
29548 PRINTK (KERN_INFO, "dropped over-size frame");
29549 // should we count this?
29550 - atomic_inc(&atm_vcc->stats->rx_drop);
29551 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29552 }
29553
29554 } else {
29555 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29556 }
29557
29558 if (check_area (skb->data, skb->len)) {
29559 - atomic_inc(&atm_vcc->stats->tx_err);
29560 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29561 return -ENOMEM; // ?
29562 }
29563
29564 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29565 index 02ad83d..6daffeb 100644
29566 --- a/drivers/atm/atmtcp.c
29567 +++ b/drivers/atm/atmtcp.c
29568 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29569 if (vcc->pop) vcc->pop(vcc,skb);
29570 else dev_kfree_skb(skb);
29571 if (dev_data) return 0;
29572 - atomic_inc(&vcc->stats->tx_err);
29573 + atomic_inc_unchecked(&vcc->stats->tx_err);
29574 return -ENOLINK;
29575 }
29576 size = skb->len+sizeof(struct atmtcp_hdr);
29577 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29578 if (!new_skb) {
29579 if (vcc->pop) vcc->pop(vcc,skb);
29580 else dev_kfree_skb(skb);
29581 - atomic_inc(&vcc->stats->tx_err);
29582 + atomic_inc_unchecked(&vcc->stats->tx_err);
29583 return -ENOBUFS;
29584 }
29585 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29586 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29587 if (vcc->pop) vcc->pop(vcc,skb);
29588 else dev_kfree_skb(skb);
29589 out_vcc->push(out_vcc,new_skb);
29590 - atomic_inc(&vcc->stats->tx);
29591 - atomic_inc(&out_vcc->stats->rx);
29592 + atomic_inc_unchecked(&vcc->stats->tx);
29593 + atomic_inc_unchecked(&out_vcc->stats->rx);
29594 return 0;
29595 }
29596
29597 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29598 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29599 read_unlock(&vcc_sklist_lock);
29600 if (!out_vcc) {
29601 - atomic_inc(&vcc->stats->tx_err);
29602 + atomic_inc_unchecked(&vcc->stats->tx_err);
29603 goto done;
29604 }
29605 skb_pull(skb,sizeof(struct atmtcp_hdr));
29606 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29607 __net_timestamp(new_skb);
29608 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29609 out_vcc->push(out_vcc,new_skb);
29610 - atomic_inc(&vcc->stats->tx);
29611 - atomic_inc(&out_vcc->stats->rx);
29612 + atomic_inc_unchecked(&vcc->stats->tx);
29613 + atomic_inc_unchecked(&out_vcc->stats->rx);
29614 done:
29615 if (vcc->pop) vcc->pop(vcc,skb);
29616 else dev_kfree_skb(skb);
29617 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29618 index 0c30261..3da356e 100644
29619 --- a/drivers/atm/eni.c
29620 +++ b/drivers/atm/eni.c
29621 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29622 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29623 vcc->dev->number);
29624 length = 0;
29625 - atomic_inc(&vcc->stats->rx_err);
29626 + atomic_inc_unchecked(&vcc->stats->rx_err);
29627 }
29628 else {
29629 length = ATM_CELL_SIZE-1; /* no HEC */
29630 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29631 size);
29632 }
29633 eff = length = 0;
29634 - atomic_inc(&vcc->stats->rx_err);
29635 + atomic_inc_unchecked(&vcc->stats->rx_err);
29636 }
29637 else {
29638 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29639 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29640 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29641 vcc->dev->number,vcc->vci,length,size << 2,descr);
29642 length = eff = 0;
29643 - atomic_inc(&vcc->stats->rx_err);
29644 + atomic_inc_unchecked(&vcc->stats->rx_err);
29645 }
29646 }
29647 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29648 @@ -770,7 +770,7 @@ rx_dequeued++;
29649 vcc->push(vcc,skb);
29650 pushed++;
29651 }
29652 - atomic_inc(&vcc->stats->rx);
29653 + atomic_inc_unchecked(&vcc->stats->rx);
29654 }
29655 wake_up(&eni_dev->rx_wait);
29656 }
29657 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29658 PCI_DMA_TODEVICE);
29659 if (vcc->pop) vcc->pop(vcc,skb);
29660 else dev_kfree_skb_irq(skb);
29661 - atomic_inc(&vcc->stats->tx);
29662 + atomic_inc_unchecked(&vcc->stats->tx);
29663 wake_up(&eni_dev->tx_wait);
29664 dma_complete++;
29665 }
29666 @@ -1570,7 +1570,7 @@ tx_complete++;
29667 /*--------------------------------- entries ---------------------------------*/
29668
29669
29670 -static const char *media_name[] __devinitdata = {
29671 +static const char *media_name[] __devinitconst = {
29672 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29673 "UTP", "05?", "06?", "07?", /* 4- 7 */
29674 "TAXI","09?", "10?", "11?", /* 8-11 */
29675 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29676 index cd5049a..a51209f 100644
29677 --- a/drivers/atm/firestream.c
29678 +++ b/drivers/atm/firestream.c
29679 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29680 }
29681 }
29682
29683 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29684 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29685
29686 fs_dprintk (FS_DEBUG_TXMEM, "i");
29687 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29688 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29689 #endif
29690 skb_put (skb, qe->p1 & 0xffff);
29691 ATM_SKB(skb)->vcc = atm_vcc;
29692 - atomic_inc(&atm_vcc->stats->rx);
29693 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29694 __net_timestamp(skb);
29695 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29696 atm_vcc->push (atm_vcc, skb);
29697 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29698 kfree (pe);
29699 }
29700 if (atm_vcc)
29701 - atomic_inc(&atm_vcc->stats->rx_drop);
29702 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29703 break;
29704 case 0x1f: /* Reassembly abort: no buffers. */
29705 /* Silently increment error counter. */
29706 if (atm_vcc)
29707 - atomic_inc(&atm_vcc->stats->rx_drop);
29708 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29709 break;
29710 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29711 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29712 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29713 index f766cc4..a34002e 100644
29714 --- a/drivers/atm/fore200e.c
29715 +++ b/drivers/atm/fore200e.c
29716 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29717 #endif
29718 /* check error condition */
29719 if (*entry->status & STATUS_ERROR)
29720 - atomic_inc(&vcc->stats->tx_err);
29721 + atomic_inc_unchecked(&vcc->stats->tx_err);
29722 else
29723 - atomic_inc(&vcc->stats->tx);
29724 + atomic_inc_unchecked(&vcc->stats->tx);
29725 }
29726 }
29727
29728 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29729 if (skb == NULL) {
29730 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29731
29732 - atomic_inc(&vcc->stats->rx_drop);
29733 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29734 return -ENOMEM;
29735 }
29736
29737 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29738
29739 dev_kfree_skb_any(skb);
29740
29741 - atomic_inc(&vcc->stats->rx_drop);
29742 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29743 return -ENOMEM;
29744 }
29745
29746 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29747
29748 vcc->push(vcc, skb);
29749 - atomic_inc(&vcc->stats->rx);
29750 + atomic_inc_unchecked(&vcc->stats->rx);
29751
29752 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29753
29754 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29755 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29756 fore200e->atm_dev->number,
29757 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29758 - atomic_inc(&vcc->stats->rx_err);
29759 + atomic_inc_unchecked(&vcc->stats->rx_err);
29760 }
29761 }
29762
29763 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29764 goto retry_here;
29765 }
29766
29767 - atomic_inc(&vcc->stats->tx_err);
29768 + atomic_inc_unchecked(&vcc->stats->tx_err);
29769
29770 fore200e->tx_sat++;
29771 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29772 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29773 index 7066703..2b130de 100644
29774 --- a/drivers/atm/he.c
29775 +++ b/drivers/atm/he.c
29776 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29777
29778 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29779 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29780 - atomic_inc(&vcc->stats->rx_drop);
29781 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29782 goto return_host_buffers;
29783 }
29784
29785 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29786 RBRQ_LEN_ERR(he_dev->rbrq_head)
29787 ? "LEN_ERR" : "",
29788 vcc->vpi, vcc->vci);
29789 - atomic_inc(&vcc->stats->rx_err);
29790 + atomic_inc_unchecked(&vcc->stats->rx_err);
29791 goto return_host_buffers;
29792 }
29793
29794 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29795 vcc->push(vcc, skb);
29796 spin_lock(&he_dev->global_lock);
29797
29798 - atomic_inc(&vcc->stats->rx);
29799 + atomic_inc_unchecked(&vcc->stats->rx);
29800
29801 return_host_buffers:
29802 ++pdus_assembled;
29803 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29804 tpd->vcc->pop(tpd->vcc, tpd->skb);
29805 else
29806 dev_kfree_skb_any(tpd->skb);
29807 - atomic_inc(&tpd->vcc->stats->tx_err);
29808 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29809 }
29810 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29811 return;
29812 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29813 vcc->pop(vcc, skb);
29814 else
29815 dev_kfree_skb_any(skb);
29816 - atomic_inc(&vcc->stats->tx_err);
29817 + atomic_inc_unchecked(&vcc->stats->tx_err);
29818 return -EINVAL;
29819 }
29820
29821 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29822 vcc->pop(vcc, skb);
29823 else
29824 dev_kfree_skb_any(skb);
29825 - atomic_inc(&vcc->stats->tx_err);
29826 + atomic_inc_unchecked(&vcc->stats->tx_err);
29827 return -EINVAL;
29828 }
29829 #endif
29830 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29831 vcc->pop(vcc, skb);
29832 else
29833 dev_kfree_skb_any(skb);
29834 - atomic_inc(&vcc->stats->tx_err);
29835 + atomic_inc_unchecked(&vcc->stats->tx_err);
29836 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29837 return -ENOMEM;
29838 }
29839 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29840 vcc->pop(vcc, skb);
29841 else
29842 dev_kfree_skb_any(skb);
29843 - atomic_inc(&vcc->stats->tx_err);
29844 + atomic_inc_unchecked(&vcc->stats->tx_err);
29845 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29846 return -ENOMEM;
29847 }
29848 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29849 __enqueue_tpd(he_dev, tpd, cid);
29850 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29851
29852 - atomic_inc(&vcc->stats->tx);
29853 + atomic_inc_unchecked(&vcc->stats->tx);
29854
29855 return 0;
29856 }
29857 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29858 index 4e49021..01b1512 100644
29859 --- a/drivers/atm/horizon.c
29860 +++ b/drivers/atm/horizon.c
29861 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29862 {
29863 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29864 // VC layer stats
29865 - atomic_inc(&vcc->stats->rx);
29866 + atomic_inc_unchecked(&vcc->stats->rx);
29867 __net_timestamp(skb);
29868 // end of our responsability
29869 vcc->push (vcc, skb);
29870 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29871 dev->tx_iovec = NULL;
29872
29873 // VC layer stats
29874 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29875 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29876
29877 // free the skb
29878 hrz_kfree_skb (skb);
29879 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29880 index e33ae00..9deb4ab 100644
29881 --- a/drivers/atm/idt77252.c
29882 +++ b/drivers/atm/idt77252.c
29883 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29884 else
29885 dev_kfree_skb(skb);
29886
29887 - atomic_inc(&vcc->stats->tx);
29888 + atomic_inc_unchecked(&vcc->stats->tx);
29889 }
29890
29891 atomic_dec(&scq->used);
29892 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29893 if ((sb = dev_alloc_skb(64)) == NULL) {
29894 printk("%s: Can't allocate buffers for aal0.\n",
29895 card->name);
29896 - atomic_add(i, &vcc->stats->rx_drop);
29897 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29898 break;
29899 }
29900 if (!atm_charge(vcc, sb->truesize)) {
29901 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29902 card->name);
29903 - atomic_add(i - 1, &vcc->stats->rx_drop);
29904 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29905 dev_kfree_skb(sb);
29906 break;
29907 }
29908 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29909 ATM_SKB(sb)->vcc = vcc;
29910 __net_timestamp(sb);
29911 vcc->push(vcc, sb);
29912 - atomic_inc(&vcc->stats->rx);
29913 + atomic_inc_unchecked(&vcc->stats->rx);
29914
29915 cell += ATM_CELL_PAYLOAD;
29916 }
29917 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29918 "(CDC: %08x)\n",
29919 card->name, len, rpp->len, readl(SAR_REG_CDC));
29920 recycle_rx_pool_skb(card, rpp);
29921 - atomic_inc(&vcc->stats->rx_err);
29922 + atomic_inc_unchecked(&vcc->stats->rx_err);
29923 return;
29924 }
29925 if (stat & SAR_RSQE_CRC) {
29926 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29927 recycle_rx_pool_skb(card, rpp);
29928 - atomic_inc(&vcc->stats->rx_err);
29929 + atomic_inc_unchecked(&vcc->stats->rx_err);
29930 return;
29931 }
29932 if (skb_queue_len(&rpp->queue) > 1) {
29933 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29934 RXPRINTK("%s: Can't alloc RX skb.\n",
29935 card->name);
29936 recycle_rx_pool_skb(card, rpp);
29937 - atomic_inc(&vcc->stats->rx_err);
29938 + atomic_inc_unchecked(&vcc->stats->rx_err);
29939 return;
29940 }
29941 if (!atm_charge(vcc, skb->truesize)) {
29942 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29943 __net_timestamp(skb);
29944
29945 vcc->push(vcc, skb);
29946 - atomic_inc(&vcc->stats->rx);
29947 + atomic_inc_unchecked(&vcc->stats->rx);
29948
29949 return;
29950 }
29951 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29952 __net_timestamp(skb);
29953
29954 vcc->push(vcc, skb);
29955 - atomic_inc(&vcc->stats->rx);
29956 + atomic_inc_unchecked(&vcc->stats->rx);
29957
29958 if (skb->truesize > SAR_FB_SIZE_3)
29959 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29960 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29961 if (vcc->qos.aal != ATM_AAL0) {
29962 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29963 card->name, vpi, vci);
29964 - atomic_inc(&vcc->stats->rx_drop);
29965 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29966 goto drop;
29967 }
29968
29969 if ((sb = dev_alloc_skb(64)) == NULL) {
29970 printk("%s: Can't allocate buffers for AAL0.\n",
29971 card->name);
29972 - atomic_inc(&vcc->stats->rx_err);
29973 + atomic_inc_unchecked(&vcc->stats->rx_err);
29974 goto drop;
29975 }
29976
29977 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29978 ATM_SKB(sb)->vcc = vcc;
29979 __net_timestamp(sb);
29980 vcc->push(vcc, sb);
29981 - atomic_inc(&vcc->stats->rx);
29982 + atomic_inc_unchecked(&vcc->stats->rx);
29983
29984 drop:
29985 skb_pull(queue, 64);
29986 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29987
29988 if (vc == NULL) {
29989 printk("%s: NULL connection in send().\n", card->name);
29990 - atomic_inc(&vcc->stats->tx_err);
29991 + atomic_inc_unchecked(&vcc->stats->tx_err);
29992 dev_kfree_skb(skb);
29993 return -EINVAL;
29994 }
29995 if (!test_bit(VCF_TX, &vc->flags)) {
29996 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29997 - atomic_inc(&vcc->stats->tx_err);
29998 + atomic_inc_unchecked(&vcc->stats->tx_err);
29999 dev_kfree_skb(skb);
30000 return -EINVAL;
30001 }
30002 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30003 break;
30004 default:
30005 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30006 - atomic_inc(&vcc->stats->tx_err);
30007 + atomic_inc_unchecked(&vcc->stats->tx_err);
30008 dev_kfree_skb(skb);
30009 return -EINVAL;
30010 }
30011
30012 if (skb_shinfo(skb)->nr_frags != 0) {
30013 printk("%s: No scatter-gather yet.\n", card->name);
30014 - atomic_inc(&vcc->stats->tx_err);
30015 + atomic_inc_unchecked(&vcc->stats->tx_err);
30016 dev_kfree_skb(skb);
30017 return -EINVAL;
30018 }
30019 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30020
30021 err = queue_skb(card, vc, skb, oam);
30022 if (err) {
30023 - atomic_inc(&vcc->stats->tx_err);
30024 + atomic_inc_unchecked(&vcc->stats->tx_err);
30025 dev_kfree_skb(skb);
30026 return err;
30027 }
30028 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30029 skb = dev_alloc_skb(64);
30030 if (!skb) {
30031 printk("%s: Out of memory in send_oam().\n", card->name);
30032 - atomic_inc(&vcc->stats->tx_err);
30033 + atomic_inc_unchecked(&vcc->stats->tx_err);
30034 return -ENOMEM;
30035 }
30036 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30037 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30038 index b2c1b37..faa672b 100644
30039 --- a/drivers/atm/iphase.c
30040 +++ b/drivers/atm/iphase.c
30041 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30042 status = (u_short) (buf_desc_ptr->desc_mode);
30043 if (status & (RX_CER | RX_PTE | RX_OFL))
30044 {
30045 - atomic_inc(&vcc->stats->rx_err);
30046 + atomic_inc_unchecked(&vcc->stats->rx_err);
30047 IF_ERR(printk("IA: bad packet, dropping it");)
30048 if (status & RX_CER) {
30049 IF_ERR(printk(" cause: packet CRC error\n");)
30050 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30051 len = dma_addr - buf_addr;
30052 if (len > iadev->rx_buf_sz) {
30053 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30054 - atomic_inc(&vcc->stats->rx_err);
30055 + atomic_inc_unchecked(&vcc->stats->rx_err);
30056 goto out_free_desc;
30057 }
30058
30059 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30060 ia_vcc = INPH_IA_VCC(vcc);
30061 if (ia_vcc == NULL)
30062 {
30063 - atomic_inc(&vcc->stats->rx_err);
30064 + atomic_inc_unchecked(&vcc->stats->rx_err);
30065 dev_kfree_skb_any(skb);
30066 atm_return(vcc, atm_guess_pdu2truesize(len));
30067 goto INCR_DLE;
30068 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30069 if ((length > iadev->rx_buf_sz) || (length >
30070 (skb->len - sizeof(struct cpcs_trailer))))
30071 {
30072 - atomic_inc(&vcc->stats->rx_err);
30073 + atomic_inc_unchecked(&vcc->stats->rx_err);
30074 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30075 length, skb->len);)
30076 dev_kfree_skb_any(skb);
30077 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30078
30079 IF_RX(printk("rx_dle_intr: skb push");)
30080 vcc->push(vcc,skb);
30081 - atomic_inc(&vcc->stats->rx);
30082 + atomic_inc_unchecked(&vcc->stats->rx);
30083 iadev->rx_pkt_cnt++;
30084 }
30085 INCR_DLE:
30086 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30087 {
30088 struct k_sonet_stats *stats;
30089 stats = &PRIV(_ia_dev[board])->sonet_stats;
30090 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30091 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30092 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30093 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30094 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30095 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30096 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30097 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30098 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30099 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30100 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30101 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30102 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30103 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30104 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30105 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30106 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30107 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30108 }
30109 ia_cmds.status = 0;
30110 break;
30111 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30112 if ((desc == 0) || (desc > iadev->num_tx_desc))
30113 {
30114 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30115 - atomic_inc(&vcc->stats->tx);
30116 + atomic_inc_unchecked(&vcc->stats->tx);
30117 if (vcc->pop)
30118 vcc->pop(vcc, skb);
30119 else
30120 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30121 ATM_DESC(skb) = vcc->vci;
30122 skb_queue_tail(&iadev->tx_dma_q, skb);
30123
30124 - atomic_inc(&vcc->stats->tx);
30125 + atomic_inc_unchecked(&vcc->stats->tx);
30126 iadev->tx_pkt_cnt++;
30127 /* Increment transaction counter */
30128 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30129
30130 #if 0
30131 /* add flow control logic */
30132 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30133 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30134 if (iavcc->vc_desc_cnt > 10) {
30135 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30136 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30137 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30138 index cf97c34..8d30655 100644
30139 --- a/drivers/atm/lanai.c
30140 +++ b/drivers/atm/lanai.c
30141 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30142 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30143 lanai_endtx(lanai, lvcc);
30144 lanai_free_skb(lvcc->tx.atmvcc, skb);
30145 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30146 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30147 }
30148
30149 /* Try to fill the buffer - don't call unless there is backlog */
30150 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30151 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30152 __net_timestamp(skb);
30153 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30154 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30155 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30156 out:
30157 lvcc->rx.buf.ptr = end;
30158 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30159 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30160 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30161 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30162 lanai->stats.service_rxnotaal5++;
30163 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30164 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30165 return 0;
30166 }
30167 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30168 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30169 int bytes;
30170 read_unlock(&vcc_sklist_lock);
30171 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30172 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30173 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30174 lvcc->stats.x.aal5.service_trash++;
30175 bytes = (SERVICE_GET_END(s) * 16) -
30176 (((unsigned long) lvcc->rx.buf.ptr) -
30177 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30178 }
30179 if (s & SERVICE_STREAM) {
30180 read_unlock(&vcc_sklist_lock);
30181 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30182 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30183 lvcc->stats.x.aal5.service_stream++;
30184 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30185 "PDU on VCI %d!\n", lanai->number, vci);
30186 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30187 return 0;
30188 }
30189 DPRINTK("got rx crc error on vci %d\n", vci);
30190 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30191 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30192 lvcc->stats.x.aal5.service_rxcrc++;
30193 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30194 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30195 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30196 index 3da804b..d3b0eed 100644
30197 --- a/drivers/atm/nicstar.c
30198 +++ b/drivers/atm/nicstar.c
30199 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30200 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30201 {
30202 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30203 - atomic_inc(&vcc->stats->tx_err);
30204 + atomic_inc_unchecked(&vcc->stats->tx_err);
30205 dev_kfree_skb_any(skb);
30206 return -EINVAL;
30207 }
30208 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30209 if (!vc->tx)
30210 {
30211 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30212 - atomic_inc(&vcc->stats->tx_err);
30213 + atomic_inc_unchecked(&vcc->stats->tx_err);
30214 dev_kfree_skb_any(skb);
30215 return -EINVAL;
30216 }
30217 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30218 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30219 {
30220 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30221 - atomic_inc(&vcc->stats->tx_err);
30222 + atomic_inc_unchecked(&vcc->stats->tx_err);
30223 dev_kfree_skb_any(skb);
30224 return -EINVAL;
30225 }
30226 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30227 if (skb_shinfo(skb)->nr_frags != 0)
30228 {
30229 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30230 - atomic_inc(&vcc->stats->tx_err);
30231 + atomic_inc_unchecked(&vcc->stats->tx_err);
30232 dev_kfree_skb_any(skb);
30233 return -EINVAL;
30234 }
30235 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30236
30237 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30238 {
30239 - atomic_inc(&vcc->stats->tx_err);
30240 + atomic_inc_unchecked(&vcc->stats->tx_err);
30241 dev_kfree_skb_any(skb);
30242 return -EIO;
30243 }
30244 - atomic_inc(&vcc->stats->tx);
30245 + atomic_inc_unchecked(&vcc->stats->tx);
30246
30247 return 0;
30248 }
30249 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30250 {
30251 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30252 card->index);
30253 - atomic_add(i,&vcc->stats->rx_drop);
30254 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30255 break;
30256 }
30257 if (!atm_charge(vcc, sb->truesize))
30258 {
30259 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30260 card->index);
30261 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30262 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30263 dev_kfree_skb_any(sb);
30264 break;
30265 }
30266 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30267 ATM_SKB(sb)->vcc = vcc;
30268 __net_timestamp(sb);
30269 vcc->push(vcc, sb);
30270 - atomic_inc(&vcc->stats->rx);
30271 + atomic_inc_unchecked(&vcc->stats->rx);
30272 cell += ATM_CELL_PAYLOAD;
30273 }
30274
30275 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30276 if (iovb == NULL)
30277 {
30278 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30279 - atomic_inc(&vcc->stats->rx_drop);
30280 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30281 recycle_rx_buf(card, skb);
30282 return;
30283 }
30284 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30285 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30286 {
30287 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30288 - atomic_inc(&vcc->stats->rx_err);
30289 + atomic_inc_unchecked(&vcc->stats->rx_err);
30290 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30291 NS_SKB(iovb)->iovcnt = 0;
30292 iovb->len = 0;
30293 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30294 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30295 card->index);
30296 which_list(card, skb);
30297 - atomic_inc(&vcc->stats->rx_err);
30298 + atomic_inc_unchecked(&vcc->stats->rx_err);
30299 recycle_rx_buf(card, skb);
30300 vc->rx_iov = NULL;
30301 recycle_iov_buf(card, iovb);
30302 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30303 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30304 card->index);
30305 which_list(card, skb);
30306 - atomic_inc(&vcc->stats->rx_err);
30307 + atomic_inc_unchecked(&vcc->stats->rx_err);
30308 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30309 NS_SKB(iovb)->iovcnt);
30310 vc->rx_iov = NULL;
30311 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30312 printk(" - PDU size mismatch.\n");
30313 else
30314 printk(".\n");
30315 - atomic_inc(&vcc->stats->rx_err);
30316 + atomic_inc_unchecked(&vcc->stats->rx_err);
30317 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30318 NS_SKB(iovb)->iovcnt);
30319 vc->rx_iov = NULL;
30320 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30321 if (!atm_charge(vcc, skb->truesize))
30322 {
30323 push_rxbufs(card, skb);
30324 - atomic_inc(&vcc->stats->rx_drop);
30325 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30326 }
30327 else
30328 {
30329 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30330 ATM_SKB(skb)->vcc = vcc;
30331 __net_timestamp(skb);
30332 vcc->push(vcc, skb);
30333 - atomic_inc(&vcc->stats->rx);
30334 + atomic_inc_unchecked(&vcc->stats->rx);
30335 }
30336 }
30337 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30338 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30339 if (!atm_charge(vcc, sb->truesize))
30340 {
30341 push_rxbufs(card, sb);
30342 - atomic_inc(&vcc->stats->rx_drop);
30343 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30344 }
30345 else
30346 {
30347 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30348 ATM_SKB(sb)->vcc = vcc;
30349 __net_timestamp(sb);
30350 vcc->push(vcc, sb);
30351 - atomic_inc(&vcc->stats->rx);
30352 + atomic_inc_unchecked(&vcc->stats->rx);
30353 }
30354
30355 push_rxbufs(card, skb);
30356 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30357 if (!atm_charge(vcc, skb->truesize))
30358 {
30359 push_rxbufs(card, skb);
30360 - atomic_inc(&vcc->stats->rx_drop);
30361 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30362 }
30363 else
30364 {
30365 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30366 ATM_SKB(skb)->vcc = vcc;
30367 __net_timestamp(skb);
30368 vcc->push(vcc, skb);
30369 - atomic_inc(&vcc->stats->rx);
30370 + atomic_inc_unchecked(&vcc->stats->rx);
30371 }
30372
30373 push_rxbufs(card, sb);
30374 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30375 if (hb == NULL)
30376 {
30377 printk("nicstar%d: Out of huge buffers.\n", card->index);
30378 - atomic_inc(&vcc->stats->rx_drop);
30379 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30380 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30381 NS_SKB(iovb)->iovcnt);
30382 vc->rx_iov = NULL;
30383 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30384 }
30385 else
30386 dev_kfree_skb_any(hb);
30387 - atomic_inc(&vcc->stats->rx_drop);
30388 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30389 }
30390 else
30391 {
30392 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30393 #endif /* NS_USE_DESTRUCTORS */
30394 __net_timestamp(hb);
30395 vcc->push(vcc, hb);
30396 - atomic_inc(&vcc->stats->rx);
30397 + atomic_inc_unchecked(&vcc->stats->rx);
30398 }
30399 }
30400
30401 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30402 index 84c93ff..e6ed269 100644
30403 --- a/drivers/atm/solos-pci.c
30404 +++ b/drivers/atm/solos-pci.c
30405 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30406 }
30407 atm_charge(vcc, skb->truesize);
30408 vcc->push(vcc, skb);
30409 - atomic_inc(&vcc->stats->rx);
30410 + atomic_inc_unchecked(&vcc->stats->rx);
30411 break;
30412
30413 case PKT_STATUS:
30414 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30415 char msg[500];
30416 char item[10];
30417
30418 + pax_track_stack();
30419 +
30420 len = buf->len;
30421 for (i = 0; i < len; i++){
30422 if(i % 8 == 0)
30423 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30424 vcc = SKB_CB(oldskb)->vcc;
30425
30426 if (vcc) {
30427 - atomic_inc(&vcc->stats->tx);
30428 + atomic_inc_unchecked(&vcc->stats->tx);
30429 solos_pop(vcc, oldskb);
30430 } else
30431 dev_kfree_skb_irq(oldskb);
30432 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30433 index 6dd3f59..ee377f3 100644
30434 --- a/drivers/atm/suni.c
30435 +++ b/drivers/atm/suni.c
30436 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30437
30438
30439 #define ADD_LIMITED(s,v) \
30440 - atomic_add((v),&stats->s); \
30441 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30442 + atomic_add_unchecked((v),&stats->s); \
30443 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30444
30445
30446 static void suni_hz(unsigned long from_timer)
30447 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30448 index fc8cb07..4a80e53 100644
30449 --- a/drivers/atm/uPD98402.c
30450 +++ b/drivers/atm/uPD98402.c
30451 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30452 struct sonet_stats tmp;
30453 int error = 0;
30454
30455 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30456 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30457 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30458 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30459 if (zero && !error) {
30460 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30461
30462
30463 #define ADD_LIMITED(s,v) \
30464 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30465 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30466 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30467 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30468 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30469 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30470
30471
30472 static void stat_event(struct atm_dev *dev)
30473 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30474 if (reason & uPD98402_INT_PFM) stat_event(dev);
30475 if (reason & uPD98402_INT_PCO) {
30476 (void) GET(PCOCR); /* clear interrupt cause */
30477 - atomic_add(GET(HECCT),
30478 + atomic_add_unchecked(GET(HECCT),
30479 &PRIV(dev)->sonet_stats.uncorr_hcs);
30480 }
30481 if ((reason & uPD98402_INT_RFO) &&
30482 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30483 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30484 uPD98402_INT_LOS),PIMR); /* enable them */
30485 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30486 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30487 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30488 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30489 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30490 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30491 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30492 return 0;
30493 }
30494
30495 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30496 index 2e9635b..32927b4 100644
30497 --- a/drivers/atm/zatm.c
30498 +++ b/drivers/atm/zatm.c
30499 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30500 }
30501 if (!size) {
30502 dev_kfree_skb_irq(skb);
30503 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30504 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30505 continue;
30506 }
30507 if (!atm_charge(vcc,skb->truesize)) {
30508 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30509 skb->len = size;
30510 ATM_SKB(skb)->vcc = vcc;
30511 vcc->push(vcc,skb);
30512 - atomic_inc(&vcc->stats->rx);
30513 + atomic_inc_unchecked(&vcc->stats->rx);
30514 }
30515 zout(pos & 0xffff,MTA(mbx));
30516 #if 0 /* probably a stupid idea */
30517 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30518 skb_queue_head(&zatm_vcc->backlog,skb);
30519 break;
30520 }
30521 - atomic_inc(&vcc->stats->tx);
30522 + atomic_inc_unchecked(&vcc->stats->tx);
30523 wake_up(&zatm_vcc->tx_wait);
30524 }
30525
30526 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30527 index 63c143e..fece183 100644
30528 --- a/drivers/base/bus.c
30529 +++ b/drivers/base/bus.c
30530 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30531 return ret;
30532 }
30533
30534 -static struct sysfs_ops driver_sysfs_ops = {
30535 +static const struct sysfs_ops driver_sysfs_ops = {
30536 .show = drv_attr_show,
30537 .store = drv_attr_store,
30538 };
30539 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30540 return ret;
30541 }
30542
30543 -static struct sysfs_ops bus_sysfs_ops = {
30544 +static const struct sysfs_ops bus_sysfs_ops = {
30545 .show = bus_attr_show,
30546 .store = bus_attr_store,
30547 };
30548 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30549 return 0;
30550 }
30551
30552 -static struct kset_uevent_ops bus_uevent_ops = {
30553 +static const struct kset_uevent_ops bus_uevent_ops = {
30554 .filter = bus_uevent_filter,
30555 };
30556
30557 diff --git a/drivers/base/class.c b/drivers/base/class.c
30558 index 6e2c3b0..cb61871 100644
30559 --- a/drivers/base/class.c
30560 +++ b/drivers/base/class.c
30561 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30562 kfree(cp);
30563 }
30564
30565 -static struct sysfs_ops class_sysfs_ops = {
30566 +static const struct sysfs_ops class_sysfs_ops = {
30567 .show = class_attr_show,
30568 .store = class_attr_store,
30569 };
30570 diff --git a/drivers/base/core.c b/drivers/base/core.c
30571 index f33d768..a9358d0 100644
30572 --- a/drivers/base/core.c
30573 +++ b/drivers/base/core.c
30574 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30575 return ret;
30576 }
30577
30578 -static struct sysfs_ops dev_sysfs_ops = {
30579 +static const struct sysfs_ops dev_sysfs_ops = {
30580 .show = dev_attr_show,
30581 .store = dev_attr_store,
30582 };
30583 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30584 return retval;
30585 }
30586
30587 -static struct kset_uevent_ops device_uevent_ops = {
30588 +static const struct kset_uevent_ops device_uevent_ops = {
30589 .filter = dev_uevent_filter,
30590 .name = dev_uevent_name,
30591 .uevent = dev_uevent,
30592 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30593 index 989429c..2272b00 100644
30594 --- a/drivers/base/memory.c
30595 +++ b/drivers/base/memory.c
30596 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30597 return retval;
30598 }
30599
30600 -static struct kset_uevent_ops memory_uevent_ops = {
30601 +static const struct kset_uevent_ops memory_uevent_ops = {
30602 .name = memory_uevent_name,
30603 .uevent = memory_uevent,
30604 };
30605 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30606 index 3f202f7..61c4a6f 100644
30607 --- a/drivers/base/sys.c
30608 +++ b/drivers/base/sys.c
30609 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30610 return -EIO;
30611 }
30612
30613 -static struct sysfs_ops sysfs_ops = {
30614 +static const struct sysfs_ops sysfs_ops = {
30615 .show = sysdev_show,
30616 .store = sysdev_store,
30617 };
30618 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30619 return -EIO;
30620 }
30621
30622 -static struct sysfs_ops sysfs_class_ops = {
30623 +static const struct sysfs_ops sysfs_class_ops = {
30624 .show = sysdev_class_show,
30625 .store = sysdev_class_store,
30626 };
30627 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30628 index eb4fa19..1954777 100644
30629 --- a/drivers/block/DAC960.c
30630 +++ b/drivers/block/DAC960.c
30631 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30632 unsigned long flags;
30633 int Channel, TargetID;
30634
30635 + pax_track_stack();
30636 +
30637 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30638 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30639 sizeof(DAC960_SCSI_Inquiry_T) +
30640 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30641 index 68b90d9..7e2e3f3 100644
30642 --- a/drivers/block/cciss.c
30643 +++ b/drivers/block/cciss.c
30644 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30645 int err;
30646 u32 cp;
30647
30648 + memset(&arg64, 0, sizeof(arg64));
30649 +
30650 err = 0;
30651 err |=
30652 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30653 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30654 /* Wait (up to 20 seconds) for a command to complete */
30655
30656 for (i = 20 * HZ; i > 0; i--) {
30657 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30658 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30659 if (done == FIFO_EMPTY)
30660 schedule_timeout_uninterruptible(1);
30661 else
30662 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30663 resend_cmd1:
30664
30665 /* Disable interrupt on the board. */
30666 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30667 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30668
30669 /* Make sure there is room in the command FIFO */
30670 /* Actually it should be completely empty at this time */
30671 @@ -2884,13 +2886,13 @@ resend_cmd1:
30672 /* tape side of the driver. */
30673 for (i = 200000; i > 0; i--) {
30674 /* if fifo isn't full go */
30675 - if (!(h->access.fifo_full(h)))
30676 + if (!(h->access->fifo_full(h)))
30677 break;
30678 udelay(10);
30679 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30680 " waiting!\n", h->ctlr);
30681 }
30682 - h->access.submit_command(h, c); /* Send the cmd */
30683 + h->access->submit_command(h, c); /* Send the cmd */
30684 do {
30685 complete = pollcomplete(h->ctlr);
30686
30687 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30688 while (!hlist_empty(&h->reqQ)) {
30689 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30690 /* can't do anything if fifo is full */
30691 - if ((h->access.fifo_full(h))) {
30692 + if ((h->access->fifo_full(h))) {
30693 printk(KERN_WARNING "cciss: fifo full\n");
30694 break;
30695 }
30696 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30697 h->Qdepth--;
30698
30699 /* Tell the controller execute command */
30700 - h->access.submit_command(h, c);
30701 + h->access->submit_command(h, c);
30702
30703 /* Put job onto the completed Q */
30704 addQ(&h->cmpQ, c);
30705 @@ -3393,17 +3395,17 @@ startio:
30706
30707 static inline unsigned long get_next_completion(ctlr_info_t *h)
30708 {
30709 - return h->access.command_completed(h);
30710 + return h->access->command_completed(h);
30711 }
30712
30713 static inline int interrupt_pending(ctlr_info_t *h)
30714 {
30715 - return h->access.intr_pending(h);
30716 + return h->access->intr_pending(h);
30717 }
30718
30719 static inline long interrupt_not_for_us(ctlr_info_t *h)
30720 {
30721 - return (((h->access.intr_pending(h) == 0) ||
30722 + return (((h->access->intr_pending(h) == 0) ||
30723 (h->interrupts_enabled == 0)));
30724 }
30725
30726 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30727 */
30728 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30729 c->product_name = products[prod_index].product_name;
30730 - c->access = *(products[prod_index].access);
30731 + c->access = products[prod_index].access;
30732 c->nr_cmds = c->max_commands - 4;
30733 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30734 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30735 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30736 }
30737
30738 /* make sure the board interrupts are off */
30739 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30740 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30741 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30742 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30743 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30744 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30745 cciss_scsi_setup(i);
30746
30747 /* Turn the interrupts on so we can service requests */
30748 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30749 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30750
30751 /* Get the firmware version */
30752 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30753 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30754 index 04d6bf8..36e712d 100644
30755 --- a/drivers/block/cciss.h
30756 +++ b/drivers/block/cciss.h
30757 @@ -90,7 +90,7 @@ struct ctlr_info
30758 // information about each logical volume
30759 drive_info_struct *drv[CISS_MAX_LUN];
30760
30761 - struct access_method access;
30762 + struct access_method *access;
30763
30764 /* queue and queue Info */
30765 struct hlist_head reqQ;
30766 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30767 index 6422651..bb1bdef 100644
30768 --- a/drivers/block/cpqarray.c
30769 +++ b/drivers/block/cpqarray.c
30770 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30771 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30772 goto Enomem4;
30773 }
30774 - hba[i]->access.set_intr_mask(hba[i], 0);
30775 + hba[i]->access->set_intr_mask(hba[i], 0);
30776 if (request_irq(hba[i]->intr, do_ida_intr,
30777 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30778 {
30779 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30780 add_timer(&hba[i]->timer);
30781
30782 /* Enable IRQ now that spinlock and rate limit timer are set up */
30783 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30784 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30785
30786 for(j=0; j<NWD; j++) {
30787 struct gendisk *disk = ida_gendisk[i][j];
30788 @@ -695,7 +695,7 @@ DBGINFO(
30789 for(i=0; i<NR_PRODUCTS; i++) {
30790 if (board_id == products[i].board_id) {
30791 c->product_name = products[i].product_name;
30792 - c->access = *(products[i].access);
30793 + c->access = products[i].access;
30794 break;
30795 }
30796 }
30797 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30798 hba[ctlr]->intr = intr;
30799 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30800 hba[ctlr]->product_name = products[j].product_name;
30801 - hba[ctlr]->access = *(products[j].access);
30802 + hba[ctlr]->access = products[j].access;
30803 hba[ctlr]->ctlr = ctlr;
30804 hba[ctlr]->board_id = board_id;
30805 hba[ctlr]->pci_dev = NULL; /* not PCI */
30806 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30807 struct scatterlist tmp_sg[SG_MAX];
30808 int i, dir, seg;
30809
30810 + pax_track_stack();
30811 +
30812 if (blk_queue_plugged(q))
30813 goto startio;
30814
30815 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30816
30817 while((c = h->reqQ) != NULL) {
30818 /* Can't do anything if we're busy */
30819 - if (h->access.fifo_full(h) == 0)
30820 + if (h->access->fifo_full(h) == 0)
30821 return;
30822
30823 /* Get the first entry from the request Q */
30824 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30825 h->Qdepth--;
30826
30827 /* Tell the controller to do our bidding */
30828 - h->access.submit_command(h, c);
30829 + h->access->submit_command(h, c);
30830
30831 /* Get onto the completion Q */
30832 addQ(&h->cmpQ, c);
30833 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30834 unsigned long flags;
30835 __u32 a,a1;
30836
30837 - istat = h->access.intr_pending(h);
30838 + istat = h->access->intr_pending(h);
30839 /* Is this interrupt for us? */
30840 if (istat == 0)
30841 return IRQ_NONE;
30842 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30843 */
30844 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30845 if (istat & FIFO_NOT_EMPTY) {
30846 - while((a = h->access.command_completed(h))) {
30847 + while((a = h->access->command_completed(h))) {
30848 a1 = a; a &= ~3;
30849 if ((c = h->cmpQ) == NULL)
30850 {
30851 @@ -1434,11 +1436,11 @@ static int sendcmd(
30852 /*
30853 * Disable interrupt
30854 */
30855 - info_p->access.set_intr_mask(info_p, 0);
30856 + info_p->access->set_intr_mask(info_p, 0);
30857 /* Make sure there is room in the command FIFO */
30858 /* Actually it should be completely empty at this time. */
30859 for (i = 200000; i > 0; i--) {
30860 - temp = info_p->access.fifo_full(info_p);
30861 + temp = info_p->access->fifo_full(info_p);
30862 if (temp != 0) {
30863 break;
30864 }
30865 @@ -1451,7 +1453,7 @@ DBG(
30866 /*
30867 * Send the cmd
30868 */
30869 - info_p->access.submit_command(info_p, c);
30870 + info_p->access->submit_command(info_p, c);
30871 complete = pollcomplete(ctlr);
30872
30873 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30874 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30875 * we check the new geometry. Then turn interrupts back on when
30876 * we're done.
30877 */
30878 - host->access.set_intr_mask(host, 0);
30879 + host->access->set_intr_mask(host, 0);
30880 getgeometry(ctlr);
30881 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30882 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30883
30884 for(i=0; i<NWD; i++) {
30885 struct gendisk *disk = ida_gendisk[ctlr][i];
30886 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30887 /* Wait (up to 2 seconds) for a command to complete */
30888
30889 for (i = 200000; i > 0; i--) {
30890 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30891 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30892 if (done == 0) {
30893 udelay(10); /* a short fixed delay */
30894 } else
30895 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30896 index be73e9d..7fbf140 100644
30897 --- a/drivers/block/cpqarray.h
30898 +++ b/drivers/block/cpqarray.h
30899 @@ -99,7 +99,7 @@ struct ctlr_info {
30900 drv_info_t drv[NWD];
30901 struct proc_dir_entry *proc;
30902
30903 - struct access_method access;
30904 + struct access_method *access;
30905
30906 cmdlist_t *reqQ;
30907 cmdlist_t *cmpQ;
30908 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30909 index 8ec2d70..2804b30 100644
30910 --- a/drivers/block/loop.c
30911 +++ b/drivers/block/loop.c
30912 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30913 mm_segment_t old_fs = get_fs();
30914
30915 set_fs(get_ds());
30916 - bw = file->f_op->write(file, buf, len, &pos);
30917 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30918 set_fs(old_fs);
30919 if (likely(bw == len))
30920 return 0;
30921 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30922 index 26ada47..083c480 100644
30923 --- a/drivers/block/nbd.c
30924 +++ b/drivers/block/nbd.c
30925 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30926 struct kvec iov;
30927 sigset_t blocked, oldset;
30928
30929 + pax_track_stack();
30930 +
30931 if (unlikely(!sock)) {
30932 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30933 lo->disk->disk_name, (send ? "send" : "recv"));
30934 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30935 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30936 unsigned int cmd, unsigned long arg)
30937 {
30938 + pax_track_stack();
30939 +
30940 switch (cmd) {
30941 case NBD_DISCONNECT: {
30942 struct request sreq;
30943 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30944 index a5d585d..d087be3 100644
30945 --- a/drivers/block/pktcdvd.c
30946 +++ b/drivers/block/pktcdvd.c
30947 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30948 return len;
30949 }
30950
30951 -static struct sysfs_ops kobj_pkt_ops = {
30952 +static const struct sysfs_ops kobj_pkt_ops = {
30953 .show = kobj_pkt_show,
30954 .store = kobj_pkt_store
30955 };
30956 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30957 index 6aad99e..89cd142 100644
30958 --- a/drivers/char/Kconfig
30959 +++ b/drivers/char/Kconfig
30960 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30961
30962 config DEVKMEM
30963 bool "/dev/kmem virtual device support"
30964 - default y
30965 + default n
30966 + depends on !GRKERNSEC_KMEM
30967 help
30968 Say Y here if you want to support the /dev/kmem device. The
30969 /dev/kmem device is rarely used, but can be used for certain
30970 @@ -1114,6 +1115,7 @@ config DEVPORT
30971 bool
30972 depends on !M68K
30973 depends on ISA || PCI
30974 + depends on !GRKERNSEC_KMEM
30975 default y
30976
30977 source "drivers/s390/char/Kconfig"
30978 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30979 index a96f319..a778a5b 100644
30980 --- a/drivers/char/agp/frontend.c
30981 +++ b/drivers/char/agp/frontend.c
30982 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30983 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30984 return -EFAULT;
30985
30986 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30987 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30988 return -EFAULT;
30989
30990 client = agp_find_client_by_pid(reserve.pid);
30991 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30992 index d8cff90..9628e70 100644
30993 --- a/drivers/char/briq_panel.c
30994 +++ b/drivers/char/briq_panel.c
30995 @@ -10,6 +10,7 @@
30996 #include <linux/types.h>
30997 #include <linux/errno.h>
30998 #include <linux/tty.h>
30999 +#include <linux/mutex.h>
31000 #include <linux/timer.h>
31001 #include <linux/kernel.h>
31002 #include <linux/wait.h>
31003 @@ -36,6 +37,7 @@ static int vfd_is_open;
31004 static unsigned char vfd[40];
31005 static int vfd_cursor;
31006 static unsigned char ledpb, led;
31007 +static DEFINE_MUTEX(vfd_mutex);
31008
31009 static void update_vfd(void)
31010 {
31011 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31012 if (!vfd_is_open)
31013 return -EBUSY;
31014
31015 + mutex_lock(&vfd_mutex);
31016 for (;;) {
31017 char c;
31018 if (!indx)
31019 break;
31020 - if (get_user(c, buf))
31021 + if (get_user(c, buf)) {
31022 + mutex_unlock(&vfd_mutex);
31023 return -EFAULT;
31024 + }
31025 if (esc) {
31026 set_led(c);
31027 esc = 0;
31028 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31029 buf++;
31030 }
31031 update_vfd();
31032 + mutex_unlock(&vfd_mutex);
31033
31034 return len;
31035 }
31036 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31037 index 31e7c91..161afc0 100644
31038 --- a/drivers/char/genrtc.c
31039 +++ b/drivers/char/genrtc.c
31040 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31041 switch (cmd) {
31042
31043 case RTC_PLL_GET:
31044 + memset(&pll, 0, sizeof(pll));
31045 if (get_rtc_pll(&pll))
31046 return -EINVAL;
31047 else
31048 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31049 index 006466d..a2bb21c 100644
31050 --- a/drivers/char/hpet.c
31051 +++ b/drivers/char/hpet.c
31052 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31053 return 0;
31054 }
31055
31056 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31057 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31058
31059 static int
31060 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31061 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31062 }
31063
31064 static int
31065 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31066 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31067 {
31068 struct hpet_timer __iomem *timer;
31069 struct hpet __iomem *hpet;
31070 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31071 {
31072 struct hpet_info info;
31073
31074 + memset(&info, 0, sizeof(info));
31075 +
31076 if (devp->hd_ireqfreq)
31077 info.hi_ireqfreq =
31078 hpet_time_div(hpetp, devp->hd_ireqfreq);
31079 - else
31080 - info.hi_ireqfreq = 0;
31081 info.hi_flags =
31082 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31083 info.hi_hpet = hpetp->hp_which;
31084 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31085 index 0afc8b8..6913fc3 100644
31086 --- a/drivers/char/hvc_beat.c
31087 +++ b/drivers/char/hvc_beat.c
31088 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31089 return cnt;
31090 }
31091
31092 -static struct hv_ops hvc_beat_get_put_ops = {
31093 +static const struct hv_ops hvc_beat_get_put_ops = {
31094 .get_chars = hvc_beat_get_chars,
31095 .put_chars = hvc_beat_put_chars,
31096 };
31097 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31098 index 98097f2..407dddc 100644
31099 --- a/drivers/char/hvc_console.c
31100 +++ b/drivers/char/hvc_console.c
31101 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31102 * console interfaces but can still be used as a tty device. This has to be
31103 * static because kmalloc will not work during early console init.
31104 */
31105 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31106 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31107 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31108 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31109
31110 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31111 * vty adapters do NOT get an hvc_instantiate() callback since they
31112 * appear after early console init.
31113 */
31114 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31115 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31116 {
31117 struct hvc_struct *hp;
31118
31119 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31120 };
31121
31122 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31123 - struct hv_ops *ops, int outbuf_size)
31124 + const struct hv_ops *ops, int outbuf_size)
31125 {
31126 struct hvc_struct *hp;
31127 int i;
31128 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31129 index 10950ca..ed176c3 100644
31130 --- a/drivers/char/hvc_console.h
31131 +++ b/drivers/char/hvc_console.h
31132 @@ -55,7 +55,7 @@ struct hvc_struct {
31133 int outbuf_size;
31134 int n_outbuf;
31135 uint32_t vtermno;
31136 - struct hv_ops *ops;
31137 + const struct hv_ops *ops;
31138 int irq_requested;
31139 int data;
31140 struct winsize ws;
31141 @@ -76,11 +76,11 @@ struct hv_ops {
31142 };
31143
31144 /* Register a vterm and a slot index for use as a console (console_init) */
31145 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31146 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31147
31148 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31149 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31150 - struct hv_ops *ops, int outbuf_size);
31151 + const struct hv_ops *ops, int outbuf_size);
31152 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31153 extern int hvc_remove(struct hvc_struct *hp);
31154
31155 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31156 index 936d05b..fd02426 100644
31157 --- a/drivers/char/hvc_iseries.c
31158 +++ b/drivers/char/hvc_iseries.c
31159 @@ -197,7 +197,7 @@ done:
31160 return sent;
31161 }
31162
31163 -static struct hv_ops hvc_get_put_ops = {
31164 +static const struct hv_ops hvc_get_put_ops = {
31165 .get_chars = get_chars,
31166 .put_chars = put_chars,
31167 .notifier_add = notifier_add_irq,
31168 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31169 index b0e168f..69cda2a 100644
31170 --- a/drivers/char/hvc_iucv.c
31171 +++ b/drivers/char/hvc_iucv.c
31172 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31173
31174
31175 /* HVC operations */
31176 -static struct hv_ops hvc_iucv_ops = {
31177 +static const struct hv_ops hvc_iucv_ops = {
31178 .get_chars = hvc_iucv_get_chars,
31179 .put_chars = hvc_iucv_put_chars,
31180 .notifier_add = hvc_iucv_notifier_add,
31181 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31182 index 88590d0..61c4a61 100644
31183 --- a/drivers/char/hvc_rtas.c
31184 +++ b/drivers/char/hvc_rtas.c
31185 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31186 return i;
31187 }
31188
31189 -static struct hv_ops hvc_rtas_get_put_ops = {
31190 +static const struct hv_ops hvc_rtas_get_put_ops = {
31191 .get_chars = hvc_rtas_read_console,
31192 .put_chars = hvc_rtas_write_console,
31193 };
31194 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31195 index bd63ba8..b0957e6 100644
31196 --- a/drivers/char/hvc_udbg.c
31197 +++ b/drivers/char/hvc_udbg.c
31198 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31199 return i;
31200 }
31201
31202 -static struct hv_ops hvc_udbg_ops = {
31203 +static const struct hv_ops hvc_udbg_ops = {
31204 .get_chars = hvc_udbg_get,
31205 .put_chars = hvc_udbg_put,
31206 };
31207 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31208 index 10be343..27370e9 100644
31209 --- a/drivers/char/hvc_vio.c
31210 +++ b/drivers/char/hvc_vio.c
31211 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31212 return got;
31213 }
31214
31215 -static struct hv_ops hvc_get_put_ops = {
31216 +static const struct hv_ops hvc_get_put_ops = {
31217 .get_chars = filtered_get_chars,
31218 .put_chars = hvc_put_chars,
31219 .notifier_add = notifier_add_irq,
31220 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31221 index a6ee32b..94f8c26 100644
31222 --- a/drivers/char/hvc_xen.c
31223 +++ b/drivers/char/hvc_xen.c
31224 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31225 return recv;
31226 }
31227
31228 -static struct hv_ops hvc_ops = {
31229 +static const struct hv_ops hvc_ops = {
31230 .get_chars = read_console,
31231 .put_chars = write_console,
31232 .notifier_add = notifier_add_irq,
31233 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31234 index 266b858..f3ee0bb 100644
31235 --- a/drivers/char/hvcs.c
31236 +++ b/drivers/char/hvcs.c
31237 @@ -82,6 +82,7 @@
31238 #include <asm/hvcserver.h>
31239 #include <asm/uaccess.h>
31240 #include <asm/vio.h>
31241 +#include <asm/local.h>
31242
31243 /*
31244 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31245 @@ -269,7 +270,7 @@ struct hvcs_struct {
31246 unsigned int index;
31247
31248 struct tty_struct *tty;
31249 - int open_count;
31250 + local_t open_count;
31251
31252 /*
31253 * Used to tell the driver kernel_thread what operations need to take
31254 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31255
31256 spin_lock_irqsave(&hvcsd->lock, flags);
31257
31258 - if (hvcsd->open_count > 0) {
31259 + if (local_read(&hvcsd->open_count) > 0) {
31260 spin_unlock_irqrestore(&hvcsd->lock, flags);
31261 printk(KERN_INFO "HVCS: vterm state unchanged. "
31262 "The hvcs device node is still in use.\n");
31263 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31264 if ((retval = hvcs_partner_connect(hvcsd)))
31265 goto error_release;
31266
31267 - hvcsd->open_count = 1;
31268 + local_set(&hvcsd->open_count, 1);
31269 hvcsd->tty = tty;
31270 tty->driver_data = hvcsd;
31271
31272 @@ -1169,7 +1170,7 @@ fast_open:
31273
31274 spin_lock_irqsave(&hvcsd->lock, flags);
31275 kref_get(&hvcsd->kref);
31276 - hvcsd->open_count++;
31277 + local_inc(&hvcsd->open_count);
31278 hvcsd->todo_mask |= HVCS_SCHED_READ;
31279 spin_unlock_irqrestore(&hvcsd->lock, flags);
31280
31281 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31282 hvcsd = tty->driver_data;
31283
31284 spin_lock_irqsave(&hvcsd->lock, flags);
31285 - if (--hvcsd->open_count == 0) {
31286 + if (local_dec_and_test(&hvcsd->open_count)) {
31287
31288 vio_disable_interrupts(hvcsd->vdev);
31289
31290 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31291 free_irq(irq, hvcsd);
31292 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31293 return;
31294 - } else if (hvcsd->open_count < 0) {
31295 + } else if (local_read(&hvcsd->open_count) < 0) {
31296 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31297 " is missmanaged.\n",
31298 - hvcsd->vdev->unit_address, hvcsd->open_count);
31299 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31300 }
31301
31302 spin_unlock_irqrestore(&hvcsd->lock, flags);
31303 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31304
31305 spin_lock_irqsave(&hvcsd->lock, flags);
31306 /* Preserve this so that we know how many kref refs to put */
31307 - temp_open_count = hvcsd->open_count;
31308 + temp_open_count = local_read(&hvcsd->open_count);
31309
31310 /*
31311 * Don't kref put inside the spinlock because the destruction
31312 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31313 hvcsd->tty->driver_data = NULL;
31314 hvcsd->tty = NULL;
31315
31316 - hvcsd->open_count = 0;
31317 + local_set(&hvcsd->open_count, 0);
31318
31319 /* This will drop any buffered data on the floor which is OK in a hangup
31320 * scenario. */
31321 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31322 * the middle of a write operation? This is a crummy place to do this
31323 * but we want to keep it all in the spinlock.
31324 */
31325 - if (hvcsd->open_count <= 0) {
31326 + if (local_read(&hvcsd->open_count) <= 0) {
31327 spin_unlock_irqrestore(&hvcsd->lock, flags);
31328 return -ENODEV;
31329 }
31330 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31331 {
31332 struct hvcs_struct *hvcsd = tty->driver_data;
31333
31334 - if (!hvcsd || hvcsd->open_count <= 0)
31335 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31336 return 0;
31337
31338 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31339 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31340 index ec5e3f8..02455ba 100644
31341 --- a/drivers/char/ipmi/ipmi_msghandler.c
31342 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31343 @@ -414,7 +414,7 @@ struct ipmi_smi {
31344 struct proc_dir_entry *proc_dir;
31345 char proc_dir_name[10];
31346
31347 - atomic_t stats[IPMI_NUM_STATS];
31348 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31349
31350 /*
31351 * run_to_completion duplicate of smb_info, smi_info
31352 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31353
31354
31355 #define ipmi_inc_stat(intf, stat) \
31356 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31357 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31358 #define ipmi_get_stat(intf, stat) \
31359 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31360 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31361
31362 static int is_lan_addr(struct ipmi_addr *addr)
31363 {
31364 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31365 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31366 init_waitqueue_head(&intf->waitq);
31367 for (i = 0; i < IPMI_NUM_STATS; i++)
31368 - atomic_set(&intf->stats[i], 0);
31369 + atomic_set_unchecked(&intf->stats[i], 0);
31370
31371 intf->proc_dir = NULL;
31372
31373 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31374 struct ipmi_smi_msg smi_msg;
31375 struct ipmi_recv_msg recv_msg;
31376
31377 + pax_track_stack();
31378 +
31379 si = (struct ipmi_system_interface_addr *) &addr;
31380 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31381 si->channel = IPMI_BMC_CHANNEL;
31382 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31383 index abae8c9..8021979 100644
31384 --- a/drivers/char/ipmi/ipmi_si_intf.c
31385 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31386 @@ -277,7 +277,7 @@ struct smi_info {
31387 unsigned char slave_addr;
31388
31389 /* Counters and things for the proc filesystem. */
31390 - atomic_t stats[SI_NUM_STATS];
31391 + atomic_unchecked_t stats[SI_NUM_STATS];
31392
31393 struct task_struct *thread;
31394
31395 @@ -285,9 +285,9 @@ struct smi_info {
31396 };
31397
31398 #define smi_inc_stat(smi, stat) \
31399 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31400 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31401 #define smi_get_stat(smi, stat) \
31402 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31403 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31404
31405 #define SI_MAX_PARMS 4
31406
31407 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31408 atomic_set(&new_smi->req_events, 0);
31409 new_smi->run_to_completion = 0;
31410 for (i = 0; i < SI_NUM_STATS; i++)
31411 - atomic_set(&new_smi->stats[i], 0);
31412 + atomic_set_unchecked(&new_smi->stats[i], 0);
31413
31414 new_smi->interrupt_disabled = 0;
31415 atomic_set(&new_smi->stop_operation, 0);
31416 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31417 index 402838f..55e2200 100644
31418 --- a/drivers/char/istallion.c
31419 +++ b/drivers/char/istallion.c
31420 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31421 * re-used for each stats call.
31422 */
31423 static comstats_t stli_comstats;
31424 -static combrd_t stli_brdstats;
31425 static struct asystats stli_cdkstats;
31426
31427 /*****************************************************************************/
31428 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31429 {
31430 struct stlibrd *brdp;
31431 unsigned int i;
31432 + combrd_t stli_brdstats;
31433
31434 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31435 return -EFAULT;
31436 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31437 struct stliport stli_dummyport;
31438 struct stliport *portp;
31439
31440 + pax_track_stack();
31441 +
31442 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31443 return -EFAULT;
31444 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31445 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31446 struct stlibrd stli_dummybrd;
31447 struct stlibrd *brdp;
31448
31449 + pax_track_stack();
31450 +
31451 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31452 return -EFAULT;
31453 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31454 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31455 index 950837c..e55a288 100644
31456 --- a/drivers/char/keyboard.c
31457 +++ b/drivers/char/keyboard.c
31458 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31459 kbd->kbdmode == VC_MEDIUMRAW) &&
31460 value != KVAL(K_SAK))
31461 return; /* SAK is allowed even in raw mode */
31462 +
31463 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31464 + {
31465 + void *func = fn_handler[value];
31466 + if (func == fn_show_state || func == fn_show_ptregs ||
31467 + func == fn_show_mem)
31468 + return;
31469 + }
31470 +#endif
31471 +
31472 fn_handler[value](vc);
31473 }
31474
31475 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31476 .evbit = { BIT_MASK(EV_SND) },
31477 },
31478
31479 - { }, /* Terminating entry */
31480 + { 0 }, /* Terminating entry */
31481 };
31482
31483 MODULE_DEVICE_TABLE(input, kbd_ids);
31484 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31485 index 87c67b4..230527a 100644
31486 --- a/drivers/char/mbcs.c
31487 +++ b/drivers/char/mbcs.c
31488 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31489 return 0;
31490 }
31491
31492 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31493 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31494 {
31495 .part_num = MBCS_PART_NUM,
31496 .mfg_num = MBCS_MFG_NUM,
31497 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31498 index 1270f64..8495f49 100644
31499 --- a/drivers/char/mem.c
31500 +++ b/drivers/char/mem.c
31501 @@ -18,6 +18,7 @@
31502 #include <linux/raw.h>
31503 #include <linux/tty.h>
31504 #include <linux/capability.h>
31505 +#include <linux/security.h>
31506 #include <linux/ptrace.h>
31507 #include <linux/device.h>
31508 #include <linux/highmem.h>
31509 @@ -35,6 +36,10 @@
31510 # include <linux/efi.h>
31511 #endif
31512
31513 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31514 +extern struct file_operations grsec_fops;
31515 +#endif
31516 +
31517 static inline unsigned long size_inside_page(unsigned long start,
31518 unsigned long size)
31519 {
31520 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31521
31522 while (cursor < to) {
31523 if (!devmem_is_allowed(pfn)) {
31524 +#ifdef CONFIG_GRKERNSEC_KMEM
31525 + gr_handle_mem_readwrite(from, to);
31526 +#else
31527 printk(KERN_INFO
31528 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31529 current->comm, from, to);
31530 +#endif
31531 return 0;
31532 }
31533 cursor += PAGE_SIZE;
31534 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31535 }
31536 return 1;
31537 }
31538 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31539 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31540 +{
31541 + return 0;
31542 +}
31543 #else
31544 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31545 {
31546 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31547 #endif
31548
31549 while (count > 0) {
31550 + char *temp;
31551 +
31552 /*
31553 * Handle first page in case it's not aligned
31554 */
31555 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31556 if (!ptr)
31557 return -EFAULT;
31558
31559 - if (copy_to_user(buf, ptr, sz)) {
31560 +#ifdef CONFIG_PAX_USERCOPY
31561 + temp = kmalloc(sz, GFP_KERNEL);
31562 + if (!temp) {
31563 + unxlate_dev_mem_ptr(p, ptr);
31564 + return -ENOMEM;
31565 + }
31566 + memcpy(temp, ptr, sz);
31567 +#else
31568 + temp = ptr;
31569 +#endif
31570 +
31571 + if (copy_to_user(buf, temp, sz)) {
31572 +
31573 +#ifdef CONFIG_PAX_USERCOPY
31574 + kfree(temp);
31575 +#endif
31576 +
31577 unxlate_dev_mem_ptr(p, ptr);
31578 return -EFAULT;
31579 }
31580
31581 +#ifdef CONFIG_PAX_USERCOPY
31582 + kfree(temp);
31583 +#endif
31584 +
31585 unxlate_dev_mem_ptr(p, ptr);
31586
31587 buf += sz;
31588 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31589 size_t count, loff_t *ppos)
31590 {
31591 unsigned long p = *ppos;
31592 - ssize_t low_count, read, sz;
31593 + ssize_t low_count, read, sz, err = 0;
31594 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31595 - int err = 0;
31596
31597 read = 0;
31598 if (p < (unsigned long) high_memory) {
31599 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31600 }
31601 #endif
31602 while (low_count > 0) {
31603 + char *temp;
31604 +
31605 sz = size_inside_page(p, low_count);
31606
31607 /*
31608 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31609 */
31610 kbuf = xlate_dev_kmem_ptr((char *)p);
31611
31612 - if (copy_to_user(buf, kbuf, sz))
31613 +#ifdef CONFIG_PAX_USERCOPY
31614 + temp = kmalloc(sz, GFP_KERNEL);
31615 + if (!temp)
31616 + return -ENOMEM;
31617 + memcpy(temp, kbuf, sz);
31618 +#else
31619 + temp = kbuf;
31620 +#endif
31621 +
31622 + err = copy_to_user(buf, temp, sz);
31623 +
31624 +#ifdef CONFIG_PAX_USERCOPY
31625 + kfree(temp);
31626 +#endif
31627 +
31628 + if (err)
31629 return -EFAULT;
31630 buf += sz;
31631 p += sz;
31632 @@ -889,6 +941,9 @@ static const struct memdev {
31633 #ifdef CONFIG_CRASH_DUMP
31634 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31635 #endif
31636 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31637 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31638 +#endif
31639 };
31640
31641 static int memory_open(struct inode *inode, struct file *filp)
31642 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31643 index 918711a..4ffaf5e 100644
31644 --- a/drivers/char/mmtimer.c
31645 +++ b/drivers/char/mmtimer.c
31646 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31647 return err;
31648 }
31649
31650 -static struct k_clock sgi_clock = {
31651 +static k_clock_no_const sgi_clock = {
31652 .res = 0,
31653 .clock_set = sgi_clock_set,
31654 .clock_get = sgi_clock_get,
31655 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31656 index 674b3ab..a8d1970 100644
31657 --- a/drivers/char/pcmcia/ipwireless/tty.c
31658 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31659 @@ -29,6 +29,7 @@
31660 #include <linux/tty_driver.h>
31661 #include <linux/tty_flip.h>
31662 #include <linux/uaccess.h>
31663 +#include <asm/local.h>
31664
31665 #include "tty.h"
31666 #include "network.h"
31667 @@ -51,7 +52,7 @@ struct ipw_tty {
31668 int tty_type;
31669 struct ipw_network *network;
31670 struct tty_struct *linux_tty;
31671 - int open_count;
31672 + local_t open_count;
31673 unsigned int control_lines;
31674 struct mutex ipw_tty_mutex;
31675 int tx_bytes_queued;
31676 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31677 mutex_unlock(&tty->ipw_tty_mutex);
31678 return -ENODEV;
31679 }
31680 - if (tty->open_count == 0)
31681 + if (local_read(&tty->open_count) == 0)
31682 tty->tx_bytes_queued = 0;
31683
31684 - tty->open_count++;
31685 + local_inc(&tty->open_count);
31686
31687 tty->linux_tty = linux_tty;
31688 linux_tty->driver_data = tty;
31689 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31690
31691 static void do_ipw_close(struct ipw_tty *tty)
31692 {
31693 - tty->open_count--;
31694 -
31695 - if (tty->open_count == 0) {
31696 + if (local_dec_return(&tty->open_count) == 0) {
31697 struct tty_struct *linux_tty = tty->linux_tty;
31698
31699 if (linux_tty != NULL) {
31700 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31701 return;
31702
31703 mutex_lock(&tty->ipw_tty_mutex);
31704 - if (tty->open_count == 0) {
31705 + if (local_read(&tty->open_count) == 0) {
31706 mutex_unlock(&tty->ipw_tty_mutex);
31707 return;
31708 }
31709 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31710 return;
31711 }
31712
31713 - if (!tty->open_count) {
31714 + if (!local_read(&tty->open_count)) {
31715 mutex_unlock(&tty->ipw_tty_mutex);
31716 return;
31717 }
31718 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31719 return -ENODEV;
31720
31721 mutex_lock(&tty->ipw_tty_mutex);
31722 - if (!tty->open_count) {
31723 + if (!local_read(&tty->open_count)) {
31724 mutex_unlock(&tty->ipw_tty_mutex);
31725 return -EINVAL;
31726 }
31727 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31728 if (!tty)
31729 return -ENODEV;
31730
31731 - if (!tty->open_count)
31732 + if (!local_read(&tty->open_count))
31733 return -EINVAL;
31734
31735 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31736 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31737 if (!tty)
31738 return 0;
31739
31740 - if (!tty->open_count)
31741 + if (!local_read(&tty->open_count))
31742 return 0;
31743
31744 return tty->tx_bytes_queued;
31745 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31746 if (!tty)
31747 return -ENODEV;
31748
31749 - if (!tty->open_count)
31750 + if (!local_read(&tty->open_count))
31751 return -EINVAL;
31752
31753 return get_control_lines(tty);
31754 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31755 if (!tty)
31756 return -ENODEV;
31757
31758 - if (!tty->open_count)
31759 + if (!local_read(&tty->open_count))
31760 return -EINVAL;
31761
31762 return set_control_lines(tty, set, clear);
31763 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31764 if (!tty)
31765 return -ENODEV;
31766
31767 - if (!tty->open_count)
31768 + if (!local_read(&tty->open_count))
31769 return -EINVAL;
31770
31771 /* FIXME: Exactly how is the tty object locked here .. */
31772 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31773 against a parallel ioctl etc */
31774 mutex_lock(&ttyj->ipw_tty_mutex);
31775 }
31776 - while (ttyj->open_count)
31777 + while (local_read(&ttyj->open_count))
31778 do_ipw_close(ttyj);
31779 ipwireless_disassociate_network_ttys(network,
31780 ttyj->channel_idx);
31781 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31782 index 62f282e..e45c45c 100644
31783 --- a/drivers/char/pty.c
31784 +++ b/drivers/char/pty.c
31785 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31786 register_sysctl_table(pty_root_table);
31787
31788 /* Now create the /dev/ptmx special device */
31789 + pax_open_kernel();
31790 tty_default_fops(&ptmx_fops);
31791 - ptmx_fops.open = ptmx_open;
31792 + *(void **)&ptmx_fops.open = ptmx_open;
31793 + pax_close_kernel();
31794
31795 cdev_init(&ptmx_cdev, &ptmx_fops);
31796 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31797 diff --git a/drivers/char/random.c b/drivers/char/random.c
31798 index 3a19e2d..6ed09d3 100644
31799 --- a/drivers/char/random.c
31800 +++ b/drivers/char/random.c
31801 @@ -254,8 +254,13 @@
31802 /*
31803 * Configuration information
31804 */
31805 +#ifdef CONFIG_GRKERNSEC_RANDNET
31806 +#define INPUT_POOL_WORDS 512
31807 +#define OUTPUT_POOL_WORDS 128
31808 +#else
31809 #define INPUT_POOL_WORDS 128
31810 #define OUTPUT_POOL_WORDS 32
31811 +#endif
31812 #define SEC_XFER_SIZE 512
31813
31814 /*
31815 @@ -292,10 +297,17 @@ static struct poolinfo {
31816 int poolwords;
31817 int tap1, tap2, tap3, tap4, tap5;
31818 } poolinfo_table[] = {
31819 +#ifdef CONFIG_GRKERNSEC_RANDNET
31820 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31821 + { 512, 411, 308, 208, 104, 1 },
31822 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31823 + { 128, 103, 76, 51, 25, 1 },
31824 +#else
31825 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31826 { 128, 103, 76, 51, 25, 1 },
31827 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31828 { 32, 26, 20, 14, 7, 1 },
31829 +#endif
31830 #if 0
31831 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31832 { 2048, 1638, 1231, 819, 411, 1 },
31833 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31834 #include <linux/sysctl.h>
31835
31836 static int min_read_thresh = 8, min_write_thresh;
31837 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31838 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31839 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31840 static char sysctl_bootid[16];
31841
31842 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31843 index 0e29a23..0efc2c2 100644
31844 --- a/drivers/char/rocket.c
31845 +++ b/drivers/char/rocket.c
31846 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31847 struct rocket_ports tmp;
31848 int board;
31849
31850 + pax_track_stack();
31851 +
31852 if (!retports)
31853 return -EFAULT;
31854 memset(&tmp, 0, sizeof (tmp));
31855 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31856 index 8c262aa..4d3b058 100644
31857 --- a/drivers/char/sonypi.c
31858 +++ b/drivers/char/sonypi.c
31859 @@ -55,6 +55,7 @@
31860 #include <asm/uaccess.h>
31861 #include <asm/io.h>
31862 #include <asm/system.h>
31863 +#include <asm/local.h>
31864
31865 #include <linux/sonypi.h>
31866
31867 @@ -491,7 +492,7 @@ static struct sonypi_device {
31868 spinlock_t fifo_lock;
31869 wait_queue_head_t fifo_proc_list;
31870 struct fasync_struct *fifo_async;
31871 - int open_count;
31872 + local_t open_count;
31873 int model;
31874 struct input_dev *input_jog_dev;
31875 struct input_dev *input_key_dev;
31876 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31877 static int sonypi_misc_release(struct inode *inode, struct file *file)
31878 {
31879 mutex_lock(&sonypi_device.lock);
31880 - sonypi_device.open_count--;
31881 + local_dec(&sonypi_device.open_count);
31882 mutex_unlock(&sonypi_device.lock);
31883 return 0;
31884 }
31885 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31886 lock_kernel();
31887 mutex_lock(&sonypi_device.lock);
31888 /* Flush input queue on first open */
31889 - if (!sonypi_device.open_count)
31890 + if (!local_read(&sonypi_device.open_count))
31891 kfifo_reset(sonypi_device.fifo);
31892 - sonypi_device.open_count++;
31893 + local_inc(&sonypi_device.open_count);
31894 mutex_unlock(&sonypi_device.lock);
31895 unlock_kernel();
31896 return 0;
31897 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31898 index db6dcfa..13834cb 100644
31899 --- a/drivers/char/stallion.c
31900 +++ b/drivers/char/stallion.c
31901 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31902 struct stlport stl_dummyport;
31903 struct stlport *portp;
31904
31905 + pax_track_stack();
31906 +
31907 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31908 return -EFAULT;
31909 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31910 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31911 index a0789f6..cea3902 100644
31912 --- a/drivers/char/tpm/tpm.c
31913 +++ b/drivers/char/tpm/tpm.c
31914 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31915 chip->vendor.req_complete_val)
31916 goto out_recv;
31917
31918 - if ((status == chip->vendor.req_canceled)) {
31919 + if (status == chip->vendor.req_canceled) {
31920 dev_err(chip->dev, "Operation Canceled\n");
31921 rc = -ECANCELED;
31922 goto out;
31923 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31924
31925 struct tpm_chip *chip = dev_get_drvdata(dev);
31926
31927 + pax_track_stack();
31928 +
31929 tpm_cmd.header.in = tpm_readpubek_header;
31930 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31931 "attempting to read the PUBEK");
31932 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31933 index bf2170f..ce8cab9 100644
31934 --- a/drivers/char/tpm/tpm_bios.c
31935 +++ b/drivers/char/tpm/tpm_bios.c
31936 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31937 event = addr;
31938
31939 if ((event->event_type == 0 && event->event_size == 0) ||
31940 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31941 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31942 return NULL;
31943
31944 return addr;
31945 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31946 return NULL;
31947
31948 if ((event->event_type == 0 && event->event_size == 0) ||
31949 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31950 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31951 return NULL;
31952
31953 (*pos)++;
31954 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31955 int i;
31956
31957 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31958 - seq_putc(m, data[i]);
31959 + if (!seq_putc(m, data[i]))
31960 + return -EFAULT;
31961
31962 return 0;
31963 }
31964 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31965 log->bios_event_log_end = log->bios_event_log + len;
31966
31967 virt = acpi_os_map_memory(start, len);
31968 + if (!virt) {
31969 + kfree(log->bios_event_log);
31970 + log->bios_event_log = NULL;
31971 + return -EFAULT;
31972 + }
31973
31974 - memcpy(log->bios_event_log, virt, len);
31975 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31976
31977 acpi_os_unmap_memory(virt, len);
31978 return 0;
31979 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31980 index 123cedf..6664cb4 100644
31981 --- a/drivers/char/tty_io.c
31982 +++ b/drivers/char/tty_io.c
31983 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
31984 static int tty_release(struct inode *, struct file *);
31985 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
31986 #ifdef CONFIG_COMPAT
31987 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31988 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
31989 unsigned long arg);
31990 #else
31991 #define tty_compat_ioctl NULL
31992 @@ -1774,6 +1774,7 @@ got_driver:
31993
31994 if (IS_ERR(tty)) {
31995 mutex_unlock(&tty_mutex);
31996 + tty_driver_kref_put(driver);
31997 return PTR_ERR(tty);
31998 }
31999 }
32000 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32001 return retval;
32002 }
32003
32004 +EXPORT_SYMBOL(tty_ioctl);
32005 +
32006 #ifdef CONFIG_COMPAT
32007 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32008 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32009 unsigned long arg)
32010 {
32011 struct inode *inode = file->f_dentry->d_inode;
32012 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32013
32014 return retval;
32015 }
32016 +
32017 +EXPORT_SYMBOL(tty_compat_ioctl);
32018 #endif
32019
32020 /*
32021 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32022
32023 void tty_default_fops(struct file_operations *fops)
32024 {
32025 - *fops = tty_fops;
32026 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32027 }
32028
32029 /*
32030 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32031 index d814a3d..b55b9c9 100644
32032 --- a/drivers/char/tty_ldisc.c
32033 +++ b/drivers/char/tty_ldisc.c
32034 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32035 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32036 struct tty_ldisc_ops *ldo = ld->ops;
32037
32038 - ldo->refcount--;
32039 + atomic_dec(&ldo->refcount);
32040 module_put(ldo->owner);
32041 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32042
32043 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32044 spin_lock_irqsave(&tty_ldisc_lock, flags);
32045 tty_ldiscs[disc] = new_ldisc;
32046 new_ldisc->num = disc;
32047 - new_ldisc->refcount = 0;
32048 + atomic_set(&new_ldisc->refcount, 0);
32049 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32050
32051 return ret;
32052 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32053 return -EINVAL;
32054
32055 spin_lock_irqsave(&tty_ldisc_lock, flags);
32056 - if (tty_ldiscs[disc]->refcount)
32057 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32058 ret = -EBUSY;
32059 else
32060 tty_ldiscs[disc] = NULL;
32061 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32062 if (ldops) {
32063 ret = ERR_PTR(-EAGAIN);
32064 if (try_module_get(ldops->owner)) {
32065 - ldops->refcount++;
32066 + atomic_inc(&ldops->refcount);
32067 ret = ldops;
32068 }
32069 }
32070 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32071 unsigned long flags;
32072
32073 spin_lock_irqsave(&tty_ldisc_lock, flags);
32074 - ldops->refcount--;
32075 + atomic_dec(&ldops->refcount);
32076 module_put(ldops->owner);
32077 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32078 }
32079 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32080 index a035ae3..c27fe2c 100644
32081 --- a/drivers/char/virtio_console.c
32082 +++ b/drivers/char/virtio_console.c
32083 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32084 * virtqueue, so we let the drivers do some boutique early-output thing. */
32085 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32086 {
32087 - virtio_cons.put_chars = put_chars;
32088 + pax_open_kernel();
32089 + *(void **)&virtio_cons.put_chars = put_chars;
32090 + pax_close_kernel();
32091 return hvc_instantiate(0, 0, &virtio_cons);
32092 }
32093
32094 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32095 out_vq = vqs[1];
32096
32097 /* Start using the new console output. */
32098 - virtio_cons.get_chars = get_chars;
32099 - virtio_cons.put_chars = put_chars;
32100 - virtio_cons.notifier_add = notifier_add_vio;
32101 - virtio_cons.notifier_del = notifier_del_vio;
32102 - virtio_cons.notifier_hangup = notifier_del_vio;
32103 + pax_open_kernel();
32104 + *(void **)&virtio_cons.get_chars = get_chars;
32105 + *(void **)&virtio_cons.put_chars = put_chars;
32106 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32107 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32108 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32109 + pax_close_kernel();
32110
32111 /* The first argument of hvc_alloc() is the virtual console number, so
32112 * we use zero. The second argument is the parameter for the
32113 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32114 index 0c80c68..53d59c1 100644
32115 --- a/drivers/char/vt.c
32116 +++ b/drivers/char/vt.c
32117 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32118
32119 static void notify_write(struct vc_data *vc, unsigned int unicode)
32120 {
32121 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32122 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32123 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32124 }
32125
32126 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32127 index 6351a26..999af95 100644
32128 --- a/drivers/char/vt_ioctl.c
32129 +++ b/drivers/char/vt_ioctl.c
32130 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32131 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32132 return -EFAULT;
32133
32134 - if (!capable(CAP_SYS_TTY_CONFIG))
32135 - perm = 0;
32136 -
32137 switch (cmd) {
32138 case KDGKBENT:
32139 key_map = key_maps[s];
32140 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32141 val = (i ? K_HOLE : K_NOSUCHMAP);
32142 return put_user(val, &user_kbe->kb_value);
32143 case KDSKBENT:
32144 + if (!capable(CAP_SYS_TTY_CONFIG))
32145 + perm = 0;
32146 +
32147 if (!perm)
32148 return -EPERM;
32149 +
32150 if (!i && v == K_NOSUCHMAP) {
32151 /* deallocate map */
32152 key_map = key_maps[s];
32153 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32154 int i, j, k;
32155 int ret;
32156
32157 - if (!capable(CAP_SYS_TTY_CONFIG))
32158 - perm = 0;
32159 -
32160 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32161 if (!kbs) {
32162 ret = -ENOMEM;
32163 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32164 kfree(kbs);
32165 return ((p && *p) ? -EOVERFLOW : 0);
32166 case KDSKBSENT:
32167 + if (!capable(CAP_SYS_TTY_CONFIG))
32168 + perm = 0;
32169 +
32170 if (!perm) {
32171 ret = -EPERM;
32172 goto reterr;
32173 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32174 index c7ae026..1769c1d 100644
32175 --- a/drivers/cpufreq/cpufreq.c
32176 +++ b/drivers/cpufreq/cpufreq.c
32177 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32178 complete(&policy->kobj_unregister);
32179 }
32180
32181 -static struct sysfs_ops sysfs_ops = {
32182 +static const struct sysfs_ops sysfs_ops = {
32183 .show = show,
32184 .store = store,
32185 };
32186 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32187 index 97b0038..2056670 100644
32188 --- a/drivers/cpuidle/sysfs.c
32189 +++ b/drivers/cpuidle/sysfs.c
32190 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32191 return ret;
32192 }
32193
32194 -static struct sysfs_ops cpuidle_sysfs_ops = {
32195 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32196 .show = cpuidle_show,
32197 .store = cpuidle_store,
32198 };
32199 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32200 return ret;
32201 }
32202
32203 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32204 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32205 .show = cpuidle_state_show,
32206 };
32207
32208 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32209 .release = cpuidle_state_sysfs_release,
32210 };
32211
32212 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32213 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32214 {
32215 kobject_put(&device->kobjs[i]->kobj);
32216 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32217 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32218 index 5f753fc..0377ae9 100644
32219 --- a/drivers/crypto/hifn_795x.c
32220 +++ b/drivers/crypto/hifn_795x.c
32221 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32222 0xCA, 0x34, 0x2B, 0x2E};
32223 struct scatterlist sg;
32224
32225 + pax_track_stack();
32226 +
32227 memset(src, 0, sizeof(src));
32228 memset(ctx.key, 0, sizeof(ctx.key));
32229
32230 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32231 index 71e6482..de8d96c 100644
32232 --- a/drivers/crypto/padlock-aes.c
32233 +++ b/drivers/crypto/padlock-aes.c
32234 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32235 struct crypto_aes_ctx gen_aes;
32236 int cpu;
32237
32238 + pax_track_stack();
32239 +
32240 if (key_len % 8) {
32241 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32242 return -EINVAL;
32243 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32244 index dcc4ab7..cc834bb 100644
32245 --- a/drivers/dma/ioat/dma.c
32246 +++ b/drivers/dma/ioat/dma.c
32247 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32248 return entry->show(&chan->common, page);
32249 }
32250
32251 -struct sysfs_ops ioat_sysfs_ops = {
32252 +const struct sysfs_ops ioat_sysfs_ops = {
32253 .show = ioat_attr_show,
32254 };
32255
32256 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32257 index bbc3e78..f2db62c 100644
32258 --- a/drivers/dma/ioat/dma.h
32259 +++ b/drivers/dma/ioat/dma.h
32260 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32261 unsigned long *phys_complete);
32262 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32263 void ioat_kobject_del(struct ioatdma_device *device);
32264 -extern struct sysfs_ops ioat_sysfs_ops;
32265 +extern const struct sysfs_ops ioat_sysfs_ops;
32266 extern struct ioat_sysfs_entry ioat_version_attr;
32267 extern struct ioat_sysfs_entry ioat_cap_attr;
32268 #endif /* IOATDMA_H */
32269 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32270 index 9908c9e..3ceb0e5 100644
32271 --- a/drivers/dma/ioat/dma_v3.c
32272 +++ b/drivers/dma/ioat/dma_v3.c
32273 @@ -71,10 +71,10 @@
32274 /* provide a lookup table for setting the source address in the base or
32275 * extended descriptor of an xor or pq descriptor
32276 */
32277 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32278 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32279 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32280 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32281 +static const u8 xor_idx_to_desc = 0xd0;
32282 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32283 +static const u8 pq_idx_to_desc = 0xf8;
32284 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32285
32286 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32287 {
32288 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32289 index 85c464a..afd1e73 100644
32290 --- a/drivers/edac/amd64_edac.c
32291 +++ b/drivers/edac/amd64_edac.c
32292 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32293 * PCI core identifies what devices are on a system during boot, and then
32294 * inquiry this table to see if this driver is for a given device found.
32295 */
32296 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32297 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32298 {
32299 .vendor = PCI_VENDOR_ID_AMD,
32300 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32301 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32302 index 2b95f1a..4f52793 100644
32303 --- a/drivers/edac/amd76x_edac.c
32304 +++ b/drivers/edac/amd76x_edac.c
32305 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32306 edac_mc_free(mci);
32307 }
32308
32309 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32310 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32311 {
32312 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32313 AMD762},
32314 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32315 index d205d49..74c9672 100644
32316 --- a/drivers/edac/e752x_edac.c
32317 +++ b/drivers/edac/e752x_edac.c
32318 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32319 edac_mc_free(mci);
32320 }
32321
32322 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32323 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32324 {
32325 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32326 E7520},
32327 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32328 index c7d11cc..c59c1ca 100644
32329 --- a/drivers/edac/e7xxx_edac.c
32330 +++ b/drivers/edac/e7xxx_edac.c
32331 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32332 edac_mc_free(mci);
32333 }
32334
32335 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32336 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32337 {
32338 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32339 E7205},
32340 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32341 index 5376457..5fdedbc 100644
32342 --- a/drivers/edac/edac_device_sysfs.c
32343 +++ b/drivers/edac/edac_device_sysfs.c
32344 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32345 }
32346
32347 /* edac_dev file operations for an 'ctl_info' */
32348 -static struct sysfs_ops device_ctl_info_ops = {
32349 +static const struct sysfs_ops device_ctl_info_ops = {
32350 .show = edac_dev_ctl_info_show,
32351 .store = edac_dev_ctl_info_store
32352 };
32353 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32354 }
32355
32356 /* edac_dev file operations for an 'instance' */
32357 -static struct sysfs_ops device_instance_ops = {
32358 +static const struct sysfs_ops device_instance_ops = {
32359 .show = edac_dev_instance_show,
32360 .store = edac_dev_instance_store
32361 };
32362 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32363 }
32364
32365 /* edac_dev file operations for a 'block' */
32366 -static struct sysfs_ops device_block_ops = {
32367 +static const struct sysfs_ops device_block_ops = {
32368 .show = edac_dev_block_show,
32369 .store = edac_dev_block_store
32370 };
32371 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32372 index e1d4ce0..88840e9 100644
32373 --- a/drivers/edac/edac_mc_sysfs.c
32374 +++ b/drivers/edac/edac_mc_sysfs.c
32375 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32376 return -EIO;
32377 }
32378
32379 -static struct sysfs_ops csrowfs_ops = {
32380 +static const struct sysfs_ops csrowfs_ops = {
32381 .show = csrowdev_show,
32382 .store = csrowdev_store
32383 };
32384 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32385 }
32386
32387 /* Intermediate show/store table */
32388 -static struct sysfs_ops mci_ops = {
32389 +static const struct sysfs_ops mci_ops = {
32390 .show = mcidev_show,
32391 .store = mcidev_store
32392 };
32393 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32394 index 422728c..d8d9c88 100644
32395 --- a/drivers/edac/edac_pci_sysfs.c
32396 +++ b/drivers/edac/edac_pci_sysfs.c
32397 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32398 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32399 static int edac_pci_poll_msec = 1000; /* one second workq period */
32400
32401 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32402 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32403 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32404 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32405
32406 static struct kobject *edac_pci_top_main_kobj;
32407 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32408 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32409 }
32410
32411 /* fs_ops table */
32412 -static struct sysfs_ops pci_instance_ops = {
32413 +static const struct sysfs_ops pci_instance_ops = {
32414 .show = edac_pci_instance_show,
32415 .store = edac_pci_instance_store
32416 };
32417 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32418 return -EIO;
32419 }
32420
32421 -static struct sysfs_ops edac_pci_sysfs_ops = {
32422 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32423 .show = edac_pci_dev_show,
32424 .store = edac_pci_dev_store
32425 };
32426 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32427 edac_printk(KERN_CRIT, EDAC_PCI,
32428 "Signaled System Error on %s\n",
32429 pci_name(dev));
32430 - atomic_inc(&pci_nonparity_count);
32431 + atomic_inc_unchecked(&pci_nonparity_count);
32432 }
32433
32434 if (status & (PCI_STATUS_PARITY)) {
32435 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32436 "Master Data Parity Error on %s\n",
32437 pci_name(dev));
32438
32439 - atomic_inc(&pci_parity_count);
32440 + atomic_inc_unchecked(&pci_parity_count);
32441 }
32442
32443 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32444 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32445 "Detected Parity Error on %s\n",
32446 pci_name(dev));
32447
32448 - atomic_inc(&pci_parity_count);
32449 + atomic_inc_unchecked(&pci_parity_count);
32450 }
32451 }
32452
32453 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32454 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32455 "Signaled System Error on %s\n",
32456 pci_name(dev));
32457 - atomic_inc(&pci_nonparity_count);
32458 + atomic_inc_unchecked(&pci_nonparity_count);
32459 }
32460
32461 if (status & (PCI_STATUS_PARITY)) {
32462 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32463 "Master Data Parity Error on "
32464 "%s\n", pci_name(dev));
32465
32466 - atomic_inc(&pci_parity_count);
32467 + atomic_inc_unchecked(&pci_parity_count);
32468 }
32469
32470 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32471 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32472 "Detected Parity Error on %s\n",
32473 pci_name(dev));
32474
32475 - atomic_inc(&pci_parity_count);
32476 + atomic_inc_unchecked(&pci_parity_count);
32477 }
32478 }
32479 }
32480 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32481 if (!check_pci_errors)
32482 return;
32483
32484 - before_count = atomic_read(&pci_parity_count);
32485 + before_count = atomic_read_unchecked(&pci_parity_count);
32486
32487 /* scan all PCI devices looking for a Parity Error on devices and
32488 * bridges.
32489 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32490 /* Only if operator has selected panic on PCI Error */
32491 if (edac_pci_get_panic_on_pe()) {
32492 /* If the count is different 'after' from 'before' */
32493 - if (before_count != atomic_read(&pci_parity_count))
32494 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32495 panic("EDAC: PCI Parity Error");
32496 }
32497 }
32498 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32499 index 6c9a0f2..9c1cf7e 100644
32500 --- a/drivers/edac/i3000_edac.c
32501 +++ b/drivers/edac/i3000_edac.c
32502 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32503 edac_mc_free(mci);
32504 }
32505
32506 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32507 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32508 {
32509 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32510 I3000},
32511 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32512 index fde4db9..fe108f9 100644
32513 --- a/drivers/edac/i3200_edac.c
32514 +++ b/drivers/edac/i3200_edac.c
32515 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32516 edac_mc_free(mci);
32517 }
32518
32519 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32520 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32521 {
32522 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32523 I3200},
32524 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32525 index adc10a2..57d4ccf 100644
32526 --- a/drivers/edac/i5000_edac.c
32527 +++ b/drivers/edac/i5000_edac.c
32528 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32529 *
32530 * The "E500P" device is the first device supported.
32531 */
32532 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32533 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32534 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32535 .driver_data = I5000P},
32536
32537 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32538 index 22db05a..b2b5503 100644
32539 --- a/drivers/edac/i5100_edac.c
32540 +++ b/drivers/edac/i5100_edac.c
32541 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32542 edac_mc_free(mci);
32543 }
32544
32545 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32546 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32547 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32548 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32549 { 0, }
32550 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32551 index f99d106..f050710 100644
32552 --- a/drivers/edac/i5400_edac.c
32553 +++ b/drivers/edac/i5400_edac.c
32554 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32555 *
32556 * The "E500P" device is the first device supported.
32557 */
32558 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32559 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32560 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32561 {0,} /* 0 terminated list. */
32562 };
32563 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32564 index 577760a..9ce16ce 100644
32565 --- a/drivers/edac/i82443bxgx_edac.c
32566 +++ b/drivers/edac/i82443bxgx_edac.c
32567 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32568
32569 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32570
32571 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32572 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32573 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32574 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32575 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32576 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32577 index c0088ba..64a7b98 100644
32578 --- a/drivers/edac/i82860_edac.c
32579 +++ b/drivers/edac/i82860_edac.c
32580 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32581 edac_mc_free(mci);
32582 }
32583
32584 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32585 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32586 {
32587 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32588 I82860},
32589 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32590 index b2d83b9..a34357b 100644
32591 --- a/drivers/edac/i82875p_edac.c
32592 +++ b/drivers/edac/i82875p_edac.c
32593 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32594 edac_mc_free(mci);
32595 }
32596
32597 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32598 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32599 {
32600 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32601 I82875P},
32602 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32603 index 2eed3ea..87bbbd1 100644
32604 --- a/drivers/edac/i82975x_edac.c
32605 +++ b/drivers/edac/i82975x_edac.c
32606 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32607 edac_mc_free(mci);
32608 }
32609
32610 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32611 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32612 {
32613 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32614 I82975X
32615 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32616 index 9900675..78ac2b6 100644
32617 --- a/drivers/edac/r82600_edac.c
32618 +++ b/drivers/edac/r82600_edac.c
32619 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32620 edac_mc_free(mci);
32621 }
32622
32623 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32624 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32625 {
32626 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32627 },
32628 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32629 index d4ec605..4cfec4e 100644
32630 --- a/drivers/edac/x38_edac.c
32631 +++ b/drivers/edac/x38_edac.c
32632 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32633 edac_mc_free(mci);
32634 }
32635
32636 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32637 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32638 {
32639 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32640 X38},
32641 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32642 index 3fc2ceb..daf098f 100644
32643 --- a/drivers/firewire/core-card.c
32644 +++ b/drivers/firewire/core-card.c
32645 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32646
32647 void fw_core_remove_card(struct fw_card *card)
32648 {
32649 - struct fw_card_driver dummy_driver = dummy_driver_template;
32650 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32651
32652 card->driver->update_phy_reg(card, 4,
32653 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32654 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32655 index 4560d8f..36db24a 100644
32656 --- a/drivers/firewire/core-cdev.c
32657 +++ b/drivers/firewire/core-cdev.c
32658 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32659 int ret;
32660
32661 if ((request->channels == 0 && request->bandwidth == 0) ||
32662 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32663 - request->bandwidth < 0)
32664 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32665 return -EINVAL;
32666
32667 r = kmalloc(sizeof(*r), GFP_KERNEL);
32668 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32669 index da628c7..cf54a2c 100644
32670 --- a/drivers/firewire/core-transaction.c
32671 +++ b/drivers/firewire/core-transaction.c
32672 @@ -36,6 +36,7 @@
32673 #include <linux/string.h>
32674 #include <linux/timer.h>
32675 #include <linux/types.h>
32676 +#include <linux/sched.h>
32677
32678 #include <asm/byteorder.h>
32679
32680 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32681 struct transaction_callback_data d;
32682 struct fw_transaction t;
32683
32684 + pax_track_stack();
32685 +
32686 init_completion(&d.done);
32687 d.payload = payload;
32688 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32689 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32690 index 7ff6e75..a2965d9 100644
32691 --- a/drivers/firewire/core.h
32692 +++ b/drivers/firewire/core.h
32693 @@ -86,6 +86,7 @@ struct fw_card_driver {
32694
32695 int (*stop_iso)(struct fw_iso_context *ctx);
32696 };
32697 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32698
32699 void fw_card_initialize(struct fw_card *card,
32700 const struct fw_card_driver *driver, struct device *device);
32701 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32702 index 3a2ccb0..82fd7c4 100644
32703 --- a/drivers/firmware/dmi_scan.c
32704 +++ b/drivers/firmware/dmi_scan.c
32705 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32706 }
32707 }
32708 else {
32709 - /*
32710 - * no iounmap() for that ioremap(); it would be a no-op, but
32711 - * it's so early in setup that sucker gets confused into doing
32712 - * what it shouldn't if we actually call it.
32713 - */
32714 p = dmi_ioremap(0xF0000, 0x10000);
32715 if (p == NULL)
32716 goto error;
32717 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32718 if (buf == NULL)
32719 return -1;
32720
32721 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32722 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32723
32724 iounmap(buf);
32725 return 0;
32726 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32727 index 9e4f59d..110e24e 100644
32728 --- a/drivers/firmware/edd.c
32729 +++ b/drivers/firmware/edd.c
32730 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32731 return ret;
32732 }
32733
32734 -static struct sysfs_ops edd_attr_ops = {
32735 +static const struct sysfs_ops edd_attr_ops = {
32736 .show = edd_attr_show,
32737 };
32738
32739 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32740 index f4f709d..082f06e 100644
32741 --- a/drivers/firmware/efivars.c
32742 +++ b/drivers/firmware/efivars.c
32743 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32744 return ret;
32745 }
32746
32747 -static struct sysfs_ops efivar_attr_ops = {
32748 +static const struct sysfs_ops efivar_attr_ops = {
32749 .show = efivar_attr_show,
32750 .store = efivar_attr_store,
32751 };
32752 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32753 index 051d1eb..0a5d4e7 100644
32754 --- a/drivers/firmware/iscsi_ibft.c
32755 +++ b/drivers/firmware/iscsi_ibft.c
32756 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32757 return ret;
32758 }
32759
32760 -static struct sysfs_ops ibft_attr_ops = {
32761 +static const struct sysfs_ops ibft_attr_ops = {
32762 .show = ibft_show_attribute,
32763 };
32764
32765 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32766 index 56f9234..8c58c7b 100644
32767 --- a/drivers/firmware/memmap.c
32768 +++ b/drivers/firmware/memmap.c
32769 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32770 NULL
32771 };
32772
32773 -static struct sysfs_ops memmap_attr_ops = {
32774 +static const struct sysfs_ops memmap_attr_ops = {
32775 .show = memmap_attr_show,
32776 };
32777
32778 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32779 index b16c9a8..2af7d3f 100644
32780 --- a/drivers/gpio/vr41xx_giu.c
32781 +++ b/drivers/gpio/vr41xx_giu.c
32782 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32783 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32784 maskl, pendl, maskh, pendh);
32785
32786 - atomic_inc(&irq_err_count);
32787 + atomic_inc_unchecked(&irq_err_count);
32788
32789 return -EINVAL;
32790 }
32791 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32792 index bea6efc..3dc0f42 100644
32793 --- a/drivers/gpu/drm/drm_crtc.c
32794 +++ b/drivers/gpu/drm/drm_crtc.c
32795 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32796 */
32797 if ((out_resp->count_modes >= mode_count) && mode_count) {
32798 copied = 0;
32799 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32800 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32801 list_for_each_entry(mode, &connector->modes, head) {
32802 drm_crtc_convert_to_umode(&u_mode, mode);
32803 if (copy_to_user(mode_ptr + copied,
32804 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32805
32806 if ((out_resp->count_props >= props_count) && props_count) {
32807 copied = 0;
32808 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32809 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32810 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32811 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32812 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32813 if (connector->property_ids[i] != 0) {
32814 if (put_user(connector->property_ids[i],
32815 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32816
32817 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32818 copied = 0;
32819 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32820 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32821 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32822 if (connector->encoder_ids[i] != 0) {
32823 if (put_user(connector->encoder_ids[i],
32824 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32825 }
32826
32827 for (i = 0; i < crtc_req->count_connectors; i++) {
32828 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32829 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32830 if (get_user(out_id, &set_connectors_ptr[i])) {
32831 ret = -EFAULT;
32832 goto out;
32833 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32834 out_resp->flags = property->flags;
32835
32836 if ((out_resp->count_values >= value_count) && value_count) {
32837 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32838 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32839 for (i = 0; i < value_count; i++) {
32840 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32841 ret = -EFAULT;
32842 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32843 if (property->flags & DRM_MODE_PROP_ENUM) {
32844 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32845 copied = 0;
32846 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32847 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32848 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32849
32850 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32851 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32852 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32853 copied = 0;
32854 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32855 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32856 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32857
32858 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32859 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32860 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32861 blob = obj_to_blob(obj);
32862
32863 if (out_resp->length == blob->length) {
32864 - blob_ptr = (void *)(unsigned long)out_resp->data;
32865 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32866 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32867 ret = -EFAULT;
32868 goto done;
32869 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32870 index 1b8745d..92fdbf6 100644
32871 --- a/drivers/gpu/drm/drm_crtc_helper.c
32872 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32873 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32874 struct drm_crtc *tmp;
32875 int crtc_mask = 1;
32876
32877 - WARN(!crtc, "checking null crtc?");
32878 + BUG_ON(!crtc);
32879
32880 dev = crtc->dev;
32881
32882 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32883
32884 adjusted_mode = drm_mode_duplicate(dev, mode);
32885
32886 + pax_track_stack();
32887 +
32888 crtc->enabled = drm_helper_crtc_in_use(crtc);
32889
32890 if (!crtc->enabled)
32891 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32892 index 0e27d98..dec8768 100644
32893 --- a/drivers/gpu/drm/drm_drv.c
32894 +++ b/drivers/gpu/drm/drm_drv.c
32895 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32896 char *kdata = NULL;
32897
32898 atomic_inc(&dev->ioctl_count);
32899 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32900 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32901 ++file_priv->ioctl_count;
32902
32903 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32904 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32905 index 519161e..98c840c 100644
32906 --- a/drivers/gpu/drm/drm_fops.c
32907 +++ b/drivers/gpu/drm/drm_fops.c
32908 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32909 }
32910
32911 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32912 - atomic_set(&dev->counts[i], 0);
32913 + atomic_set_unchecked(&dev->counts[i], 0);
32914
32915 dev->sigdata.lock = NULL;
32916
32917 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32918
32919 retcode = drm_open_helper(inode, filp, dev);
32920 if (!retcode) {
32921 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32922 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32923 spin_lock(&dev->count_lock);
32924 - if (!dev->open_count++) {
32925 + if (local_inc_return(&dev->open_count) == 1) {
32926 spin_unlock(&dev->count_lock);
32927 retcode = drm_setup(dev);
32928 goto out;
32929 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32930
32931 lock_kernel();
32932
32933 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32934 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32935
32936 if (dev->driver->preclose)
32937 dev->driver->preclose(dev, file_priv);
32938 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32939 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32940 task_pid_nr(current),
32941 (long)old_encode_dev(file_priv->minor->device),
32942 - dev->open_count);
32943 + local_read(&dev->open_count));
32944
32945 /* Release any auth tokens that might point to this file_priv,
32946 (do that under the drm_global_mutex) */
32947 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32948 * End inline drm_release
32949 */
32950
32951 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32952 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32953 spin_lock(&dev->count_lock);
32954 - if (!--dev->open_count) {
32955 + if (local_dec_and_test(&dev->open_count)) {
32956 if (atomic_read(&dev->ioctl_count)) {
32957 DRM_ERROR("Device busy: %d\n",
32958 atomic_read(&dev->ioctl_count));
32959 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32960 index 8bf3770..79422805 100644
32961 --- a/drivers/gpu/drm/drm_gem.c
32962 +++ b/drivers/gpu/drm/drm_gem.c
32963 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32964 spin_lock_init(&dev->object_name_lock);
32965 idr_init(&dev->object_name_idr);
32966 atomic_set(&dev->object_count, 0);
32967 - atomic_set(&dev->object_memory, 0);
32968 + atomic_set_unchecked(&dev->object_memory, 0);
32969 atomic_set(&dev->pin_count, 0);
32970 - atomic_set(&dev->pin_memory, 0);
32971 + atomic_set_unchecked(&dev->pin_memory, 0);
32972 atomic_set(&dev->gtt_count, 0);
32973 - atomic_set(&dev->gtt_memory, 0);
32974 + atomic_set_unchecked(&dev->gtt_memory, 0);
32975
32976 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32977 if (!mm) {
32978 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32979 goto fput;
32980 }
32981 atomic_inc(&dev->object_count);
32982 - atomic_add(obj->size, &dev->object_memory);
32983 + atomic_add_unchecked(obj->size, &dev->object_memory);
32984 return obj;
32985 fput:
32986 fput(obj->filp);
32987 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32988
32989 fput(obj->filp);
32990 atomic_dec(&dev->object_count);
32991 - atomic_sub(obj->size, &dev->object_memory);
32992 + atomic_sub_unchecked(obj->size, &dev->object_memory);
32993 kfree(obj);
32994 }
32995 EXPORT_SYMBOL(drm_gem_object_free);
32996 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32997 index f0f6c6b..34af322 100644
32998 --- a/drivers/gpu/drm/drm_info.c
32999 +++ b/drivers/gpu/drm/drm_info.c
33000 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33001 struct drm_local_map *map;
33002 struct drm_map_list *r_list;
33003
33004 - /* Hardcoded from _DRM_FRAME_BUFFER,
33005 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33006 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33007 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33008 + static const char * const types[] = {
33009 + [_DRM_FRAME_BUFFER] = "FB",
33010 + [_DRM_REGISTERS] = "REG",
33011 + [_DRM_SHM] = "SHM",
33012 + [_DRM_AGP] = "AGP",
33013 + [_DRM_SCATTER_GATHER] = "SG",
33014 + [_DRM_CONSISTENT] = "PCI",
33015 + [_DRM_GEM] = "GEM" };
33016 const char *type;
33017 int i;
33018
33019 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33020 map = r_list->map;
33021 if (!map)
33022 continue;
33023 - if (map->type < 0 || map->type > 5)
33024 + if (map->type >= ARRAY_SIZE(types))
33025 type = "??";
33026 else
33027 type = types[map->type];
33028 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33029 struct drm_device *dev = node->minor->dev;
33030
33031 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33032 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33033 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33034 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33035 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33036 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33037 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33038 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33039 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33040 return 0;
33041 }
33042 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33043 mutex_lock(&dev->struct_mutex);
33044 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33045 atomic_read(&dev->vma_count),
33046 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33047 + NULL, 0);
33048 +#else
33049 high_memory, (u64)virt_to_phys(high_memory));
33050 +#endif
33051
33052 list_for_each_entry(pt, &dev->vmalist, head) {
33053 vma = pt->vma;
33054 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33055 continue;
33056 seq_printf(m,
33057 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33058 - pt->pid, vma->vm_start, vma->vm_end,
33059 + pt->pid,
33060 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33061 + 0, 0,
33062 +#else
33063 + vma->vm_start, vma->vm_end,
33064 +#endif
33065 vma->vm_flags & VM_READ ? 'r' : '-',
33066 vma->vm_flags & VM_WRITE ? 'w' : '-',
33067 vma->vm_flags & VM_EXEC ? 'x' : '-',
33068 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33069 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33070 vma->vm_flags & VM_IO ? 'i' : '-',
33071 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33072 + 0);
33073 +#else
33074 vma->vm_pgoff);
33075 +#endif
33076
33077 #if defined(__i386__)
33078 pgprot = pgprot_val(vma->vm_page_prot);
33079 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33080 index 282d9fd..71e5f11 100644
33081 --- a/drivers/gpu/drm/drm_ioc32.c
33082 +++ b/drivers/gpu/drm/drm_ioc32.c
33083 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33084 request = compat_alloc_user_space(nbytes);
33085 if (!access_ok(VERIFY_WRITE, request, nbytes))
33086 return -EFAULT;
33087 - list = (struct drm_buf_desc *) (request + 1);
33088 + list = (struct drm_buf_desc __user *) (request + 1);
33089
33090 if (__put_user(count, &request->count)
33091 || __put_user(list, &request->list))
33092 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33093 request = compat_alloc_user_space(nbytes);
33094 if (!access_ok(VERIFY_WRITE, request, nbytes))
33095 return -EFAULT;
33096 - list = (struct drm_buf_pub *) (request + 1);
33097 + list = (struct drm_buf_pub __user *) (request + 1);
33098
33099 if (__put_user(count, &request->count)
33100 || __put_user(list, &request->list))
33101 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33102 index 9b9ff46..4ea724c 100644
33103 --- a/drivers/gpu/drm/drm_ioctl.c
33104 +++ b/drivers/gpu/drm/drm_ioctl.c
33105 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33106 stats->data[i].value =
33107 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33108 else
33109 - stats->data[i].value = atomic_read(&dev->counts[i]);
33110 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33111 stats->data[i].type = dev->types[i];
33112 }
33113
33114 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33115 index e2f70a5..c703e86 100644
33116 --- a/drivers/gpu/drm/drm_lock.c
33117 +++ b/drivers/gpu/drm/drm_lock.c
33118 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33119 if (drm_lock_take(&master->lock, lock->context)) {
33120 master->lock.file_priv = file_priv;
33121 master->lock.lock_time = jiffies;
33122 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33123 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33124 break; /* Got lock */
33125 }
33126
33127 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33128 return -EINVAL;
33129 }
33130
33131 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33132 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33133
33134 /* kernel_context_switch isn't used by any of the x86 drm
33135 * modules but is required by the Sparc driver.
33136 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33137 index 7d1d88c..b9131b2 100644
33138 --- a/drivers/gpu/drm/i810/i810_dma.c
33139 +++ b/drivers/gpu/drm/i810/i810_dma.c
33140 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33141 dma->buflist[vertex->idx],
33142 vertex->discard, vertex->used);
33143
33144 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33145 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33146 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33147 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33148 sarea_priv->last_enqueue = dev_priv->counter - 1;
33149 sarea_priv->last_dispatch = (int)hw_status[5];
33150
33151 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33152 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33153 mc->last_render);
33154
33155 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33156 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33157 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33158 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33159 sarea_priv->last_enqueue = dev_priv->counter - 1;
33160 sarea_priv->last_dispatch = (int)hw_status[5];
33161
33162 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33163 index 21e2691..7321edd 100644
33164 --- a/drivers/gpu/drm/i810/i810_drv.h
33165 +++ b/drivers/gpu/drm/i810/i810_drv.h
33166 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33167 int page_flipping;
33168
33169 wait_queue_head_t irq_queue;
33170 - atomic_t irq_received;
33171 - atomic_t irq_emitted;
33172 + atomic_unchecked_t irq_received;
33173 + atomic_unchecked_t irq_emitted;
33174
33175 int front_offset;
33176 } drm_i810_private_t;
33177 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33178 index da82afe..48a45de 100644
33179 --- a/drivers/gpu/drm/i830/i830_drv.h
33180 +++ b/drivers/gpu/drm/i830/i830_drv.h
33181 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33182 int page_flipping;
33183
33184 wait_queue_head_t irq_queue;
33185 - atomic_t irq_received;
33186 - atomic_t irq_emitted;
33187 + atomic_unchecked_t irq_received;
33188 + atomic_unchecked_t irq_emitted;
33189
33190 int use_mi_batchbuffer_start;
33191
33192 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33193 index 91ec2bb..6f21fab 100644
33194 --- a/drivers/gpu/drm/i830/i830_irq.c
33195 +++ b/drivers/gpu/drm/i830/i830_irq.c
33196 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33197
33198 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33199
33200 - atomic_inc(&dev_priv->irq_received);
33201 + atomic_inc_unchecked(&dev_priv->irq_received);
33202 wake_up_interruptible(&dev_priv->irq_queue);
33203
33204 return IRQ_HANDLED;
33205 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33206
33207 DRM_DEBUG("%s\n", __func__);
33208
33209 - atomic_inc(&dev_priv->irq_emitted);
33210 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33211
33212 BEGIN_LP_RING(2);
33213 OUT_RING(0);
33214 OUT_RING(GFX_OP_USER_INTERRUPT);
33215 ADVANCE_LP_RING();
33216
33217 - return atomic_read(&dev_priv->irq_emitted);
33218 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33219 }
33220
33221 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33222 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33223
33224 DRM_DEBUG("%s\n", __func__);
33225
33226 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33227 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33228 return 0;
33229
33230 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33231 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33232
33233 for (;;) {
33234 __set_current_state(TASK_INTERRUPTIBLE);
33235 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33236 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33237 break;
33238 if ((signed)(end - jiffies) <= 0) {
33239 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33240 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33241 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33242 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33243 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33244 - atomic_set(&dev_priv->irq_received, 0);
33245 - atomic_set(&dev_priv->irq_emitted, 0);
33246 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33247 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33248 init_waitqueue_head(&dev_priv->irq_queue);
33249 }
33250
33251 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33252 index 288fc50..c6092055 100644
33253 --- a/drivers/gpu/drm/i915/dvo.h
33254 +++ b/drivers/gpu/drm/i915/dvo.h
33255 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33256 *
33257 * \return singly-linked list of modes or NULL if no modes found.
33258 */
33259 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33260 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33261
33262 /**
33263 * Clean up driver-specific bits of the output
33264 */
33265 - void (*destroy) (struct intel_dvo_device *dvo);
33266 + void (* const destroy) (struct intel_dvo_device *dvo);
33267
33268 /**
33269 * Debugging hook to dump device registers to log file
33270 */
33271 - void (*dump_regs)(struct intel_dvo_device *dvo);
33272 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33273 };
33274
33275 -extern struct intel_dvo_dev_ops sil164_ops;
33276 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33277 -extern struct intel_dvo_dev_ops ivch_ops;
33278 -extern struct intel_dvo_dev_ops tfp410_ops;
33279 -extern struct intel_dvo_dev_ops ch7017_ops;
33280 +extern const struct intel_dvo_dev_ops sil164_ops;
33281 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33282 +extern const struct intel_dvo_dev_ops ivch_ops;
33283 +extern const struct intel_dvo_dev_ops tfp410_ops;
33284 +extern const struct intel_dvo_dev_ops ch7017_ops;
33285
33286 #endif /* _INTEL_DVO_H */
33287 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33288 index 621815b..499d82e 100644
33289 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33290 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33291 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33292 }
33293 }
33294
33295 -struct intel_dvo_dev_ops ch7017_ops = {
33296 +const struct intel_dvo_dev_ops ch7017_ops = {
33297 .init = ch7017_init,
33298 .detect = ch7017_detect,
33299 .mode_valid = ch7017_mode_valid,
33300 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33301 index a9b8962..ac769ba 100644
33302 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33303 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33304 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33305 }
33306 }
33307
33308 -struct intel_dvo_dev_ops ch7xxx_ops = {
33309 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33310 .init = ch7xxx_init,
33311 .detect = ch7xxx_detect,
33312 .mode_valid = ch7xxx_mode_valid,
33313 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33314 index aa176f9..ed2930c 100644
33315 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33316 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33317 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33318 }
33319 }
33320
33321 -struct intel_dvo_dev_ops ivch_ops= {
33322 +const struct intel_dvo_dev_ops ivch_ops= {
33323 .init = ivch_init,
33324 .dpms = ivch_dpms,
33325 .save = ivch_save,
33326 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33327 index e1c1f73..7dbebcf 100644
33328 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33329 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33330 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33331 }
33332 }
33333
33334 -struct intel_dvo_dev_ops sil164_ops = {
33335 +const struct intel_dvo_dev_ops sil164_ops = {
33336 .init = sil164_init,
33337 .detect = sil164_detect,
33338 .mode_valid = sil164_mode_valid,
33339 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33340 index 16dce84..7e1b6f8 100644
33341 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33342 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33343 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33344 }
33345 }
33346
33347 -struct intel_dvo_dev_ops tfp410_ops = {
33348 +const struct intel_dvo_dev_ops tfp410_ops = {
33349 .init = tfp410_init,
33350 .detect = tfp410_detect,
33351 .mode_valid = tfp410_mode_valid,
33352 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33353 index 7e859d6..7d1cf2b 100644
33354 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33355 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33356 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33357 I915_READ(GTIMR));
33358 }
33359 seq_printf(m, "Interrupts received: %d\n",
33360 - atomic_read(&dev_priv->irq_received));
33361 + atomic_read_unchecked(&dev_priv->irq_received));
33362 if (dev_priv->hw_status_page != NULL) {
33363 seq_printf(m, "Current sequence: %d\n",
33364 i915_get_gem_seqno(dev));
33365 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33366 index 5449239..7e4f68d 100644
33367 --- a/drivers/gpu/drm/i915/i915_drv.c
33368 +++ b/drivers/gpu/drm/i915/i915_drv.c
33369 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33370 return i915_resume(dev);
33371 }
33372
33373 -static struct vm_operations_struct i915_gem_vm_ops = {
33374 +static const struct vm_operations_struct i915_gem_vm_ops = {
33375 .fault = i915_gem_fault,
33376 .open = drm_gem_vm_open,
33377 .close = drm_gem_vm_close,
33378 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33379 index 97163f7..c24c7c7 100644
33380 --- a/drivers/gpu/drm/i915/i915_drv.h
33381 +++ b/drivers/gpu/drm/i915/i915_drv.h
33382 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33383 /* display clock increase/decrease */
33384 /* pll clock increase/decrease */
33385 /* clock gating init */
33386 -};
33387 +} __no_const;
33388
33389 typedef struct drm_i915_private {
33390 struct drm_device *dev;
33391 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33392 int page_flipping;
33393
33394 wait_queue_head_t irq_queue;
33395 - atomic_t irq_received;
33396 + atomic_unchecked_t irq_received;
33397 /** Protects user_irq_refcount and irq_mask_reg */
33398 spinlock_t user_irq_lock;
33399 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33400 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33401 index 27a3074..eb3f959 100644
33402 --- a/drivers/gpu/drm/i915/i915_gem.c
33403 +++ b/drivers/gpu/drm/i915/i915_gem.c
33404 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33405
33406 args->aper_size = dev->gtt_total;
33407 args->aper_available_size = (args->aper_size -
33408 - atomic_read(&dev->pin_memory));
33409 + atomic_read_unchecked(&dev->pin_memory));
33410
33411 return 0;
33412 }
33413 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33414
33415 if (obj_priv->gtt_space) {
33416 atomic_dec(&dev->gtt_count);
33417 - atomic_sub(obj->size, &dev->gtt_memory);
33418 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33419
33420 drm_mm_put_block(obj_priv->gtt_space);
33421 obj_priv->gtt_space = NULL;
33422 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33423 goto search_free;
33424 }
33425 atomic_inc(&dev->gtt_count);
33426 - atomic_add(obj->size, &dev->gtt_memory);
33427 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33428
33429 /* Assert that the object is not currently in any GPU domain. As it
33430 * wasn't in the GTT, there shouldn't be any way it could have been in
33431 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33432 "%d/%d gtt bytes\n",
33433 atomic_read(&dev->object_count),
33434 atomic_read(&dev->pin_count),
33435 - atomic_read(&dev->object_memory),
33436 - atomic_read(&dev->pin_memory),
33437 - atomic_read(&dev->gtt_memory),
33438 + atomic_read_unchecked(&dev->object_memory),
33439 + atomic_read_unchecked(&dev->pin_memory),
33440 + atomic_read_unchecked(&dev->gtt_memory),
33441 dev->gtt_total);
33442 }
33443 goto err;
33444 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33445 */
33446 if (obj_priv->pin_count == 1) {
33447 atomic_inc(&dev->pin_count);
33448 - atomic_add(obj->size, &dev->pin_memory);
33449 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33450 if (!obj_priv->active &&
33451 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33452 !list_empty(&obj_priv->list))
33453 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33454 list_move_tail(&obj_priv->list,
33455 &dev_priv->mm.inactive_list);
33456 atomic_dec(&dev->pin_count);
33457 - atomic_sub(obj->size, &dev->pin_memory);
33458 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33459 }
33460 i915_verify_inactive(dev, __FILE__, __LINE__);
33461 }
33462 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33463 index 63f28ad..f5469da 100644
33464 --- a/drivers/gpu/drm/i915/i915_irq.c
33465 +++ b/drivers/gpu/drm/i915/i915_irq.c
33466 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33467 int irq_received;
33468 int ret = IRQ_NONE;
33469
33470 - atomic_inc(&dev_priv->irq_received);
33471 + atomic_inc_unchecked(&dev_priv->irq_received);
33472
33473 if (IS_IGDNG(dev))
33474 return igdng_irq_handler(dev);
33475 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33476 {
33477 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33478
33479 - atomic_set(&dev_priv->irq_received, 0);
33480 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33481
33482 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33483 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33484 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33485 index 5d9c6a7..d1b0e29 100644
33486 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33487 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33488 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33489 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33490
33491 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33492 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33493 + pax_open_kernel();
33494 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33495 + pax_close_kernel();
33496
33497 /* Read the regs to test if we can talk to the device */
33498 for (i = 0; i < 0x40; i++) {
33499 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33500 index be6c6b9..8615d9c 100644
33501 --- a/drivers/gpu/drm/mga/mga_drv.h
33502 +++ b/drivers/gpu/drm/mga/mga_drv.h
33503 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33504 u32 clear_cmd;
33505 u32 maccess;
33506
33507 - atomic_t vbl_received; /**< Number of vblanks received. */
33508 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33509 wait_queue_head_t fence_queue;
33510 - atomic_t last_fence_retired;
33511 + atomic_unchecked_t last_fence_retired;
33512 u32 next_fence_to_post;
33513
33514 unsigned int fb_cpp;
33515 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33516 index daa6041..a28a5da 100644
33517 --- a/drivers/gpu/drm/mga/mga_irq.c
33518 +++ b/drivers/gpu/drm/mga/mga_irq.c
33519 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33520 if (crtc != 0)
33521 return 0;
33522
33523 - return atomic_read(&dev_priv->vbl_received);
33524 + return atomic_read_unchecked(&dev_priv->vbl_received);
33525 }
33526
33527
33528 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33529 /* VBLANK interrupt */
33530 if (status & MGA_VLINEPEN) {
33531 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33532 - atomic_inc(&dev_priv->vbl_received);
33533 + atomic_inc_unchecked(&dev_priv->vbl_received);
33534 drm_handle_vblank(dev, 0);
33535 handled = 1;
33536 }
33537 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33538 MGA_WRITE(MGA_PRIMEND, prim_end);
33539 }
33540
33541 - atomic_inc(&dev_priv->last_fence_retired);
33542 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33543 DRM_WAKEUP(&dev_priv->fence_queue);
33544 handled = 1;
33545 }
33546 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33547 * using fences.
33548 */
33549 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33550 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33551 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33552 - *sequence) <= (1 << 23)));
33553
33554 *sequence = cur_fence;
33555 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33556 index 4c39a40..b22a9ea 100644
33557 --- a/drivers/gpu/drm/r128/r128_cce.c
33558 +++ b/drivers/gpu/drm/r128/r128_cce.c
33559 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33560
33561 /* GH: Simple idle check.
33562 */
33563 - atomic_set(&dev_priv->idle_count, 0);
33564 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33565
33566 /* We don't support anything other than bus-mastering ring mode,
33567 * but the ring can be in either AGP or PCI space for the ring
33568 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33569 index 3c60829..4faf484 100644
33570 --- a/drivers/gpu/drm/r128/r128_drv.h
33571 +++ b/drivers/gpu/drm/r128/r128_drv.h
33572 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33573 int is_pci;
33574 unsigned long cce_buffers_offset;
33575
33576 - atomic_t idle_count;
33577 + atomic_unchecked_t idle_count;
33578
33579 int page_flipping;
33580 int current_page;
33581 u32 crtc_offset;
33582 u32 crtc_offset_cntl;
33583
33584 - atomic_t vbl_received;
33585 + atomic_unchecked_t vbl_received;
33586
33587 u32 color_fmt;
33588 unsigned int front_offset;
33589 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33590 index 69810fb..97bf17a 100644
33591 --- a/drivers/gpu/drm/r128/r128_irq.c
33592 +++ b/drivers/gpu/drm/r128/r128_irq.c
33593 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33594 if (crtc != 0)
33595 return 0;
33596
33597 - return atomic_read(&dev_priv->vbl_received);
33598 + return atomic_read_unchecked(&dev_priv->vbl_received);
33599 }
33600
33601 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33602 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33603 /* VBLANK interrupt */
33604 if (status & R128_CRTC_VBLANK_INT) {
33605 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33606 - atomic_inc(&dev_priv->vbl_received);
33607 + atomic_inc_unchecked(&dev_priv->vbl_received);
33608 drm_handle_vblank(dev, 0);
33609 return IRQ_HANDLED;
33610 }
33611 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33612 index af2665c..51922d2 100644
33613 --- a/drivers/gpu/drm/r128/r128_state.c
33614 +++ b/drivers/gpu/drm/r128/r128_state.c
33615 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33616
33617 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33618 {
33619 - if (atomic_read(&dev_priv->idle_count) == 0) {
33620 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33621 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33622 } else {
33623 - atomic_set(&dev_priv->idle_count, 0);
33624 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33625 }
33626 }
33627
33628 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33629 index dd72b91..8644b3c 100644
33630 --- a/drivers/gpu/drm/radeon/atom.c
33631 +++ b/drivers/gpu/drm/radeon/atom.c
33632 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33633 char name[512];
33634 int i;
33635
33636 + pax_track_stack();
33637 +
33638 ctx->card = card;
33639 ctx->bios = bios;
33640
33641 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33642 index 0d79577..efaa7a5 100644
33643 --- a/drivers/gpu/drm/radeon/mkregtable.c
33644 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33645 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33646 regex_t mask_rex;
33647 regmatch_t match[4];
33648 char buf[1024];
33649 - size_t end;
33650 + long end;
33651 int len;
33652 int done = 0;
33653 int r;
33654 unsigned o;
33655 struct offset *offset;
33656 char last_reg_s[10];
33657 - int last_reg;
33658 + unsigned long last_reg;
33659
33660 if (regcomp
33661 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33662 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33663 index 6735213..38c2c67 100644
33664 --- a/drivers/gpu/drm/radeon/radeon.h
33665 +++ b/drivers/gpu/drm/radeon/radeon.h
33666 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33667 */
33668 struct radeon_fence_driver {
33669 uint32_t scratch_reg;
33670 - atomic_t seq;
33671 + atomic_unchecked_t seq;
33672 uint32_t last_seq;
33673 unsigned long count_timeout;
33674 wait_queue_head_t queue;
33675 @@ -640,7 +640,7 @@ struct radeon_asic {
33676 uint32_t offset, uint32_t obj_size);
33677 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33678 void (*bandwidth_update)(struct radeon_device *rdev);
33679 -};
33680 +} __no_const;
33681
33682 /*
33683 * Asic structures
33684 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33685 index 4e928b9..d8b6008 100644
33686 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33687 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33688 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33689 bool linkb;
33690 struct radeon_i2c_bus_rec ddc_bus;
33691
33692 + pax_track_stack();
33693 +
33694 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33695
33696 if (data_offset == 0)
33697 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33698 }
33699 }
33700
33701 -struct bios_connector {
33702 +static struct bios_connector {
33703 bool valid;
33704 uint16_t line_mux;
33705 uint16_t devices;
33706 int connector_type;
33707 struct radeon_i2c_bus_rec ddc_bus;
33708 -};
33709 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33710
33711 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33712 drm_device
33713 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33714 uint8_t dac;
33715 union atom_supported_devices *supported_devices;
33716 int i, j;
33717 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33718
33719 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33720
33721 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33722 index 083a181..ccccae0 100644
33723 --- a/drivers/gpu/drm/radeon/radeon_display.c
33724 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33725 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33726
33727 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33728 error = freq - current_freq;
33729 - error = error < 0 ? 0xffffffff : error;
33730 + error = (int32_t)error < 0 ? 0xffffffff : error;
33731 } else
33732 error = abs(current_freq - freq);
33733 vco_diff = abs(vco - best_vco);
33734 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33735 index 76e4070..193fa7f 100644
33736 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33737 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33738 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33739
33740 /* SW interrupt */
33741 wait_queue_head_t swi_queue;
33742 - atomic_t swi_emitted;
33743 + atomic_unchecked_t swi_emitted;
33744 int vblank_crtc;
33745 uint32_t irq_enable_reg;
33746 uint32_t r500_disp_irq_reg;
33747 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33748 index 3beb26d..6ce9c4a 100644
33749 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33750 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33751 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33752 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33753 return 0;
33754 }
33755 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33756 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33757 if (!rdev->cp.ready) {
33758 /* FIXME: cp is not running assume everythings is done right
33759 * away
33760 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33761 return r;
33762 }
33763 WREG32(rdev->fence_drv.scratch_reg, 0);
33764 - atomic_set(&rdev->fence_drv.seq, 0);
33765 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33766 INIT_LIST_HEAD(&rdev->fence_drv.created);
33767 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33768 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33769 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33770 index a1bf11d..4a123c0 100644
33771 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33772 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33773 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33774 request = compat_alloc_user_space(sizeof(*request));
33775 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33776 || __put_user(req32.param, &request->param)
33777 - || __put_user((void __user *)(unsigned long)req32.value,
33778 + || __put_user((unsigned long)req32.value,
33779 &request->value))
33780 return -EFAULT;
33781
33782 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33783 index b79ecc4..8dab92d 100644
33784 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33785 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33786 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33787 unsigned int ret;
33788 RING_LOCALS;
33789
33790 - atomic_inc(&dev_priv->swi_emitted);
33791 - ret = atomic_read(&dev_priv->swi_emitted);
33792 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33793 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33794
33795 BEGIN_RING(4);
33796 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33797 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33798 drm_radeon_private_t *dev_priv =
33799 (drm_radeon_private_t *) dev->dev_private;
33800
33801 - atomic_set(&dev_priv->swi_emitted, 0);
33802 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33803 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33804
33805 dev->max_vblank_count = 0x001fffff;
33806 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33807 index 4747910..48ca4b3 100644
33808 --- a/drivers/gpu/drm/radeon/radeon_state.c
33809 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33810 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33811 {
33812 drm_radeon_private_t *dev_priv = dev->dev_private;
33813 drm_radeon_getparam_t *param = data;
33814 - int value;
33815 + int value = 0;
33816
33817 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33818
33819 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33820 index 1381e06..0e53b17 100644
33821 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33822 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33823 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33824 DRM_INFO("radeon: ttm finalized\n");
33825 }
33826
33827 -static struct vm_operations_struct radeon_ttm_vm_ops;
33828 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33829 -
33830 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33831 -{
33832 - struct ttm_buffer_object *bo;
33833 - int r;
33834 -
33835 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33836 - if (bo == NULL) {
33837 - return VM_FAULT_NOPAGE;
33838 - }
33839 - r = ttm_vm_ops->fault(vma, vmf);
33840 - return r;
33841 -}
33842 -
33843 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33844 {
33845 struct drm_file *file_priv;
33846 struct radeon_device *rdev;
33847 - int r;
33848
33849 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33850 return drm_mmap(filp, vma);
33851 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33852
33853 file_priv = (struct drm_file *)filp->private_data;
33854 rdev = file_priv->minor->dev->dev_private;
33855 - if (rdev == NULL) {
33856 + if (!rdev)
33857 return -EINVAL;
33858 - }
33859 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33860 - if (unlikely(r != 0)) {
33861 - return r;
33862 - }
33863 - if (unlikely(ttm_vm_ops == NULL)) {
33864 - ttm_vm_ops = vma->vm_ops;
33865 - radeon_ttm_vm_ops = *ttm_vm_ops;
33866 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33867 - }
33868 - vma->vm_ops = &radeon_ttm_vm_ops;
33869 - return 0;
33870 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33871 }
33872
33873
33874 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33875 index b12ff76..0bd0c6e 100644
33876 --- a/drivers/gpu/drm/radeon/rs690.c
33877 +++ b/drivers/gpu/drm/radeon/rs690.c
33878 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33879 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33880 rdev->pm.sideport_bandwidth.full)
33881 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33882 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33883 + read_delay_latency.full = rfixed_const(800 * 1000);
33884 read_delay_latency.full = rfixed_div(read_delay_latency,
33885 rdev->pm.igp_sideport_mclk);
33886 + a.full = rfixed_const(370);
33887 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33888 } else {
33889 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33890 rdev->pm.k8_bandwidth.full)
33891 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33892 index 0ed436e..e6e7ce3 100644
33893 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33894 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33895 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33896 NULL
33897 };
33898
33899 -static struct sysfs_ops ttm_bo_global_ops = {
33900 +static const struct sysfs_ops ttm_bo_global_ops = {
33901 .show = &ttm_bo_global_show
33902 };
33903
33904 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33905 index 1c040d0..f9e4af8 100644
33906 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33907 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33908 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33909 {
33910 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33911 vma->vm_private_data;
33912 - struct ttm_bo_device *bdev = bo->bdev;
33913 + struct ttm_bo_device *bdev;
33914 unsigned long bus_base;
33915 unsigned long bus_offset;
33916 unsigned long bus_size;
33917 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33918 unsigned long address = (unsigned long)vmf->virtual_address;
33919 int retval = VM_FAULT_NOPAGE;
33920
33921 + if (!bo)
33922 + return VM_FAULT_NOPAGE;
33923 + bdev = bo->bdev;
33924 +
33925 /*
33926 * Work around locking order reversal in fault / nopfn
33927 * between mmap_sem and bo_reserve: Perform a trylock operation
33928 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33929 index b170071..28ae90e 100644
33930 --- a/drivers/gpu/drm/ttm/ttm_global.c
33931 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33932 @@ -36,7 +36,7 @@
33933 struct ttm_global_item {
33934 struct mutex mutex;
33935 void *object;
33936 - int refcount;
33937 + atomic_t refcount;
33938 };
33939
33940 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33941 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33942 struct ttm_global_item *item = &glob[i];
33943 mutex_init(&item->mutex);
33944 item->object = NULL;
33945 - item->refcount = 0;
33946 + atomic_set(&item->refcount, 0);
33947 }
33948 }
33949
33950 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33951 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33952 struct ttm_global_item *item = &glob[i];
33953 BUG_ON(item->object != NULL);
33954 - BUG_ON(item->refcount != 0);
33955 + BUG_ON(atomic_read(&item->refcount) != 0);
33956 }
33957 }
33958
33959 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33960 void *object;
33961
33962 mutex_lock(&item->mutex);
33963 - if (item->refcount == 0) {
33964 + if (atomic_read(&item->refcount) == 0) {
33965 item->object = kzalloc(ref->size, GFP_KERNEL);
33966 if (unlikely(item->object == NULL)) {
33967 ret = -ENOMEM;
33968 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33969 goto out_err;
33970
33971 }
33972 - ++item->refcount;
33973 + atomic_inc(&item->refcount);
33974 ref->object = item->object;
33975 object = item->object;
33976 mutex_unlock(&item->mutex);
33977 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33978 struct ttm_global_item *item = &glob[ref->global_type];
33979
33980 mutex_lock(&item->mutex);
33981 - BUG_ON(item->refcount == 0);
33982 + BUG_ON(atomic_read(&item->refcount) == 0);
33983 BUG_ON(ref->object != item->object);
33984 - if (--item->refcount == 0) {
33985 + if (atomic_dec_and_test(&item->refcount)) {
33986 ref->release(ref);
33987 item->object = NULL;
33988 }
33989 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33990 index 072c281..d8ef483 100644
33991 --- a/drivers/gpu/drm/ttm/ttm_memory.c
33992 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
33993 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33994 NULL
33995 };
33996
33997 -static struct sysfs_ops ttm_mem_zone_ops = {
33998 +static const struct sysfs_ops ttm_mem_zone_ops = {
33999 .show = &ttm_mem_zone_show,
34000 .store = &ttm_mem_zone_store
34001 };
34002 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34003 index cafcb84..b8e66cc 100644
34004 --- a/drivers/gpu/drm/via/via_drv.h
34005 +++ b/drivers/gpu/drm/via/via_drv.h
34006 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34007 typedef uint32_t maskarray_t[5];
34008
34009 typedef struct drm_via_irq {
34010 - atomic_t irq_received;
34011 + atomic_unchecked_t irq_received;
34012 uint32_t pending_mask;
34013 uint32_t enable_mask;
34014 wait_queue_head_t irq_queue;
34015 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34016 struct timeval last_vblank;
34017 int last_vblank_valid;
34018 unsigned usec_per_vblank;
34019 - atomic_t vbl_received;
34020 + atomic_unchecked_t vbl_received;
34021 drm_via_state_t hc_state;
34022 char pci_buf[VIA_PCI_BUF_SIZE];
34023 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34024 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34025 index 5935b88..127a8a6 100644
34026 --- a/drivers/gpu/drm/via/via_irq.c
34027 +++ b/drivers/gpu/drm/via/via_irq.c
34028 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34029 if (crtc != 0)
34030 return 0;
34031
34032 - return atomic_read(&dev_priv->vbl_received);
34033 + return atomic_read_unchecked(&dev_priv->vbl_received);
34034 }
34035
34036 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34037 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34038
34039 status = VIA_READ(VIA_REG_INTERRUPT);
34040 if (status & VIA_IRQ_VBLANK_PENDING) {
34041 - atomic_inc(&dev_priv->vbl_received);
34042 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34043 + atomic_inc_unchecked(&dev_priv->vbl_received);
34044 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34045 do_gettimeofday(&cur_vblank);
34046 if (dev_priv->last_vblank_valid) {
34047 dev_priv->usec_per_vblank =
34048 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34049 dev_priv->last_vblank = cur_vblank;
34050 dev_priv->last_vblank_valid = 1;
34051 }
34052 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34053 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34054 DRM_DEBUG("US per vblank is: %u\n",
34055 dev_priv->usec_per_vblank);
34056 }
34057 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34058
34059 for (i = 0; i < dev_priv->num_irqs; ++i) {
34060 if (status & cur_irq->pending_mask) {
34061 - atomic_inc(&cur_irq->irq_received);
34062 + atomic_inc_unchecked(&cur_irq->irq_received);
34063 DRM_WAKEUP(&cur_irq->irq_queue);
34064 handled = 1;
34065 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34066 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34067 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34068 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34069 masks[irq][4]));
34070 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34071 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34072 } else {
34073 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34074 (((cur_irq_sequence =
34075 - atomic_read(&cur_irq->irq_received)) -
34076 + atomic_read_unchecked(&cur_irq->irq_received)) -
34077 *sequence) <= (1 << 23)));
34078 }
34079 *sequence = cur_irq_sequence;
34080 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34081 }
34082
34083 for (i = 0; i < dev_priv->num_irqs; ++i) {
34084 - atomic_set(&cur_irq->irq_received, 0);
34085 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34086 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34087 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34088 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34089 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34090 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34091 case VIA_IRQ_RELATIVE:
34092 irqwait->request.sequence +=
34093 - atomic_read(&cur_irq->irq_received);
34094 + atomic_read_unchecked(&cur_irq->irq_received);
34095 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34096 case VIA_IRQ_ABSOLUTE:
34097 break;
34098 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34099 index aa8688d..6a0140c 100644
34100 --- a/drivers/gpu/vga/vgaarb.c
34101 +++ b/drivers/gpu/vga/vgaarb.c
34102 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34103 uc = &priv->cards[i];
34104 }
34105
34106 - if (!uc)
34107 - return -EINVAL;
34108 + if (!uc) {
34109 + ret_val = -EINVAL;
34110 + goto done;
34111 + }
34112
34113 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34114 - return -EINVAL;
34115 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34116 + ret_val = -EINVAL;
34117 + goto done;
34118 + }
34119
34120 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34121 - return -EINVAL;
34122 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34123 + ret_val = -EINVAL;
34124 + goto done;
34125 + }
34126
34127 vga_put(pdev, io_state);
34128
34129 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34130 index 11f8069..4783396 100644
34131 --- a/drivers/hid/hid-core.c
34132 +++ b/drivers/hid/hid-core.c
34133 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34134
34135 int hid_add_device(struct hid_device *hdev)
34136 {
34137 - static atomic_t id = ATOMIC_INIT(0);
34138 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34139 int ret;
34140
34141 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34142 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34143 /* XXX hack, any other cleaner solution after the driver core
34144 * is converted to allow more than 20 bytes as the device name? */
34145 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34146 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34147 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34148
34149 ret = device_add(&hdev->dev);
34150 if (!ret)
34151 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34152 index 8b6ee24..70f657d 100644
34153 --- a/drivers/hid/usbhid/hiddev.c
34154 +++ b/drivers/hid/usbhid/hiddev.c
34155 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34156 return put_user(HID_VERSION, (int __user *)arg);
34157
34158 case HIDIOCAPPLICATION:
34159 - if (arg < 0 || arg >= hid->maxapplication)
34160 + if (arg >= hid->maxapplication)
34161 return -EINVAL;
34162
34163 for (i = 0; i < hid->maxcollection; i++)
34164 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34165 index 5d5ed69..f40533e 100644
34166 --- a/drivers/hwmon/lis3lv02d.c
34167 +++ b/drivers/hwmon/lis3lv02d.c
34168 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34169 * the lid is closed. This leads to interrupts as soon as a little move
34170 * is done.
34171 */
34172 - atomic_inc(&lis3_dev.count);
34173 + atomic_inc_unchecked(&lis3_dev.count);
34174
34175 wake_up_interruptible(&lis3_dev.misc_wait);
34176 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34177 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34178 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34179 return -EBUSY; /* already open */
34180
34181 - atomic_set(&lis3_dev.count, 0);
34182 + atomic_set_unchecked(&lis3_dev.count, 0);
34183
34184 /*
34185 * The sensor can generate interrupts for free-fall and direction
34186 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34187 add_wait_queue(&lis3_dev.misc_wait, &wait);
34188 while (true) {
34189 set_current_state(TASK_INTERRUPTIBLE);
34190 - data = atomic_xchg(&lis3_dev.count, 0);
34191 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34192 if (data)
34193 break;
34194
34195 @@ -244,7 +244,7 @@ out:
34196 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34197 {
34198 poll_wait(file, &lis3_dev.misc_wait, wait);
34199 - if (atomic_read(&lis3_dev.count))
34200 + if (atomic_read_unchecked(&lis3_dev.count))
34201 return POLLIN | POLLRDNORM;
34202 return 0;
34203 }
34204 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34205 index 7cdd76f..fe0efdf 100644
34206 --- a/drivers/hwmon/lis3lv02d.h
34207 +++ b/drivers/hwmon/lis3lv02d.h
34208 @@ -201,7 +201,7 @@ struct lis3lv02d {
34209
34210 struct input_polled_dev *idev; /* input device */
34211 struct platform_device *pdev; /* platform device */
34212 - atomic_t count; /* interrupt count after last read */
34213 + atomic_unchecked_t count; /* interrupt count after last read */
34214 int xcalib; /* calibrated null value for x */
34215 int ycalib; /* calibrated null value for y */
34216 int zcalib; /* calibrated null value for z */
34217 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34218 index 740785e..5a5c6c6 100644
34219 --- a/drivers/hwmon/sht15.c
34220 +++ b/drivers/hwmon/sht15.c
34221 @@ -112,7 +112,7 @@ struct sht15_data {
34222 int supply_uV;
34223 int supply_uV_valid;
34224 struct work_struct update_supply_work;
34225 - atomic_t interrupt_handled;
34226 + atomic_unchecked_t interrupt_handled;
34227 };
34228
34229 /**
34230 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34231 return ret;
34232
34233 gpio_direction_input(data->pdata->gpio_data);
34234 - atomic_set(&data->interrupt_handled, 0);
34235 + atomic_set_unchecked(&data->interrupt_handled, 0);
34236
34237 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34238 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34239 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34240 /* Only relevant if the interrupt hasn't occured. */
34241 - if (!atomic_read(&data->interrupt_handled))
34242 + if (!atomic_read_unchecked(&data->interrupt_handled))
34243 schedule_work(&data->read_work);
34244 }
34245 ret = wait_event_timeout(data->wait_queue,
34246 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34247 struct sht15_data *data = d;
34248 /* First disable the interrupt */
34249 disable_irq_nosync(irq);
34250 - atomic_inc(&data->interrupt_handled);
34251 + atomic_inc_unchecked(&data->interrupt_handled);
34252 /* Then schedule a reading work struct */
34253 if (data->flag != SHT15_READING_NOTHING)
34254 schedule_work(&data->read_work);
34255 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34256 here as could have gone low in meantime so verify
34257 it hasn't!
34258 */
34259 - atomic_set(&data->interrupt_handled, 0);
34260 + atomic_set_unchecked(&data->interrupt_handled, 0);
34261 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34262 /* If still not occured or another handler has been scheduled */
34263 if (gpio_get_value(data->pdata->gpio_data)
34264 - || atomic_read(&data->interrupt_handled))
34265 + || atomic_read_unchecked(&data->interrupt_handled))
34266 return;
34267 }
34268 /* Read the data back from the device */
34269 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34270 index 97851c5..cb40626 100644
34271 --- a/drivers/hwmon/w83791d.c
34272 +++ b/drivers/hwmon/w83791d.c
34273 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34274 struct i2c_board_info *info);
34275 static int w83791d_remove(struct i2c_client *client);
34276
34277 -static int w83791d_read(struct i2c_client *client, u8 register);
34278 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34279 +static int w83791d_read(struct i2c_client *client, u8 reg);
34280 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34281 static struct w83791d_data *w83791d_update_device(struct device *dev);
34282
34283 #ifdef DEBUG
34284 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34285 index 378fcb5..5e91fa8 100644
34286 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34287 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34288 @@ -43,7 +43,7 @@
34289 extern struct i2c_adapter amd756_smbus;
34290
34291 static struct i2c_adapter *s4882_adapter;
34292 -static struct i2c_algorithm *s4882_algo;
34293 +static i2c_algorithm_no_const *s4882_algo;
34294
34295 /* Wrapper access functions for multiplexed SMBus */
34296 static DEFINE_MUTEX(amd756_lock);
34297 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34298 index 29015eb..af2d8e9 100644
34299 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34300 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34301 @@ -41,7 +41,7 @@
34302 extern struct i2c_adapter *nforce2_smbus;
34303
34304 static struct i2c_adapter *s4985_adapter;
34305 -static struct i2c_algorithm *s4985_algo;
34306 +static i2c_algorithm_no_const *s4985_algo;
34307
34308 /* Wrapper access functions for multiplexed SMBus */
34309 static DEFINE_MUTEX(nforce2_lock);
34310 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34311 index 878f8ec..12376fc 100644
34312 --- a/drivers/ide/aec62xx.c
34313 +++ b/drivers/ide/aec62xx.c
34314 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34315 .cable_detect = atp86x_cable_detect,
34316 };
34317
34318 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34319 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34320 { /* 0: AEC6210 */
34321 .name = DRV_NAME,
34322 .init_chipset = init_chipset_aec62xx,
34323 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34324 index e59b6de..4b4fc65 100644
34325 --- a/drivers/ide/alim15x3.c
34326 +++ b/drivers/ide/alim15x3.c
34327 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34328 .dma_sff_read_status = ide_dma_sff_read_status,
34329 };
34330
34331 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34332 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34333 .name = DRV_NAME,
34334 .init_chipset = init_chipset_ali15x3,
34335 .init_hwif = init_hwif_ali15x3,
34336 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34337 index 628cd2e..087a414 100644
34338 --- a/drivers/ide/amd74xx.c
34339 +++ b/drivers/ide/amd74xx.c
34340 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34341 .udma_mask = udma, \
34342 }
34343
34344 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34345 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34346 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34347 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34348 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34349 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34350 index 837322b..837fd71 100644
34351 --- a/drivers/ide/atiixp.c
34352 +++ b/drivers/ide/atiixp.c
34353 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34354 .cable_detect = atiixp_cable_detect,
34355 };
34356
34357 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34358 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34359 { /* 0: IXP200/300/400/700 */
34360 .name = DRV_NAME,
34361 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34362 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34363 index ca0c46f..d55318a 100644
34364 --- a/drivers/ide/cmd64x.c
34365 +++ b/drivers/ide/cmd64x.c
34366 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34367 .dma_sff_read_status = ide_dma_sff_read_status,
34368 };
34369
34370 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34371 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34372 { /* 0: CMD643 */
34373 .name = DRV_NAME,
34374 .init_chipset = init_chipset_cmd64x,
34375 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34376 index 09f98ed..cebc5bc 100644
34377 --- a/drivers/ide/cs5520.c
34378 +++ b/drivers/ide/cs5520.c
34379 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34380 .set_dma_mode = cs5520_set_dma_mode,
34381 };
34382
34383 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34384 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34385 .name = DRV_NAME,
34386 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34387 .port_ops = &cs5520_port_ops,
34388 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34389 index 40bf05e..7d58ca0 100644
34390 --- a/drivers/ide/cs5530.c
34391 +++ b/drivers/ide/cs5530.c
34392 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34393 .udma_filter = cs5530_udma_filter,
34394 };
34395
34396 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34397 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34398 .name = DRV_NAME,
34399 .init_chipset = init_chipset_cs5530,
34400 .init_hwif = init_hwif_cs5530,
34401 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34402 index 983d957..53e6172 100644
34403 --- a/drivers/ide/cs5535.c
34404 +++ b/drivers/ide/cs5535.c
34405 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34406 .cable_detect = cs5535_cable_detect,
34407 };
34408
34409 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34410 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34411 .name = DRV_NAME,
34412 .port_ops = &cs5535_port_ops,
34413 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34414 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34415 index 74fc540..8e933d8 100644
34416 --- a/drivers/ide/cy82c693.c
34417 +++ b/drivers/ide/cy82c693.c
34418 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34419 .set_dma_mode = cy82c693_set_dma_mode,
34420 };
34421
34422 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34423 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34424 .name = DRV_NAME,
34425 .init_iops = init_iops_cy82c693,
34426 .port_ops = &cy82c693_port_ops,
34427 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34428 index 7ce68ef..e78197d 100644
34429 --- a/drivers/ide/hpt366.c
34430 +++ b/drivers/ide/hpt366.c
34431 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34432 }
34433 };
34434
34435 -static const struct hpt_info hpt36x __devinitdata = {
34436 +static const struct hpt_info hpt36x __devinitconst = {
34437 .chip_name = "HPT36x",
34438 .chip_type = HPT36x,
34439 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34440 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34441 .timings = &hpt36x_timings
34442 };
34443
34444 -static const struct hpt_info hpt370 __devinitdata = {
34445 +static const struct hpt_info hpt370 __devinitconst = {
34446 .chip_name = "HPT370",
34447 .chip_type = HPT370,
34448 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34449 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34450 .timings = &hpt37x_timings
34451 };
34452
34453 -static const struct hpt_info hpt370a __devinitdata = {
34454 +static const struct hpt_info hpt370a __devinitconst = {
34455 .chip_name = "HPT370A",
34456 .chip_type = HPT370A,
34457 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34458 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34459 .timings = &hpt37x_timings
34460 };
34461
34462 -static const struct hpt_info hpt374 __devinitdata = {
34463 +static const struct hpt_info hpt374 __devinitconst = {
34464 .chip_name = "HPT374",
34465 .chip_type = HPT374,
34466 .udma_mask = ATA_UDMA5,
34467 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34468 .timings = &hpt37x_timings
34469 };
34470
34471 -static const struct hpt_info hpt372 __devinitdata = {
34472 +static const struct hpt_info hpt372 __devinitconst = {
34473 .chip_name = "HPT372",
34474 .chip_type = HPT372,
34475 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34476 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34477 .timings = &hpt37x_timings
34478 };
34479
34480 -static const struct hpt_info hpt372a __devinitdata = {
34481 +static const struct hpt_info hpt372a __devinitconst = {
34482 .chip_name = "HPT372A",
34483 .chip_type = HPT372A,
34484 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34485 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34486 .timings = &hpt37x_timings
34487 };
34488
34489 -static const struct hpt_info hpt302 __devinitdata = {
34490 +static const struct hpt_info hpt302 __devinitconst = {
34491 .chip_name = "HPT302",
34492 .chip_type = HPT302,
34493 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34494 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34495 .timings = &hpt37x_timings
34496 };
34497
34498 -static const struct hpt_info hpt371 __devinitdata = {
34499 +static const struct hpt_info hpt371 __devinitconst = {
34500 .chip_name = "HPT371",
34501 .chip_type = HPT371,
34502 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34503 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34504 .timings = &hpt37x_timings
34505 };
34506
34507 -static const struct hpt_info hpt372n __devinitdata = {
34508 +static const struct hpt_info hpt372n __devinitconst = {
34509 .chip_name = "HPT372N",
34510 .chip_type = HPT372N,
34511 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34512 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34513 .timings = &hpt37x_timings
34514 };
34515
34516 -static const struct hpt_info hpt302n __devinitdata = {
34517 +static const struct hpt_info hpt302n __devinitconst = {
34518 .chip_name = "HPT302N",
34519 .chip_type = HPT302N,
34520 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34521 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34522 .timings = &hpt37x_timings
34523 };
34524
34525 -static const struct hpt_info hpt371n __devinitdata = {
34526 +static const struct hpt_info hpt371n __devinitconst = {
34527 .chip_name = "HPT371N",
34528 .chip_type = HPT371N,
34529 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34530 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34531 .dma_sff_read_status = ide_dma_sff_read_status,
34532 };
34533
34534 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34535 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34536 { /* 0: HPT36x */
34537 .name = DRV_NAME,
34538 .init_chipset = init_chipset_hpt366,
34539 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34540 index 2de76cc..74186a1 100644
34541 --- a/drivers/ide/ide-cd.c
34542 +++ b/drivers/ide/ide-cd.c
34543 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34544 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34545 if ((unsigned long)buf & alignment
34546 || blk_rq_bytes(rq) & q->dma_pad_mask
34547 - || object_is_on_stack(buf))
34548 + || object_starts_on_stack(buf))
34549 drive->dma = 0;
34550 }
34551 }
34552 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34553 index fefbdfc..62ff465 100644
34554 --- a/drivers/ide/ide-floppy.c
34555 +++ b/drivers/ide/ide-floppy.c
34556 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34557 u8 pc_buf[256], header_len, desc_cnt;
34558 int i, rc = 1, blocks, length;
34559
34560 + pax_track_stack();
34561 +
34562 ide_debug_log(IDE_DBG_FUNC, "enter");
34563
34564 drive->bios_cyl = 0;
34565 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34566 index 39d4e01..11538ce 100644
34567 --- a/drivers/ide/ide-pci-generic.c
34568 +++ b/drivers/ide/ide-pci-generic.c
34569 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34570 .udma_mask = ATA_UDMA6, \
34571 }
34572
34573 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34574 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34575 /* 0: Unknown */
34576 DECLARE_GENERIC_PCI_DEV(0),
34577
34578 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34579 index 0d266a5..aaca790 100644
34580 --- a/drivers/ide/it8172.c
34581 +++ b/drivers/ide/it8172.c
34582 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34583 .set_dma_mode = it8172_set_dma_mode,
34584 };
34585
34586 -static const struct ide_port_info it8172_port_info __devinitdata = {
34587 +static const struct ide_port_info it8172_port_info __devinitconst = {
34588 .name = DRV_NAME,
34589 .port_ops = &it8172_port_ops,
34590 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34591 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34592 index 4797616..4be488a 100644
34593 --- a/drivers/ide/it8213.c
34594 +++ b/drivers/ide/it8213.c
34595 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34596 .cable_detect = it8213_cable_detect,
34597 };
34598
34599 -static const struct ide_port_info it8213_chipset __devinitdata = {
34600 +static const struct ide_port_info it8213_chipset __devinitconst = {
34601 .name = DRV_NAME,
34602 .enablebits = { {0x41, 0x80, 0x80} },
34603 .port_ops = &it8213_port_ops,
34604 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34605 index 51aa745..146ee60 100644
34606 --- a/drivers/ide/it821x.c
34607 +++ b/drivers/ide/it821x.c
34608 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34609 .cable_detect = it821x_cable_detect,
34610 };
34611
34612 -static const struct ide_port_info it821x_chipset __devinitdata = {
34613 +static const struct ide_port_info it821x_chipset __devinitconst = {
34614 .name = DRV_NAME,
34615 .init_chipset = init_chipset_it821x,
34616 .init_hwif = init_hwif_it821x,
34617 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34618 index bf2be64..9270098 100644
34619 --- a/drivers/ide/jmicron.c
34620 +++ b/drivers/ide/jmicron.c
34621 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34622 .cable_detect = jmicron_cable_detect,
34623 };
34624
34625 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34626 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34627 .name = DRV_NAME,
34628 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34629 .port_ops = &jmicron_port_ops,
34630 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34631 index 95327a2..73f78d8 100644
34632 --- a/drivers/ide/ns87415.c
34633 +++ b/drivers/ide/ns87415.c
34634 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34635 .dma_sff_read_status = superio_dma_sff_read_status,
34636 };
34637
34638 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34639 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34640 .name = DRV_NAME,
34641 .init_hwif = init_hwif_ns87415,
34642 .tp_ops = &ns87415_tp_ops,
34643 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34644 index f1d70d6..e1de05b 100644
34645 --- a/drivers/ide/opti621.c
34646 +++ b/drivers/ide/opti621.c
34647 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34648 .set_pio_mode = opti621_set_pio_mode,
34649 };
34650
34651 -static const struct ide_port_info opti621_chipset __devinitdata = {
34652 +static const struct ide_port_info opti621_chipset __devinitconst = {
34653 .name = DRV_NAME,
34654 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34655 .port_ops = &opti621_port_ops,
34656 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34657 index 65ba823..7311f4d 100644
34658 --- a/drivers/ide/pdc202xx_new.c
34659 +++ b/drivers/ide/pdc202xx_new.c
34660 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34661 .udma_mask = udma, \
34662 }
34663
34664 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34665 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34666 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34667 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34668 };
34669 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34670 index cb812f3..af816ef 100644
34671 --- a/drivers/ide/pdc202xx_old.c
34672 +++ b/drivers/ide/pdc202xx_old.c
34673 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34674 .max_sectors = sectors, \
34675 }
34676
34677 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34678 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34679 { /* 0: PDC20246 */
34680 .name = DRV_NAME,
34681 .init_chipset = init_chipset_pdc202xx,
34682 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34683 index bf14f39..15c4b98 100644
34684 --- a/drivers/ide/piix.c
34685 +++ b/drivers/ide/piix.c
34686 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34687 .udma_mask = udma, \
34688 }
34689
34690 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34691 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34692 /* 0: MPIIX */
34693 { /*
34694 * MPIIX actually has only a single IDE channel mapped to
34695 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34696 index a6414a8..c04173e 100644
34697 --- a/drivers/ide/rz1000.c
34698 +++ b/drivers/ide/rz1000.c
34699 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34700 }
34701 }
34702
34703 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34704 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34705 .name = DRV_NAME,
34706 .host_flags = IDE_HFLAG_NO_DMA,
34707 };
34708 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34709 index d467478..9203942 100644
34710 --- a/drivers/ide/sc1200.c
34711 +++ b/drivers/ide/sc1200.c
34712 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34713 .dma_sff_read_status = ide_dma_sff_read_status,
34714 };
34715
34716 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34717 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34718 .name = DRV_NAME,
34719 .port_ops = &sc1200_port_ops,
34720 .dma_ops = &sc1200_dma_ops,
34721 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34722 index 1104bb3..59c5194 100644
34723 --- a/drivers/ide/scc_pata.c
34724 +++ b/drivers/ide/scc_pata.c
34725 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34726 .dma_sff_read_status = scc_dma_sff_read_status,
34727 };
34728
34729 -static const struct ide_port_info scc_chipset __devinitdata = {
34730 +static const struct ide_port_info scc_chipset __devinitconst = {
34731 .name = "sccIDE",
34732 .init_iops = init_iops_scc,
34733 .init_dma = scc_init_dma,
34734 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34735 index b6554ef..6cc2cc3 100644
34736 --- a/drivers/ide/serverworks.c
34737 +++ b/drivers/ide/serverworks.c
34738 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34739 .cable_detect = svwks_cable_detect,
34740 };
34741
34742 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34743 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34744 { /* 0: OSB4 */
34745 .name = DRV_NAME,
34746 .init_chipset = init_chipset_svwks,
34747 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34748 index ab3db61..afed580 100644
34749 --- a/drivers/ide/setup-pci.c
34750 +++ b/drivers/ide/setup-pci.c
34751 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34752 int ret, i, n_ports = dev2 ? 4 : 2;
34753 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34754
34755 + pax_track_stack();
34756 +
34757 for (i = 0; i < n_ports / 2; i++) {
34758 ret = ide_setup_pci_controller(pdev[i], d, !i);
34759 if (ret < 0)
34760 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34761 index d95df52..0b03a39 100644
34762 --- a/drivers/ide/siimage.c
34763 +++ b/drivers/ide/siimage.c
34764 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34765 .udma_mask = ATA_UDMA6, \
34766 }
34767
34768 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34769 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34770 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34771 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34772 };
34773 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34774 index 3b88eba..ca8699d 100644
34775 --- a/drivers/ide/sis5513.c
34776 +++ b/drivers/ide/sis5513.c
34777 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34778 .cable_detect = sis_cable_detect,
34779 };
34780
34781 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34782 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34783 .name = DRV_NAME,
34784 .init_chipset = init_chipset_sis5513,
34785 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34786 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34787 index d698da4..fca42a4 100644
34788 --- a/drivers/ide/sl82c105.c
34789 +++ b/drivers/ide/sl82c105.c
34790 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34791 .dma_sff_read_status = ide_dma_sff_read_status,
34792 };
34793
34794 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34795 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34796 .name = DRV_NAME,
34797 .init_chipset = init_chipset_sl82c105,
34798 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34799 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34800 index 1ccfb40..83d5779 100644
34801 --- a/drivers/ide/slc90e66.c
34802 +++ b/drivers/ide/slc90e66.c
34803 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34804 .cable_detect = slc90e66_cable_detect,
34805 };
34806
34807 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34808 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34809 .name = DRV_NAME,
34810 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34811 .port_ops = &slc90e66_port_ops,
34812 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34813 index 05a93d6..5f9e325 100644
34814 --- a/drivers/ide/tc86c001.c
34815 +++ b/drivers/ide/tc86c001.c
34816 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34817 .dma_sff_read_status = ide_dma_sff_read_status,
34818 };
34819
34820 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34821 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34822 .name = DRV_NAME,
34823 .init_hwif = init_hwif_tc86c001,
34824 .port_ops = &tc86c001_port_ops,
34825 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34826 index 8773c3b..7907d6c 100644
34827 --- a/drivers/ide/triflex.c
34828 +++ b/drivers/ide/triflex.c
34829 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34830 .set_dma_mode = triflex_set_mode,
34831 };
34832
34833 -static const struct ide_port_info triflex_device __devinitdata = {
34834 +static const struct ide_port_info triflex_device __devinitconst = {
34835 .name = DRV_NAME,
34836 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34837 .port_ops = &triflex_port_ops,
34838 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34839 index 4b42ca0..e494a98 100644
34840 --- a/drivers/ide/trm290.c
34841 +++ b/drivers/ide/trm290.c
34842 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34843 .dma_check = trm290_dma_check,
34844 };
34845
34846 -static const struct ide_port_info trm290_chipset __devinitdata = {
34847 +static const struct ide_port_info trm290_chipset __devinitconst = {
34848 .name = DRV_NAME,
34849 .init_hwif = init_hwif_trm290,
34850 .tp_ops = &trm290_tp_ops,
34851 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34852 index 028de26..520d5d5 100644
34853 --- a/drivers/ide/via82cxxx.c
34854 +++ b/drivers/ide/via82cxxx.c
34855 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34856 .cable_detect = via82cxxx_cable_detect,
34857 };
34858
34859 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34860 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34861 .name = DRV_NAME,
34862 .init_chipset = init_chipset_via82cxxx,
34863 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34864 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34865 index 2cd00b5..14de699 100644
34866 --- a/drivers/ieee1394/dv1394.c
34867 +++ b/drivers/ieee1394/dv1394.c
34868 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34869 based upon DIF section and sequence
34870 */
34871
34872 -static void inline
34873 +static inline void
34874 frame_put_packet (struct frame *f, struct packet *p)
34875 {
34876 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34877 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34878 index e947d8f..6a966b9 100644
34879 --- a/drivers/ieee1394/hosts.c
34880 +++ b/drivers/ieee1394/hosts.c
34881 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34882 }
34883
34884 static struct hpsb_host_driver dummy_driver = {
34885 + .name = "dummy",
34886 .transmit_packet = dummy_transmit_packet,
34887 .devctl = dummy_devctl,
34888 .isoctl = dummy_isoctl
34889 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34890 index ddaab6e..8d37435 100644
34891 --- a/drivers/ieee1394/init_ohci1394_dma.c
34892 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34893 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34894 for (func = 0; func < 8; func++) {
34895 u32 class = read_pci_config(num,slot,func,
34896 PCI_CLASS_REVISION);
34897 - if ((class == 0xffffffff))
34898 + if (class == 0xffffffff)
34899 continue; /* No device at this func */
34900
34901 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34902 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34903 index 65c1429..5d8c11f 100644
34904 --- a/drivers/ieee1394/ohci1394.c
34905 +++ b/drivers/ieee1394/ohci1394.c
34906 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34907 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34908
34909 /* Module Parameters */
34910 -static int phys_dma = 1;
34911 +static int phys_dma;
34912 module_param(phys_dma, int, 0444);
34913 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34914 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34915
34916 static void dma_trm_tasklet(unsigned long data);
34917 static void dma_trm_reset(struct dma_trm_ctx *d);
34918 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34919 index f199896..78c9fc8 100644
34920 --- a/drivers/ieee1394/sbp2.c
34921 +++ b/drivers/ieee1394/sbp2.c
34922 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34923 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34924 MODULE_LICENSE("GPL");
34925
34926 -static int sbp2_module_init(void)
34927 +static int __init sbp2_module_init(void)
34928 {
34929 int ret;
34930
34931 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34932 index a5dea6b..0cefe8f 100644
34933 --- a/drivers/infiniband/core/cm.c
34934 +++ b/drivers/infiniband/core/cm.c
34935 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34936
34937 struct cm_counter_group {
34938 struct kobject obj;
34939 - atomic_long_t counter[CM_ATTR_COUNT];
34940 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34941 };
34942
34943 struct cm_counter_attribute {
34944 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34945 struct ib_mad_send_buf *msg = NULL;
34946 int ret;
34947
34948 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34949 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34950 counter[CM_REQ_COUNTER]);
34951
34952 /* Quick state check to discard duplicate REQs. */
34953 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34954 if (!cm_id_priv)
34955 return;
34956
34957 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34958 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34959 counter[CM_REP_COUNTER]);
34960 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34961 if (ret)
34962 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34963 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34964 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34965 spin_unlock_irq(&cm_id_priv->lock);
34966 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34967 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34968 counter[CM_RTU_COUNTER]);
34969 goto out;
34970 }
34971 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34972 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34973 dreq_msg->local_comm_id);
34974 if (!cm_id_priv) {
34975 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34976 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34977 counter[CM_DREQ_COUNTER]);
34978 cm_issue_drep(work->port, work->mad_recv_wc);
34979 return -EINVAL;
34980 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34981 case IB_CM_MRA_REP_RCVD:
34982 break;
34983 case IB_CM_TIMEWAIT:
34984 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34985 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34986 counter[CM_DREQ_COUNTER]);
34987 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34988 goto unlock;
34989 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34990 cm_free_msg(msg);
34991 goto deref;
34992 case IB_CM_DREQ_RCVD:
34993 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34994 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34995 counter[CM_DREQ_COUNTER]);
34996 goto unlock;
34997 default:
34998 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34999 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35000 cm_id_priv->msg, timeout)) {
35001 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35002 - atomic_long_inc(&work->port->
35003 + atomic_long_inc_unchecked(&work->port->
35004 counter_group[CM_RECV_DUPLICATES].
35005 counter[CM_MRA_COUNTER]);
35006 goto out;
35007 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35008 break;
35009 case IB_CM_MRA_REQ_RCVD:
35010 case IB_CM_MRA_REP_RCVD:
35011 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35012 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35013 counter[CM_MRA_COUNTER]);
35014 /* fall through */
35015 default:
35016 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35017 case IB_CM_LAP_IDLE:
35018 break;
35019 case IB_CM_MRA_LAP_SENT:
35020 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35021 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35022 counter[CM_LAP_COUNTER]);
35023 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35024 goto unlock;
35025 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35026 cm_free_msg(msg);
35027 goto deref;
35028 case IB_CM_LAP_RCVD:
35029 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35030 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35031 counter[CM_LAP_COUNTER]);
35032 goto unlock;
35033 default:
35034 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35035 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35036 if (cur_cm_id_priv) {
35037 spin_unlock_irq(&cm.lock);
35038 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35039 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35040 counter[CM_SIDR_REQ_COUNTER]);
35041 goto out; /* Duplicate message. */
35042 }
35043 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35044 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35045 msg->retries = 1;
35046
35047 - atomic_long_add(1 + msg->retries,
35048 + atomic_long_add_unchecked(1 + msg->retries,
35049 &port->counter_group[CM_XMIT].counter[attr_index]);
35050 if (msg->retries)
35051 - atomic_long_add(msg->retries,
35052 + atomic_long_add_unchecked(msg->retries,
35053 &port->counter_group[CM_XMIT_RETRIES].
35054 counter[attr_index]);
35055
35056 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35057 }
35058
35059 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35060 - atomic_long_inc(&port->counter_group[CM_RECV].
35061 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35062 counter[attr_id - CM_ATTR_ID_OFFSET]);
35063
35064 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35065 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35066 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35067
35068 return sprintf(buf, "%ld\n",
35069 - atomic_long_read(&group->counter[cm_attr->index]));
35070 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35071 }
35072
35073 -static struct sysfs_ops cm_counter_ops = {
35074 +static const struct sysfs_ops cm_counter_ops = {
35075 .show = cm_show_counter
35076 };
35077
35078 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35079 index 8fd3a6f..61d8075 100644
35080 --- a/drivers/infiniband/core/cma.c
35081 +++ b/drivers/infiniband/core/cma.c
35082 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35083
35084 req.private_data_len = sizeof(struct cma_hdr) +
35085 conn_param->private_data_len;
35086 + if (req.private_data_len < conn_param->private_data_len)
35087 + return -EINVAL;
35088 +
35089 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35090 if (!req.private_data)
35091 return -ENOMEM;
35092 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35093 memset(&req, 0, sizeof req);
35094 offset = cma_user_data_offset(id_priv->id.ps);
35095 req.private_data_len = offset + conn_param->private_data_len;
35096 + if (req.private_data_len < conn_param->private_data_len)
35097 + return -EINVAL;
35098 +
35099 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35100 if (!private_data)
35101 return -ENOMEM;
35102 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35103 index 4507043..14ad522 100644
35104 --- a/drivers/infiniband/core/fmr_pool.c
35105 +++ b/drivers/infiniband/core/fmr_pool.c
35106 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
35107
35108 struct task_struct *thread;
35109
35110 - atomic_t req_ser;
35111 - atomic_t flush_ser;
35112 + atomic_unchecked_t req_ser;
35113 + atomic_unchecked_t flush_ser;
35114
35115 wait_queue_head_t force_wait;
35116 };
35117 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35118 struct ib_fmr_pool *pool = pool_ptr;
35119
35120 do {
35121 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35122 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35123 ib_fmr_batch_release(pool);
35124
35125 - atomic_inc(&pool->flush_ser);
35126 + atomic_inc_unchecked(&pool->flush_ser);
35127 wake_up_interruptible(&pool->force_wait);
35128
35129 if (pool->flush_function)
35130 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35131 }
35132
35133 set_current_state(TASK_INTERRUPTIBLE);
35134 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35135 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35136 !kthread_should_stop())
35137 schedule();
35138 __set_current_state(TASK_RUNNING);
35139 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35140 pool->dirty_watermark = params->dirty_watermark;
35141 pool->dirty_len = 0;
35142 spin_lock_init(&pool->pool_lock);
35143 - atomic_set(&pool->req_ser, 0);
35144 - atomic_set(&pool->flush_ser, 0);
35145 + atomic_set_unchecked(&pool->req_ser, 0);
35146 + atomic_set_unchecked(&pool->flush_ser, 0);
35147 init_waitqueue_head(&pool->force_wait);
35148
35149 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35150 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35151 }
35152 spin_unlock_irq(&pool->pool_lock);
35153
35154 - serial = atomic_inc_return(&pool->req_ser);
35155 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35156 wake_up_process(pool->thread);
35157
35158 if (wait_event_interruptible(pool->force_wait,
35159 - atomic_read(&pool->flush_ser) - serial >= 0))
35160 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35161 return -EINTR;
35162
35163 return 0;
35164 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35165 } else {
35166 list_add_tail(&fmr->list, &pool->dirty_list);
35167 if (++pool->dirty_len >= pool->dirty_watermark) {
35168 - atomic_inc(&pool->req_ser);
35169 + atomic_inc_unchecked(&pool->req_ser);
35170 wake_up_process(pool->thread);
35171 }
35172 }
35173 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35174 index 158a214..1558bb7 100644
35175 --- a/drivers/infiniband/core/sysfs.c
35176 +++ b/drivers/infiniband/core/sysfs.c
35177 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35178 return port_attr->show(p, port_attr, buf);
35179 }
35180
35181 -static struct sysfs_ops port_sysfs_ops = {
35182 +static const struct sysfs_ops port_sysfs_ops = {
35183 .show = port_attr_show
35184 };
35185
35186 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35187 index 5440da0..1194ecb 100644
35188 --- a/drivers/infiniband/core/uverbs_marshall.c
35189 +++ b/drivers/infiniband/core/uverbs_marshall.c
35190 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35191 dst->grh.sgid_index = src->grh.sgid_index;
35192 dst->grh.hop_limit = src->grh.hop_limit;
35193 dst->grh.traffic_class = src->grh.traffic_class;
35194 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35195 dst->dlid = src->dlid;
35196 dst->sl = src->sl;
35197 dst->src_path_bits = src->src_path_bits;
35198 dst->static_rate = src->static_rate;
35199 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35200 dst->port_num = src->port_num;
35201 + dst->reserved = 0;
35202 }
35203 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35204
35205 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35206 struct ib_qp_attr *src)
35207 {
35208 + dst->qp_state = src->qp_state;
35209 dst->cur_qp_state = src->cur_qp_state;
35210 dst->path_mtu = src->path_mtu;
35211 dst->path_mig_state = src->path_mig_state;
35212 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35213 dst->rnr_retry = src->rnr_retry;
35214 dst->alt_port_num = src->alt_port_num;
35215 dst->alt_timeout = src->alt_timeout;
35216 + memset(dst->reserved, 0, sizeof(dst->reserved));
35217 }
35218 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35219
35220 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35221 index 100da85..62e6b88 100644
35222 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35223 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35224 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35225 struct infinipath_counters counters;
35226 struct ipath_devdata *dd;
35227
35228 + pax_track_stack();
35229 +
35230 dd = file->f_path.dentry->d_inode->i_private;
35231 dd->ipath_f_read_counters(dd, &counters);
35232
35233 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35234 index cbde0cf..afaf55c 100644
35235 --- a/drivers/infiniband/hw/nes/nes.c
35236 +++ b/drivers/infiniband/hw/nes/nes.c
35237 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35238 LIST_HEAD(nes_adapter_list);
35239 static LIST_HEAD(nes_dev_list);
35240
35241 -atomic_t qps_destroyed;
35242 +atomic_unchecked_t qps_destroyed;
35243
35244 static unsigned int ee_flsh_adapter;
35245 static unsigned int sysfs_nonidx_addr;
35246 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35247 struct nes_adapter *nesadapter = nesdev->nesadapter;
35248 u32 qp_id;
35249
35250 - atomic_inc(&qps_destroyed);
35251 + atomic_inc_unchecked(&qps_destroyed);
35252
35253 /* Free the control structures */
35254
35255 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35256 index bcc6abc..9c76b2f 100644
35257 --- a/drivers/infiniband/hw/nes/nes.h
35258 +++ b/drivers/infiniband/hw/nes/nes.h
35259 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35260 extern unsigned int wqm_quanta;
35261 extern struct list_head nes_adapter_list;
35262
35263 -extern atomic_t cm_connects;
35264 -extern atomic_t cm_accepts;
35265 -extern atomic_t cm_disconnects;
35266 -extern atomic_t cm_closes;
35267 -extern atomic_t cm_connecteds;
35268 -extern atomic_t cm_connect_reqs;
35269 -extern atomic_t cm_rejects;
35270 -extern atomic_t mod_qp_timouts;
35271 -extern atomic_t qps_created;
35272 -extern atomic_t qps_destroyed;
35273 -extern atomic_t sw_qps_destroyed;
35274 +extern atomic_unchecked_t cm_connects;
35275 +extern atomic_unchecked_t cm_accepts;
35276 +extern atomic_unchecked_t cm_disconnects;
35277 +extern atomic_unchecked_t cm_closes;
35278 +extern atomic_unchecked_t cm_connecteds;
35279 +extern atomic_unchecked_t cm_connect_reqs;
35280 +extern atomic_unchecked_t cm_rejects;
35281 +extern atomic_unchecked_t mod_qp_timouts;
35282 +extern atomic_unchecked_t qps_created;
35283 +extern atomic_unchecked_t qps_destroyed;
35284 +extern atomic_unchecked_t sw_qps_destroyed;
35285 extern u32 mh_detected;
35286 extern u32 mh_pauses_sent;
35287 extern u32 cm_packets_sent;
35288 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35289 extern u32 cm_listens_created;
35290 extern u32 cm_listens_destroyed;
35291 extern u32 cm_backlog_drops;
35292 -extern atomic_t cm_loopbacks;
35293 -extern atomic_t cm_nodes_created;
35294 -extern atomic_t cm_nodes_destroyed;
35295 -extern atomic_t cm_accel_dropped_pkts;
35296 -extern atomic_t cm_resets_recvd;
35297 +extern atomic_unchecked_t cm_loopbacks;
35298 +extern atomic_unchecked_t cm_nodes_created;
35299 +extern atomic_unchecked_t cm_nodes_destroyed;
35300 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35301 +extern atomic_unchecked_t cm_resets_recvd;
35302
35303 extern u32 int_mod_timer_init;
35304 extern u32 int_mod_cq_depth_256;
35305 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35306 index 73473db..5ed06e8 100644
35307 --- a/drivers/infiniband/hw/nes/nes_cm.c
35308 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35309 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35310 u32 cm_listens_created;
35311 u32 cm_listens_destroyed;
35312 u32 cm_backlog_drops;
35313 -atomic_t cm_loopbacks;
35314 -atomic_t cm_nodes_created;
35315 -atomic_t cm_nodes_destroyed;
35316 -atomic_t cm_accel_dropped_pkts;
35317 -atomic_t cm_resets_recvd;
35318 +atomic_unchecked_t cm_loopbacks;
35319 +atomic_unchecked_t cm_nodes_created;
35320 +atomic_unchecked_t cm_nodes_destroyed;
35321 +atomic_unchecked_t cm_accel_dropped_pkts;
35322 +atomic_unchecked_t cm_resets_recvd;
35323
35324 static inline int mini_cm_accelerated(struct nes_cm_core *,
35325 struct nes_cm_node *);
35326 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35327
35328 static struct nes_cm_core *g_cm_core;
35329
35330 -atomic_t cm_connects;
35331 -atomic_t cm_accepts;
35332 -atomic_t cm_disconnects;
35333 -atomic_t cm_closes;
35334 -atomic_t cm_connecteds;
35335 -atomic_t cm_connect_reqs;
35336 -atomic_t cm_rejects;
35337 +atomic_unchecked_t cm_connects;
35338 +atomic_unchecked_t cm_accepts;
35339 +atomic_unchecked_t cm_disconnects;
35340 +atomic_unchecked_t cm_closes;
35341 +atomic_unchecked_t cm_connecteds;
35342 +atomic_unchecked_t cm_connect_reqs;
35343 +atomic_unchecked_t cm_rejects;
35344
35345
35346 /**
35347 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35348 cm_node->rem_mac);
35349
35350 add_hte_node(cm_core, cm_node);
35351 - atomic_inc(&cm_nodes_created);
35352 + atomic_inc_unchecked(&cm_nodes_created);
35353
35354 return cm_node;
35355 }
35356 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35357 }
35358
35359 atomic_dec(&cm_core->node_cnt);
35360 - atomic_inc(&cm_nodes_destroyed);
35361 + atomic_inc_unchecked(&cm_nodes_destroyed);
35362 nesqp = cm_node->nesqp;
35363 if (nesqp) {
35364 nesqp->cm_node = NULL;
35365 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35366
35367 static void drop_packet(struct sk_buff *skb)
35368 {
35369 - atomic_inc(&cm_accel_dropped_pkts);
35370 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35371 dev_kfree_skb_any(skb);
35372 }
35373
35374 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35375
35376 int reset = 0; /* whether to send reset in case of err.. */
35377 int passive_state;
35378 - atomic_inc(&cm_resets_recvd);
35379 + atomic_inc_unchecked(&cm_resets_recvd);
35380 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35381 " refcnt=%d\n", cm_node, cm_node->state,
35382 atomic_read(&cm_node->ref_count));
35383 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35384 rem_ref_cm_node(cm_node->cm_core, cm_node);
35385 return NULL;
35386 }
35387 - atomic_inc(&cm_loopbacks);
35388 + atomic_inc_unchecked(&cm_loopbacks);
35389 loopbackremotenode->loopbackpartner = cm_node;
35390 loopbackremotenode->tcp_cntxt.rcv_wscale =
35391 NES_CM_DEFAULT_RCV_WND_SCALE;
35392 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35393 add_ref_cm_node(cm_node);
35394 } else if (cm_node->state == NES_CM_STATE_TSA) {
35395 rem_ref_cm_node(cm_core, cm_node);
35396 - atomic_inc(&cm_accel_dropped_pkts);
35397 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35398 dev_kfree_skb_any(skb);
35399 break;
35400 }
35401 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35402
35403 if ((cm_id) && (cm_id->event_handler)) {
35404 if (issue_disconn) {
35405 - atomic_inc(&cm_disconnects);
35406 + atomic_inc_unchecked(&cm_disconnects);
35407 cm_event.event = IW_CM_EVENT_DISCONNECT;
35408 cm_event.status = disconn_status;
35409 cm_event.local_addr = cm_id->local_addr;
35410 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35411 }
35412
35413 if (issue_close) {
35414 - atomic_inc(&cm_closes);
35415 + atomic_inc_unchecked(&cm_closes);
35416 nes_disconnect(nesqp, 1);
35417
35418 cm_id->provider_data = nesqp;
35419 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35420
35421 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35422 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35423 - atomic_inc(&cm_accepts);
35424 + atomic_inc_unchecked(&cm_accepts);
35425
35426 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35427 atomic_read(&nesvnic->netdev->refcnt));
35428 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35429
35430 struct nes_cm_core *cm_core;
35431
35432 - atomic_inc(&cm_rejects);
35433 + atomic_inc_unchecked(&cm_rejects);
35434 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35435 loopback = cm_node->loopbackpartner;
35436 cm_core = cm_node->cm_core;
35437 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35438 ntohl(cm_id->local_addr.sin_addr.s_addr),
35439 ntohs(cm_id->local_addr.sin_port));
35440
35441 - atomic_inc(&cm_connects);
35442 + atomic_inc_unchecked(&cm_connects);
35443 nesqp->active_conn = 1;
35444
35445 /* cache the cm_id in the qp */
35446 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35447 if (nesqp->destroyed) {
35448 return;
35449 }
35450 - atomic_inc(&cm_connecteds);
35451 + atomic_inc_unchecked(&cm_connecteds);
35452 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35453 " local port 0x%04X. jiffies = %lu.\n",
35454 nesqp->hwqp.qp_id,
35455 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35456
35457 ret = cm_id->event_handler(cm_id, &cm_event);
35458 cm_id->add_ref(cm_id);
35459 - atomic_inc(&cm_closes);
35460 + atomic_inc_unchecked(&cm_closes);
35461 cm_event.event = IW_CM_EVENT_CLOSE;
35462 cm_event.status = IW_CM_EVENT_STATUS_OK;
35463 cm_event.provider_data = cm_id->provider_data;
35464 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35465 return;
35466 cm_id = cm_node->cm_id;
35467
35468 - atomic_inc(&cm_connect_reqs);
35469 + atomic_inc_unchecked(&cm_connect_reqs);
35470 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35471 cm_node, cm_id, jiffies);
35472
35473 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35474 return;
35475 cm_id = cm_node->cm_id;
35476
35477 - atomic_inc(&cm_connect_reqs);
35478 + atomic_inc_unchecked(&cm_connect_reqs);
35479 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35480 cm_node, cm_id, jiffies);
35481
35482 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35483 index e593af3..870694a 100644
35484 --- a/drivers/infiniband/hw/nes/nes_nic.c
35485 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35486 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35487 target_stat_values[++index] = mh_detected;
35488 target_stat_values[++index] = mh_pauses_sent;
35489 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35490 - target_stat_values[++index] = atomic_read(&cm_connects);
35491 - target_stat_values[++index] = atomic_read(&cm_accepts);
35492 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35493 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35494 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35495 - target_stat_values[++index] = atomic_read(&cm_rejects);
35496 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35497 - target_stat_values[++index] = atomic_read(&qps_created);
35498 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35499 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35500 - target_stat_values[++index] = atomic_read(&cm_closes);
35501 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35502 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35503 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35504 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35505 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35506 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35507 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35508 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35509 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35510 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35511 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35512 target_stat_values[++index] = cm_packets_sent;
35513 target_stat_values[++index] = cm_packets_bounced;
35514 target_stat_values[++index] = cm_packets_created;
35515 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35516 target_stat_values[++index] = cm_listens_created;
35517 target_stat_values[++index] = cm_listens_destroyed;
35518 target_stat_values[++index] = cm_backlog_drops;
35519 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35520 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35521 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35522 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35523 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35524 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35525 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35526 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35527 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35528 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35529 target_stat_values[++index] = int_mod_timer_init;
35530 target_stat_values[++index] = int_mod_cq_depth_1;
35531 target_stat_values[++index] = int_mod_cq_depth_4;
35532 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35533 index a680c42..f914deb 100644
35534 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35535 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35536 @@ -45,9 +45,9 @@
35537
35538 #include <rdma/ib_umem.h>
35539
35540 -atomic_t mod_qp_timouts;
35541 -atomic_t qps_created;
35542 -atomic_t sw_qps_destroyed;
35543 +atomic_unchecked_t mod_qp_timouts;
35544 +atomic_unchecked_t qps_created;
35545 +atomic_unchecked_t sw_qps_destroyed;
35546
35547 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35548
35549 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35550 if (init_attr->create_flags)
35551 return ERR_PTR(-EINVAL);
35552
35553 - atomic_inc(&qps_created);
35554 + atomic_inc_unchecked(&qps_created);
35555 switch (init_attr->qp_type) {
35556 case IB_QPT_RC:
35557 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35558 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35559 struct iw_cm_event cm_event;
35560 int ret;
35561
35562 - atomic_inc(&sw_qps_destroyed);
35563 + atomic_inc_unchecked(&sw_qps_destroyed);
35564 nesqp->destroyed = 1;
35565
35566 /* Blow away the connection if it exists. */
35567 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35568 index ac11be0..3883c04 100644
35569 --- a/drivers/input/gameport/gameport.c
35570 +++ b/drivers/input/gameport/gameport.c
35571 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35572 */
35573 static void gameport_init_port(struct gameport *gameport)
35574 {
35575 - static atomic_t gameport_no = ATOMIC_INIT(0);
35576 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35577
35578 __module_get(THIS_MODULE);
35579
35580 mutex_init(&gameport->drv_mutex);
35581 device_initialize(&gameport->dev);
35582 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35583 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35584 gameport->dev.bus = &gameport_bus;
35585 gameport->dev.release = gameport_release_port;
35586 if (gameport->parent)
35587 diff --git a/drivers/input/input.c b/drivers/input/input.c
35588 index c82ae82..8cfb9cb 100644
35589 --- a/drivers/input/input.c
35590 +++ b/drivers/input/input.c
35591 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35592 */
35593 int input_register_device(struct input_dev *dev)
35594 {
35595 - static atomic_t input_no = ATOMIC_INIT(0);
35596 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35597 struct input_handler *handler;
35598 const char *path;
35599 int error;
35600 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35601 dev->setkeycode = input_default_setkeycode;
35602
35603 dev_set_name(&dev->dev, "input%ld",
35604 - (unsigned long) atomic_inc_return(&input_no) - 1);
35605 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35606
35607 error = device_add(&dev->dev);
35608 if (error)
35609 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35610 index ca13a6b..b032b0c 100644
35611 --- a/drivers/input/joystick/sidewinder.c
35612 +++ b/drivers/input/joystick/sidewinder.c
35613 @@ -30,6 +30,7 @@
35614 #include <linux/kernel.h>
35615 #include <linux/module.h>
35616 #include <linux/slab.h>
35617 +#include <linux/sched.h>
35618 #include <linux/init.h>
35619 #include <linux/input.h>
35620 #include <linux/gameport.h>
35621 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35622 unsigned char buf[SW_LENGTH];
35623 int i;
35624
35625 + pax_track_stack();
35626 +
35627 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35628
35629 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35630 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35631 index 79e3edc..01412b9 100644
35632 --- a/drivers/input/joystick/xpad.c
35633 +++ b/drivers/input/joystick/xpad.c
35634 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35635
35636 static int xpad_led_probe(struct usb_xpad *xpad)
35637 {
35638 - static atomic_t led_seq = ATOMIC_INIT(0);
35639 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35640 long led_no;
35641 struct xpad_led *led;
35642 struct led_classdev *led_cdev;
35643 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35644 if (!led)
35645 return -ENOMEM;
35646
35647 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35648 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35649
35650 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35651 led->xpad = xpad;
35652 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35653 index 0236f0d..c7327f1 100644
35654 --- a/drivers/input/serio/serio.c
35655 +++ b/drivers/input/serio/serio.c
35656 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35657 */
35658 static void serio_init_port(struct serio *serio)
35659 {
35660 - static atomic_t serio_no = ATOMIC_INIT(0);
35661 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35662
35663 __module_get(THIS_MODULE);
35664
35665 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35666 mutex_init(&serio->drv_mutex);
35667 device_initialize(&serio->dev);
35668 dev_set_name(&serio->dev, "serio%ld",
35669 - (long)atomic_inc_return(&serio_no) - 1);
35670 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35671 serio->dev.bus = &serio_bus;
35672 serio->dev.release = serio_release_port;
35673 if (serio->parent) {
35674 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35675 index 33dcd8d..2783d25 100644
35676 --- a/drivers/isdn/gigaset/common.c
35677 +++ b/drivers/isdn/gigaset/common.c
35678 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35679 cs->commands_pending = 0;
35680 cs->cur_at_seq = 0;
35681 cs->gotfwver = -1;
35682 - cs->open_count = 0;
35683 + local_set(&cs->open_count, 0);
35684 cs->dev = NULL;
35685 cs->tty = NULL;
35686 cs->tty_dev = NULL;
35687 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35688 index a2f6125..6a70677 100644
35689 --- a/drivers/isdn/gigaset/gigaset.h
35690 +++ b/drivers/isdn/gigaset/gigaset.h
35691 @@ -34,6 +34,7 @@
35692 #include <linux/tty_driver.h>
35693 #include <linux/list.h>
35694 #include <asm/atomic.h>
35695 +#include <asm/local.h>
35696
35697 #define GIG_VERSION {0,5,0,0}
35698 #define GIG_COMPAT {0,4,0,0}
35699 @@ -446,7 +447,7 @@ struct cardstate {
35700 spinlock_t cmdlock;
35701 unsigned curlen, cmdbytes;
35702
35703 - unsigned open_count;
35704 + local_t open_count;
35705 struct tty_struct *tty;
35706 struct tasklet_struct if_wake_tasklet;
35707 unsigned control_state;
35708 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35709 index b3065b8..c7e8cc9 100644
35710 --- a/drivers/isdn/gigaset/interface.c
35711 +++ b/drivers/isdn/gigaset/interface.c
35712 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35713 return -ERESTARTSYS; // FIXME -EINTR?
35714 tty->driver_data = cs;
35715
35716 - ++cs->open_count;
35717 -
35718 - if (cs->open_count == 1) {
35719 + if (local_inc_return(&cs->open_count) == 1) {
35720 spin_lock_irqsave(&cs->lock, flags);
35721 cs->tty = tty;
35722 spin_unlock_irqrestore(&cs->lock, flags);
35723 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35724
35725 if (!cs->connected)
35726 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35727 - else if (!cs->open_count)
35728 + else if (!local_read(&cs->open_count))
35729 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35730 else {
35731 - if (!--cs->open_count) {
35732 + if (!local_dec_return(&cs->open_count)) {
35733 spin_lock_irqsave(&cs->lock, flags);
35734 cs->tty = NULL;
35735 spin_unlock_irqrestore(&cs->lock, flags);
35736 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35737 if (!cs->connected) {
35738 gig_dbg(DEBUG_IF, "not connected");
35739 retval = -ENODEV;
35740 - } else if (!cs->open_count)
35741 + } else if (!local_read(&cs->open_count))
35742 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35743 else {
35744 retval = 0;
35745 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35746 if (!cs->connected) {
35747 gig_dbg(DEBUG_IF, "not connected");
35748 retval = -ENODEV;
35749 - } else if (!cs->open_count)
35750 + } else if (!local_read(&cs->open_count))
35751 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35752 else if (cs->mstate != MS_LOCKED) {
35753 dev_warn(cs->dev, "can't write to unlocked device\n");
35754 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35755 if (!cs->connected) {
35756 gig_dbg(DEBUG_IF, "not connected");
35757 retval = -ENODEV;
35758 - } else if (!cs->open_count)
35759 + } else if (!local_read(&cs->open_count))
35760 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35761 else if (cs->mstate != MS_LOCKED) {
35762 dev_warn(cs->dev, "can't write to unlocked device\n");
35763 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35764
35765 if (!cs->connected)
35766 gig_dbg(DEBUG_IF, "not connected");
35767 - else if (!cs->open_count)
35768 + else if (!local_read(&cs->open_count))
35769 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35770 else if (cs->mstate != MS_LOCKED)
35771 dev_warn(cs->dev, "can't write to unlocked device\n");
35772 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35773
35774 if (!cs->connected)
35775 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35776 - else if (!cs->open_count)
35777 + else if (!local_read(&cs->open_count))
35778 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35779 else {
35780 //FIXME
35781 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35782
35783 if (!cs->connected)
35784 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35785 - else if (!cs->open_count)
35786 + else if (!local_read(&cs->open_count))
35787 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35788 else {
35789 //FIXME
35790 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35791 goto out;
35792 }
35793
35794 - if (!cs->open_count) {
35795 + if (!local_read(&cs->open_count)) {
35796 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35797 goto out;
35798 }
35799 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35800 index a7c0083..62a7cb6 100644
35801 --- a/drivers/isdn/hardware/avm/b1.c
35802 +++ b/drivers/isdn/hardware/avm/b1.c
35803 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35804 }
35805 if (left) {
35806 if (t4file->user) {
35807 - if (copy_from_user(buf, dp, left))
35808 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35809 return -EFAULT;
35810 } else {
35811 memcpy(buf, dp, left);
35812 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35813 }
35814 if (left) {
35815 if (config->user) {
35816 - if (copy_from_user(buf, dp, left))
35817 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35818 return -EFAULT;
35819 } else {
35820 memcpy(buf, dp, left);
35821 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35822 index f130724..c373c68 100644
35823 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35824 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35825 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35826 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35827 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35828
35829 + pax_track_stack();
35830
35831 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35832 {
35833 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35834 index 4d425c6..a9be6c4 100644
35835 --- a/drivers/isdn/hardware/eicon/capifunc.c
35836 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35837 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35838 IDI_SYNC_REQ req;
35839 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35840
35841 + pax_track_stack();
35842 +
35843 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35844
35845 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35846 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35847 index 3029234..ef0d9e2 100644
35848 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35849 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35850 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35851 IDI_SYNC_REQ req;
35852 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35853
35854 + pax_track_stack();
35855 +
35856 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35857
35858 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35859 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35860 index d36a4c0..11e7d1a 100644
35861 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35862 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35863 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35864 IDI_SYNC_REQ req;
35865 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35866
35867 + pax_track_stack();
35868 +
35869 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35870
35871 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35872 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35873 index 85784a7..a19ca98 100644
35874 --- a/drivers/isdn/hardware/eicon/divasync.h
35875 +++ b/drivers/isdn/hardware/eicon/divasync.h
35876 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35877 } diva_didd_add_adapter_t;
35878 typedef struct _diva_didd_remove_adapter {
35879 IDI_CALL p_request;
35880 -} diva_didd_remove_adapter_t;
35881 +} __no_const diva_didd_remove_adapter_t;
35882 typedef struct _diva_didd_read_adapter_array {
35883 void * buffer;
35884 dword length;
35885 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35886 index db87d51..7d09acf 100644
35887 --- a/drivers/isdn/hardware/eicon/idifunc.c
35888 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35889 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35890 IDI_SYNC_REQ req;
35891 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35892
35893 + pax_track_stack();
35894 +
35895 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35896
35897 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35898 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35899 index ae89fb8..0fab299 100644
35900 --- a/drivers/isdn/hardware/eicon/message.c
35901 +++ b/drivers/isdn/hardware/eicon/message.c
35902 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35903 dword d;
35904 word w;
35905
35906 + pax_track_stack();
35907 +
35908 a = plci->adapter;
35909 Id = ((word)plci->Id<<8)|a->Id;
35910 PUT_WORD(&SS_Ind[4],0x0000);
35911 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35912 word j, n, w;
35913 dword d;
35914
35915 + pax_track_stack();
35916 +
35917
35918 for(i=0;i<8;i++) bp_parms[i].length = 0;
35919 for(i=0;i<2;i++) global_config[i].length = 0;
35920 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35921 const byte llc3[] = {4,3,2,2,6,6,0};
35922 const byte header[] = {0,2,3,3,0,0,0};
35923
35924 + pax_track_stack();
35925 +
35926 for(i=0;i<8;i++) bp_parms[i].length = 0;
35927 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35928 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35929 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35930 word appl_number_group_type[MAX_APPL];
35931 PLCI *auxplci;
35932
35933 + pax_track_stack();
35934 +
35935 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35936
35937 if(!a->group_optimization_enabled)
35938 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35939 index a564b75..f3cf8b5 100644
35940 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35941 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35942 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35943 IDI_SYNC_REQ req;
35944 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35945
35946 + pax_track_stack();
35947 +
35948 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35949
35950 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35951 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35952 index a3bd163..8956575 100644
35953 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35954 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35955 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35956 typedef struct _diva_os_idi_adapter_interface {
35957 diva_init_card_proc_t cleanup_adapter_proc;
35958 diva_cmd_card_proc_t cmd_proc;
35959 -} diva_os_idi_adapter_interface_t;
35960 +} __no_const diva_os_idi_adapter_interface_t;
35961
35962 typedef struct _diva_os_xdi_adapter {
35963 struct list_head link;
35964 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35965 index adb1e8c..21b590b 100644
35966 --- a/drivers/isdn/i4l/isdn_common.c
35967 +++ b/drivers/isdn/i4l/isdn_common.c
35968 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35969 } iocpar;
35970 void __user *argp = (void __user *)arg;
35971
35972 + pax_track_stack();
35973 +
35974 #define name iocpar.name
35975 #define bname iocpar.bname
35976 #define iocts iocpar.iocts
35977 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
35978 index 90b56ed..5ed3305 100644
35979 --- a/drivers/isdn/i4l/isdn_net.c
35980 +++ b/drivers/isdn/i4l/isdn_net.c
35981 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
35982 {
35983 isdn_net_local *lp = netdev_priv(dev);
35984 unsigned char *p;
35985 - ushort len = 0;
35986 + int len = 0;
35987
35988 switch (lp->p_encap) {
35989 case ISDN_NET_ENCAP_ETHER:
35990 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35991 index bf7997a..cf091db 100644
35992 --- a/drivers/isdn/icn/icn.c
35993 +++ b/drivers/isdn/icn/icn.c
35994 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35995 if (count > len)
35996 count = len;
35997 if (user) {
35998 - if (copy_from_user(msg, buf, count))
35999 + if (count > sizeof msg || copy_from_user(msg, buf, count))
36000 return -EFAULT;
36001 } else
36002 memcpy(msg, buf, count);
36003 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36004 index feb0fa4..f76f830 100644
36005 --- a/drivers/isdn/mISDN/socket.c
36006 +++ b/drivers/isdn/mISDN/socket.c
36007 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36008 if (dev) {
36009 struct mISDN_devinfo di;
36010
36011 + memset(&di, 0, sizeof(di));
36012 di.id = dev->id;
36013 di.Dprotocols = dev->Dprotocols;
36014 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36015 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36016 if (dev) {
36017 struct mISDN_devinfo di;
36018
36019 + memset(&di, 0, sizeof(di));
36020 di.id = dev->id;
36021 di.Dprotocols = dev->Dprotocols;
36022 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36023 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36024 index 485be8b..f0225bc 100644
36025 --- a/drivers/isdn/sc/interrupt.c
36026 +++ b/drivers/isdn/sc/interrupt.c
36027 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36028 }
36029 else if(callid>=0x0000 && callid<=0x7FFF)
36030 {
36031 + int len;
36032 +
36033 pr_debug("%s: Got Incoming Call\n",
36034 sc_adapter[card]->devicename);
36035 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36036 - strcpy(setup.eazmsn,
36037 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36038 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36039 + sizeof(setup.phone));
36040 + if (len >= sizeof(setup.phone))
36041 + continue;
36042 + len = strlcpy(setup.eazmsn,
36043 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36044 + sizeof(setup.eazmsn));
36045 + if (len >= sizeof(setup.eazmsn))
36046 + continue;
36047 setup.si1 = 7;
36048 setup.si2 = 0;
36049 setup.plan = 0;
36050 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36051 * Handle a GetMyNumber Rsp
36052 */
36053 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36054 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36055 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36056 + rcvmsg.msg_data.byte_array,
36057 + sizeof(rcvmsg.msg_data.byte_array));
36058 continue;
36059 }
36060
36061 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36062 index 8744d24..d1f9a9a 100644
36063 --- a/drivers/lguest/core.c
36064 +++ b/drivers/lguest/core.c
36065 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
36066 * it's worked so far. The end address needs +1 because __get_vm_area
36067 * allocates an extra guard page, so we need space for that.
36068 */
36069 +
36070 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36071 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36072 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36073 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36074 +#else
36075 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36076 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36077 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36078 +#endif
36079 +
36080 if (!switcher_vma) {
36081 err = -ENOMEM;
36082 printk("lguest: could not map switcher pages high\n");
36083 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
36084 * Now the Switcher is mapped at the right address, we can't fail!
36085 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36086 */
36087 - memcpy(switcher_vma->addr, start_switcher_text,
36088 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36089 end_switcher_text - start_switcher_text);
36090
36091 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36092 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36093 index 6ae3888..8b38145 100644
36094 --- a/drivers/lguest/x86/core.c
36095 +++ b/drivers/lguest/x86/core.c
36096 @@ -59,7 +59,7 @@ static struct {
36097 /* Offset from where switcher.S was compiled to where we've copied it */
36098 static unsigned long switcher_offset(void)
36099 {
36100 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36101 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36102 }
36103
36104 /* This cpu's struct lguest_pages. */
36105 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36106 * These copies are pretty cheap, so we do them unconditionally: */
36107 /* Save the current Host top-level page directory.
36108 */
36109 +
36110 +#ifdef CONFIG_PAX_PER_CPU_PGD
36111 + pages->state.host_cr3 = read_cr3();
36112 +#else
36113 pages->state.host_cr3 = __pa(current->mm->pgd);
36114 +#endif
36115 +
36116 /*
36117 * Set up the Guest's page tables to see this CPU's pages (and no
36118 * other CPU's pages).
36119 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36120 * compiled-in switcher code and the high-mapped copy we just made.
36121 */
36122 for (i = 0; i < IDT_ENTRIES; i++)
36123 - default_idt_entries[i] += switcher_offset();
36124 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36125
36126 /*
36127 * Set up the Switcher's per-cpu areas.
36128 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36129 * it will be undisturbed when we switch. To change %cs and jump we
36130 * need this structure to feed to Intel's "lcall" instruction.
36131 */
36132 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36133 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36134 lguest_entry.segment = LGUEST_CS;
36135
36136 /*
36137 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36138 index 40634b0..4f5855e 100644
36139 --- a/drivers/lguest/x86/switcher_32.S
36140 +++ b/drivers/lguest/x86/switcher_32.S
36141 @@ -87,6 +87,7 @@
36142 #include <asm/page.h>
36143 #include <asm/segment.h>
36144 #include <asm/lguest.h>
36145 +#include <asm/processor-flags.h>
36146
36147 // We mark the start of the code to copy
36148 // It's placed in .text tho it's never run here
36149 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36150 // Changes type when we load it: damn Intel!
36151 // For after we switch over our page tables
36152 // That entry will be read-only: we'd crash.
36153 +
36154 +#ifdef CONFIG_PAX_KERNEXEC
36155 + mov %cr0, %edx
36156 + xor $X86_CR0_WP, %edx
36157 + mov %edx, %cr0
36158 +#endif
36159 +
36160 movl $(GDT_ENTRY_TSS*8), %edx
36161 ltr %dx
36162
36163 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36164 // Let's clear it again for our return.
36165 // The GDT descriptor of the Host
36166 // Points to the table after two "size" bytes
36167 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36168 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36169 // Clear "used" from type field (byte 5, bit 2)
36170 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36171 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36172 +
36173 +#ifdef CONFIG_PAX_KERNEXEC
36174 + mov %cr0, %eax
36175 + xor $X86_CR0_WP, %eax
36176 + mov %eax, %cr0
36177 +#endif
36178
36179 // Once our page table's switched, the Guest is live!
36180 // The Host fades as we run this final step.
36181 @@ -295,13 +309,12 @@ deliver_to_host:
36182 // I consulted gcc, and it gave
36183 // These instructions, which I gladly credit:
36184 leal (%edx,%ebx,8), %eax
36185 - movzwl (%eax),%edx
36186 - movl 4(%eax), %eax
36187 - xorw %ax, %ax
36188 - orl %eax, %edx
36189 + movl 4(%eax), %edx
36190 + movw (%eax), %dx
36191 // Now the address of the handler's in %edx
36192 // We call it now: its "iret" drops us home.
36193 - jmp *%edx
36194 + ljmp $__KERNEL_CS, $1f
36195 +1: jmp *%edx
36196
36197 // Every interrupt can come to us here
36198 // But we must truly tell each apart.
36199 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36200 index 588a5b0..b71db89 100644
36201 --- a/drivers/macintosh/macio_asic.c
36202 +++ b/drivers/macintosh/macio_asic.c
36203 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36204 * MacIO is matched against any Apple ID, it's probe() function
36205 * will then decide wether it applies or not
36206 */
36207 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36208 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36209 .vendor = PCI_VENDOR_ID_APPLE,
36210 .device = PCI_ANY_ID,
36211 .subvendor = PCI_ANY_ID,
36212 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36213 index a348bb0..ecd9b3f 100644
36214 --- a/drivers/macintosh/via-pmu-backlight.c
36215 +++ b/drivers/macintosh/via-pmu-backlight.c
36216 @@ -15,7 +15,7 @@
36217
36218 #define MAX_PMU_LEVEL 0xFF
36219
36220 -static struct backlight_ops pmu_backlight_data;
36221 +static const struct backlight_ops pmu_backlight_data;
36222 static DEFINE_SPINLOCK(pmu_backlight_lock);
36223 static int sleeping, uses_pmu_bl;
36224 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36225 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36226 return bd->props.brightness;
36227 }
36228
36229 -static struct backlight_ops pmu_backlight_data = {
36230 +static const struct backlight_ops pmu_backlight_data = {
36231 .get_brightness = pmu_backlight_get_brightness,
36232 .update_status = pmu_backlight_update_status,
36233
36234 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36235 index 6f308a4..b5f7ff7 100644
36236 --- a/drivers/macintosh/via-pmu.c
36237 +++ b/drivers/macintosh/via-pmu.c
36238 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36239 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36240 }
36241
36242 -static struct platform_suspend_ops pmu_pm_ops = {
36243 +static const struct platform_suspend_ops pmu_pm_ops = {
36244 .enter = powerbook_sleep,
36245 .valid = pmu_sleep_valid,
36246 };
36247 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36248 index 818b617..4656e38 100644
36249 --- a/drivers/md/dm-ioctl.c
36250 +++ b/drivers/md/dm-ioctl.c
36251 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36252 cmd == DM_LIST_VERSIONS_CMD)
36253 return 0;
36254
36255 - if ((cmd == DM_DEV_CREATE_CMD)) {
36256 + if (cmd == DM_DEV_CREATE_CMD) {
36257 if (!*param->name) {
36258 DMWARN("name not supplied when creating device");
36259 return -EINVAL;
36260 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36261 index 6021d0a..a878643 100644
36262 --- a/drivers/md/dm-raid1.c
36263 +++ b/drivers/md/dm-raid1.c
36264 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36265
36266 struct mirror {
36267 struct mirror_set *ms;
36268 - atomic_t error_count;
36269 + atomic_unchecked_t error_count;
36270 unsigned long error_type;
36271 struct dm_dev *dev;
36272 sector_t offset;
36273 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36274 * simple way to tell if a device has encountered
36275 * errors.
36276 */
36277 - atomic_inc(&m->error_count);
36278 + atomic_inc_unchecked(&m->error_count);
36279
36280 if (test_and_set_bit(error_type, &m->error_type))
36281 return;
36282 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36283 }
36284
36285 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36286 - if (!atomic_read(&new->error_count)) {
36287 + if (!atomic_read_unchecked(&new->error_count)) {
36288 set_default_mirror(new);
36289 break;
36290 }
36291 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36292 struct mirror *m = get_default_mirror(ms);
36293
36294 do {
36295 - if (likely(!atomic_read(&m->error_count)))
36296 + if (likely(!atomic_read_unchecked(&m->error_count)))
36297 return m;
36298
36299 if (m-- == ms->mirror)
36300 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36301 {
36302 struct mirror *default_mirror = get_default_mirror(m->ms);
36303
36304 - return !atomic_read(&default_mirror->error_count);
36305 + return !atomic_read_unchecked(&default_mirror->error_count);
36306 }
36307
36308 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36309 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36310 */
36311 if (likely(region_in_sync(ms, region, 1)))
36312 m = choose_mirror(ms, bio->bi_sector);
36313 - else if (m && atomic_read(&m->error_count))
36314 + else if (m && atomic_read_unchecked(&m->error_count))
36315 m = NULL;
36316
36317 if (likely(m))
36318 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36319 }
36320
36321 ms->mirror[mirror].ms = ms;
36322 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36323 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36324 ms->mirror[mirror].error_type = 0;
36325 ms->mirror[mirror].offset = offset;
36326
36327 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36328 */
36329 static char device_status_char(struct mirror *m)
36330 {
36331 - if (!atomic_read(&(m->error_count)))
36332 + if (!atomic_read_unchecked(&(m->error_count)))
36333 return 'A';
36334
36335 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36336 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36337 index bd58703..9f26571 100644
36338 --- a/drivers/md/dm-stripe.c
36339 +++ b/drivers/md/dm-stripe.c
36340 @@ -20,7 +20,7 @@ struct stripe {
36341 struct dm_dev *dev;
36342 sector_t physical_start;
36343
36344 - atomic_t error_count;
36345 + atomic_unchecked_t error_count;
36346 };
36347
36348 struct stripe_c {
36349 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36350 kfree(sc);
36351 return r;
36352 }
36353 - atomic_set(&(sc->stripe[i].error_count), 0);
36354 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36355 }
36356
36357 ti->private = sc;
36358 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36359 DMEMIT("%d ", sc->stripes);
36360 for (i = 0; i < sc->stripes; i++) {
36361 DMEMIT("%s ", sc->stripe[i].dev->name);
36362 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36363 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36364 'D' : 'A';
36365 }
36366 buffer[i] = '\0';
36367 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36368 */
36369 for (i = 0; i < sc->stripes; i++)
36370 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36371 - atomic_inc(&(sc->stripe[i].error_count));
36372 - if (atomic_read(&(sc->stripe[i].error_count)) <
36373 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36374 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36375 DM_IO_ERROR_THRESHOLD)
36376 queue_work(kstriped, &sc->kstriped_ws);
36377 }
36378 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36379 index 4b04590..13a77b2 100644
36380 --- a/drivers/md/dm-sysfs.c
36381 +++ b/drivers/md/dm-sysfs.c
36382 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36383 NULL,
36384 };
36385
36386 -static struct sysfs_ops dm_sysfs_ops = {
36387 +static const struct sysfs_ops dm_sysfs_ops = {
36388 .show = dm_attr_show,
36389 };
36390
36391 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36392 index 03345bb..332250d 100644
36393 --- a/drivers/md/dm-table.c
36394 +++ b/drivers/md/dm-table.c
36395 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36396 if (!dev_size)
36397 return 0;
36398
36399 - if ((start >= dev_size) || (start + len > dev_size)) {
36400 + if ((start >= dev_size) || (len > dev_size - start)) {
36401 DMWARN("%s: %s too small for target: "
36402 "start=%llu, len=%llu, dev_size=%llu",
36403 dm_device_name(ti->table->md), bdevname(bdev, b),
36404 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36405 index c988ac2..c418141 100644
36406 --- a/drivers/md/dm.c
36407 +++ b/drivers/md/dm.c
36408 @@ -165,9 +165,9 @@ struct mapped_device {
36409 /*
36410 * Event handling.
36411 */
36412 - atomic_t event_nr;
36413 + atomic_unchecked_t event_nr;
36414 wait_queue_head_t eventq;
36415 - atomic_t uevent_seq;
36416 + atomic_unchecked_t uevent_seq;
36417 struct list_head uevent_list;
36418 spinlock_t uevent_lock; /* Protect access to uevent_list */
36419
36420 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36421 rwlock_init(&md->map_lock);
36422 atomic_set(&md->holders, 1);
36423 atomic_set(&md->open_count, 0);
36424 - atomic_set(&md->event_nr, 0);
36425 - atomic_set(&md->uevent_seq, 0);
36426 + atomic_set_unchecked(&md->event_nr, 0);
36427 + atomic_set_unchecked(&md->uevent_seq, 0);
36428 INIT_LIST_HEAD(&md->uevent_list);
36429 spin_lock_init(&md->uevent_lock);
36430
36431 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36432
36433 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36434
36435 - atomic_inc(&md->event_nr);
36436 + atomic_inc_unchecked(&md->event_nr);
36437 wake_up(&md->eventq);
36438 }
36439
36440 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36441
36442 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36443 {
36444 - return atomic_add_return(1, &md->uevent_seq);
36445 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36446 }
36447
36448 uint32_t dm_get_event_nr(struct mapped_device *md)
36449 {
36450 - return atomic_read(&md->event_nr);
36451 + return atomic_read_unchecked(&md->event_nr);
36452 }
36453
36454 int dm_wait_event(struct mapped_device *md, int event_nr)
36455 {
36456 return wait_event_interruptible(md->eventq,
36457 - (event_nr != atomic_read(&md->event_nr)));
36458 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36459 }
36460
36461 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36462 diff --git a/drivers/md/md.c b/drivers/md/md.c
36463 index 4ce6e2f..7a9530a 100644
36464 --- a/drivers/md/md.c
36465 +++ b/drivers/md/md.c
36466 @@ -153,10 +153,10 @@ static int start_readonly;
36467 * start build, activate spare
36468 */
36469 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36470 -static atomic_t md_event_count;
36471 +static atomic_unchecked_t md_event_count;
36472 void md_new_event(mddev_t *mddev)
36473 {
36474 - atomic_inc(&md_event_count);
36475 + atomic_inc_unchecked(&md_event_count);
36476 wake_up(&md_event_waiters);
36477 }
36478 EXPORT_SYMBOL_GPL(md_new_event);
36479 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36480 */
36481 static void md_new_event_inintr(mddev_t *mddev)
36482 {
36483 - atomic_inc(&md_event_count);
36484 + atomic_inc_unchecked(&md_event_count);
36485 wake_up(&md_event_waiters);
36486 }
36487
36488 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36489
36490 rdev->preferred_minor = 0xffff;
36491 rdev->data_offset = le64_to_cpu(sb->data_offset);
36492 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36493 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36494
36495 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36496 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36497 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36498 else
36499 sb->resync_offset = cpu_to_le64(0);
36500
36501 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36502 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36503
36504 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36505 sb->size = cpu_to_le64(mddev->dev_sectors);
36506 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36507 static ssize_t
36508 errors_show(mdk_rdev_t *rdev, char *page)
36509 {
36510 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36511 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36512 }
36513
36514 static ssize_t
36515 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36516 char *e;
36517 unsigned long n = simple_strtoul(buf, &e, 10);
36518 if (*buf && (*e == 0 || *e == '\n')) {
36519 - atomic_set(&rdev->corrected_errors, n);
36520 + atomic_set_unchecked(&rdev->corrected_errors, n);
36521 return len;
36522 }
36523 return -EINVAL;
36524 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36525 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36526 kfree(rdev);
36527 }
36528 -static struct sysfs_ops rdev_sysfs_ops = {
36529 +static const struct sysfs_ops rdev_sysfs_ops = {
36530 .show = rdev_attr_show,
36531 .store = rdev_attr_store,
36532 };
36533 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36534 rdev->data_offset = 0;
36535 rdev->sb_events = 0;
36536 atomic_set(&rdev->nr_pending, 0);
36537 - atomic_set(&rdev->read_errors, 0);
36538 - atomic_set(&rdev->corrected_errors, 0);
36539 + atomic_set_unchecked(&rdev->read_errors, 0);
36540 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36541
36542 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36543 if (!size) {
36544 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36545 kfree(mddev);
36546 }
36547
36548 -static struct sysfs_ops md_sysfs_ops = {
36549 +static const struct sysfs_ops md_sysfs_ops = {
36550 .show = md_attr_show,
36551 .store = md_attr_store,
36552 };
36553 @@ -4482,7 +4482,8 @@ out:
36554 err = 0;
36555 blk_integrity_unregister(disk);
36556 md_new_event(mddev);
36557 - sysfs_notify_dirent(mddev->sysfs_state);
36558 + if (mddev->sysfs_state)
36559 + sysfs_notify_dirent(mddev->sysfs_state);
36560 return err;
36561 }
36562
36563 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36564
36565 spin_unlock(&pers_lock);
36566 seq_printf(seq, "\n");
36567 - mi->event = atomic_read(&md_event_count);
36568 + mi->event = atomic_read_unchecked(&md_event_count);
36569 return 0;
36570 }
36571 if (v == (void*)2) {
36572 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36573 chunk_kb ? "KB" : "B");
36574 if (bitmap->file) {
36575 seq_printf(seq, ", file: ");
36576 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36577 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36578 }
36579
36580 seq_printf(seq, "\n");
36581 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36582 else {
36583 struct seq_file *p = file->private_data;
36584 p->private = mi;
36585 - mi->event = atomic_read(&md_event_count);
36586 + mi->event = atomic_read_unchecked(&md_event_count);
36587 }
36588 return error;
36589 }
36590 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36591 /* always allow read */
36592 mask = POLLIN | POLLRDNORM;
36593
36594 - if (mi->event != atomic_read(&md_event_count))
36595 + if (mi->event != atomic_read_unchecked(&md_event_count))
36596 mask |= POLLERR | POLLPRI;
36597 return mask;
36598 }
36599 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36600 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36601 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36602 (int)part_stat_read(&disk->part0, sectors[1]) -
36603 - atomic_read(&disk->sync_io);
36604 + atomic_read_unchecked(&disk->sync_io);
36605 /* sync IO will cause sync_io to increase before the disk_stats
36606 * as sync_io is counted when a request starts, and
36607 * disk_stats is counted when it completes.
36608 diff --git a/drivers/md/md.h b/drivers/md/md.h
36609 index 87430fe..0024a4c 100644
36610 --- a/drivers/md/md.h
36611 +++ b/drivers/md/md.h
36612 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36613 * only maintained for arrays that
36614 * support hot removal
36615 */
36616 - atomic_t read_errors; /* number of consecutive read errors that
36617 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36618 * we have tried to ignore.
36619 */
36620 - atomic_t corrected_errors; /* number of corrected read errors,
36621 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36622 * for reporting to userspace and storing
36623 * in superblock.
36624 */
36625 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36626
36627 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36628 {
36629 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36630 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36631 }
36632
36633 struct mdk_personality
36634 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36635 index 968cb14..f0ad2e4 100644
36636 --- a/drivers/md/raid1.c
36637 +++ b/drivers/md/raid1.c
36638 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36639 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36640 continue;
36641 rdev = conf->mirrors[d].rdev;
36642 - atomic_add(s, &rdev->corrected_errors);
36643 + atomic_add_unchecked(s, &rdev->corrected_errors);
36644 if (sync_page_io(rdev->bdev,
36645 sect + rdev->data_offset,
36646 s<<9,
36647 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36648 /* Well, this device is dead */
36649 md_error(mddev, rdev);
36650 else {
36651 - atomic_add(s, &rdev->corrected_errors);
36652 + atomic_add_unchecked(s, &rdev->corrected_errors);
36653 printk(KERN_INFO
36654 "raid1:%s: read error corrected "
36655 "(%d sectors at %llu on %s)\n",
36656 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36657 index 1b4e232..cf0f534 100644
36658 --- a/drivers/md/raid10.c
36659 +++ b/drivers/md/raid10.c
36660 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36661 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36662 set_bit(R10BIO_Uptodate, &r10_bio->state);
36663 else {
36664 - atomic_add(r10_bio->sectors,
36665 + atomic_add_unchecked(r10_bio->sectors,
36666 &conf->mirrors[d].rdev->corrected_errors);
36667 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36668 md_error(r10_bio->mddev,
36669 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36670 test_bit(In_sync, &rdev->flags)) {
36671 atomic_inc(&rdev->nr_pending);
36672 rcu_read_unlock();
36673 - atomic_add(s, &rdev->corrected_errors);
36674 + atomic_add_unchecked(s, &rdev->corrected_errors);
36675 if (sync_page_io(rdev->bdev,
36676 r10_bio->devs[sl].addr +
36677 sect + rdev->data_offset,
36678 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36679 index 883215d..675bf47 100644
36680 --- a/drivers/md/raid5.c
36681 +++ b/drivers/md/raid5.c
36682 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36683 bi->bi_next = NULL;
36684 if ((rw & WRITE) &&
36685 test_bit(R5_ReWrite, &sh->dev[i].flags))
36686 - atomic_add(STRIPE_SECTORS,
36687 + atomic_add_unchecked(STRIPE_SECTORS,
36688 &rdev->corrected_errors);
36689 generic_make_request(bi);
36690 } else {
36691 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36692 clear_bit(R5_ReadError, &sh->dev[i].flags);
36693 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36694 }
36695 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36696 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36697 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36698 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36699 } else {
36700 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36701 int retry = 0;
36702 rdev = conf->disks[i].rdev;
36703
36704 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36705 - atomic_inc(&rdev->read_errors);
36706 + atomic_inc_unchecked(&rdev->read_errors);
36707 if (conf->mddev->degraded >= conf->max_degraded)
36708 printk_rl(KERN_WARNING
36709 "raid5:%s: read error not correctable "
36710 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36711 (unsigned long long)(sh->sector
36712 + rdev->data_offset),
36713 bdn);
36714 - else if (atomic_read(&rdev->read_errors)
36715 + else if (atomic_read_unchecked(&rdev->read_errors)
36716 > conf->max_nr_stripes)
36717 printk(KERN_WARNING
36718 "raid5:%s: Too many read errors, failing device %s.\n",
36719 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36720 sector_t r_sector;
36721 struct stripe_head sh2;
36722
36723 + pax_track_stack();
36724
36725 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36726 stripe = new_sector;
36727 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36728 index 05bde9c..2f31d40 100644
36729 --- a/drivers/media/common/saa7146_hlp.c
36730 +++ b/drivers/media/common/saa7146_hlp.c
36731 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36732
36733 int x[32], y[32], w[32], h[32];
36734
36735 + pax_track_stack();
36736 +
36737 /* clear out memory */
36738 memset(&line_list[0], 0x00, sizeof(u32)*32);
36739 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36740 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36741 index cb22da5..82b686e 100644
36742 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36743 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36744 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36745 u8 buf[HOST_LINK_BUF_SIZE];
36746 int i;
36747
36748 + pax_track_stack();
36749 +
36750 dprintk("%s\n", __func__);
36751
36752 /* check if we have space for a link buf in the rx_buffer */
36753 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36754 unsigned long timeout;
36755 int written;
36756
36757 + pax_track_stack();
36758 +
36759 dprintk("%s\n", __func__);
36760
36761 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36762 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36763 index 2fe05d0..a3289c4 100644
36764 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36765 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36766 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36767 union {
36768 dmx_ts_cb ts;
36769 dmx_section_cb sec;
36770 - } cb;
36771 + } __no_const cb;
36772
36773 struct dvb_demux *demux;
36774 void *priv;
36775 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36776 index 94159b9..376bd8e 100644
36777 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36778 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36779 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36780 const struct dvb_device *template, void *priv, int type)
36781 {
36782 struct dvb_device *dvbdev;
36783 - struct file_operations *dvbdevfops;
36784 + file_operations_no_const *dvbdevfops;
36785 struct device *clsdev;
36786 int minor;
36787 int id;
36788 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36789 index 2a53dd0..db8c07a 100644
36790 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36791 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36792 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36793 struct dib0700_adapter_state {
36794 int (*set_param_save) (struct dvb_frontend *,
36795 struct dvb_frontend_parameters *);
36796 -};
36797 +} __no_const;
36798
36799 static int dib7070_set_param_override(struct dvb_frontend *fe,
36800 struct dvb_frontend_parameters *fep)
36801 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36802 index db7f7f7..f55e96f 100644
36803 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36804 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36805 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36806
36807 u8 buf[260];
36808
36809 + pax_track_stack();
36810 +
36811 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36812 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36813
36814 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36815 index 524acf5..5ffc403 100644
36816 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36817 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36818 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36819
36820 struct dib0700_adapter_state {
36821 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36822 -};
36823 +} __no_const;
36824
36825 /* Hauppauge Nova-T 500 (aka Bristol)
36826 * has a LNA on GPIO0 which is enabled by setting 1 */
36827 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36828 index ba91735..4261d84 100644
36829 --- a/drivers/media/dvb/frontends/dib3000.h
36830 +++ b/drivers/media/dvb/frontends/dib3000.h
36831 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36832 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36833 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36834 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36835 -};
36836 +} __no_const;
36837
36838 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36839 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36840 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36841 index c709ce6..b3fe620 100644
36842 --- a/drivers/media/dvb/frontends/or51211.c
36843 +++ b/drivers/media/dvb/frontends/or51211.c
36844 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36845 u8 tudata[585];
36846 int i;
36847
36848 + pax_track_stack();
36849 +
36850 dprintk("Firmware is %zd bytes\n",fw->size);
36851
36852 /* Get eprom data */
36853 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36854 index 482d0f3..ee1e202 100644
36855 --- a/drivers/media/radio/radio-cadet.c
36856 +++ b/drivers/media/radio/radio-cadet.c
36857 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36858 while (i < count && dev->rdsin != dev->rdsout)
36859 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36860
36861 - if (copy_to_user(data, readbuf, i))
36862 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36863 return -EFAULT;
36864 return i;
36865 }
36866 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36867 index 6dd51e2..0359b92 100644
36868 --- a/drivers/media/video/cx18/cx18-driver.c
36869 +++ b/drivers/media/video/cx18/cx18-driver.c
36870 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36871
36872 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36873
36874 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36875 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36876
36877 /* Parameter declarations */
36878 static int cardtype[CX18_MAX_CARDS];
36879 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36880 struct i2c_client c;
36881 u8 eedata[256];
36882
36883 + pax_track_stack();
36884 +
36885 memset(&c, 0, sizeof(c));
36886 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36887 c.adapter = &cx->i2c_adap[0];
36888 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36889 struct cx18 *cx;
36890
36891 /* FIXME - module parameter arrays constrain max instances */
36892 - i = atomic_inc_return(&cx18_instance) - 1;
36893 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36894 if (i >= CX18_MAX_CARDS) {
36895 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36896 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36897 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36898 index 463ec34..2f4625a 100644
36899 --- a/drivers/media/video/ivtv/ivtv-driver.c
36900 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36901 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36902 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36903
36904 /* ivtv instance counter */
36905 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36906 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36907
36908 /* Parameter declarations */
36909 static int cardtype[IVTV_MAX_CARDS];
36910 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36911 index 5fc4ac0..652a54a 100644
36912 --- a/drivers/media/video/omap24xxcam.c
36913 +++ b/drivers/media/video/omap24xxcam.c
36914 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36915 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36916
36917 do_gettimeofday(&vb->ts);
36918 - vb->field_count = atomic_add_return(2, &fh->field_count);
36919 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36920 if (csr & csr_error) {
36921 vb->state = VIDEOBUF_ERROR;
36922 if (!atomic_read(&fh->cam->in_reset)) {
36923 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36924 index 2ce67f5..cf26a5b 100644
36925 --- a/drivers/media/video/omap24xxcam.h
36926 +++ b/drivers/media/video/omap24xxcam.h
36927 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36928 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36929 struct videobuf_queue vbq;
36930 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36931 - atomic_t field_count; /* field counter for videobuf_buffer */
36932 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36933 /* accessing cam here doesn't need serialisation: it's constant */
36934 struct omap24xxcam_device *cam;
36935 };
36936 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36937 index 299afa4..eb47459 100644
36938 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36939 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36940 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36941 u8 *eeprom;
36942 struct tveeprom tvdata;
36943
36944 + pax_track_stack();
36945 +
36946 memset(&tvdata,0,sizeof(tvdata));
36947
36948 eeprom = pvr2_eeprom_fetch(hdw);
36949 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36950 index 5b152ff..3320638 100644
36951 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36952 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36953 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36954
36955 /* I2C stuff */
36956 struct i2c_adapter i2c_adap;
36957 - struct i2c_algorithm i2c_algo;
36958 + i2c_algorithm_no_const i2c_algo;
36959 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36960 int i2c_cx25840_hack_state;
36961 int i2c_linked;
36962 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36963 index 1eabff6..8e2313a 100644
36964 --- a/drivers/media/video/saa7134/saa6752hs.c
36965 +++ b/drivers/media/video/saa7134/saa6752hs.c
36966 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36967 unsigned char localPAT[256];
36968 unsigned char localPMT[256];
36969
36970 + pax_track_stack();
36971 +
36972 /* Set video format - must be done first as it resets other settings */
36973 set_reg8(client, 0x41, h->video_format);
36974
36975 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36976 index 9c1d3ac..b1b49e9 100644
36977 --- a/drivers/media/video/saa7164/saa7164-cmd.c
36978 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
36979 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36980 wait_queue_head_t *q = 0;
36981 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36982
36983 + pax_track_stack();
36984 +
36985 /* While any outstand message on the bus exists... */
36986 do {
36987
36988 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36989 u8 tmp[512];
36990 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36991
36992 + pax_track_stack();
36993 +
36994 while (loop) {
36995
36996 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36997 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36998 index b085496..cde0270 100644
36999 --- a/drivers/media/video/usbvideo/ibmcam.c
37000 +++ b/drivers/media/video/usbvideo/ibmcam.c
37001 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37002 static int __init ibmcam_init(void)
37003 {
37004 struct usbvideo_cb cbTbl;
37005 - memset(&cbTbl, 0, sizeof(cbTbl));
37006 - cbTbl.probe = ibmcam_probe;
37007 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
37008 - cbTbl.videoStart = ibmcam_video_start;
37009 - cbTbl.videoStop = ibmcam_video_stop;
37010 - cbTbl.processData = ibmcam_ProcessIsocData;
37011 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37012 - cbTbl.adjustPicture = ibmcam_adjust_picture;
37013 - cbTbl.getFPS = ibmcam_calculate_fps;
37014 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
37015 + *(void **)&cbTbl.probe = ibmcam_probe;
37016 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37017 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
37018 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37019 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37020 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37021 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37022 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37023 return usbvideo_register(
37024 &cams,
37025 MAX_IBMCAM,
37026 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37027 index 31d57f2..600b735 100644
37028 --- a/drivers/media/video/usbvideo/konicawc.c
37029 +++ b/drivers/media/video/usbvideo/konicawc.c
37030 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37031 int error;
37032
37033 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37034 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37035 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37036
37037 cam->input = input_dev = input_allocate_device();
37038 if (!input_dev) {
37039 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37040 struct usbvideo_cb cbTbl;
37041 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37042 DRIVER_DESC "\n");
37043 - memset(&cbTbl, 0, sizeof(cbTbl));
37044 - cbTbl.probe = konicawc_probe;
37045 - cbTbl.setupOnOpen = konicawc_setup_on_open;
37046 - cbTbl.processData = konicawc_process_isoc;
37047 - cbTbl.getFPS = konicawc_calculate_fps;
37048 - cbTbl.setVideoMode = konicawc_set_video_mode;
37049 - cbTbl.startDataPump = konicawc_start_data;
37050 - cbTbl.stopDataPump = konicawc_stop_data;
37051 - cbTbl.adjustPicture = konicawc_adjust_picture;
37052 - cbTbl.userFree = konicawc_free_uvd;
37053 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
37054 + *(void **)&cbTbl.probe = konicawc_probe;
37055 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37056 + *(void **)&cbTbl.processData = konicawc_process_isoc;
37057 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37058 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37059 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
37060 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37061 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37062 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
37063 return usbvideo_register(
37064 &cams,
37065 MAX_CAMERAS,
37066 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37067 index 803d3e4..c4d1b96 100644
37068 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
37069 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37070 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37071 int error;
37072
37073 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37074 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37075 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37076
37077 cam->input = input_dev = input_allocate_device();
37078 if (!input_dev) {
37079 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37080 index fbd1b63..292f9f0 100644
37081 --- a/drivers/media/video/usbvideo/ultracam.c
37082 +++ b/drivers/media/video/usbvideo/ultracam.c
37083 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37084 {
37085 struct usbvideo_cb cbTbl;
37086 memset(&cbTbl, 0, sizeof(cbTbl));
37087 - cbTbl.probe = ultracam_probe;
37088 - cbTbl.setupOnOpen = ultracam_setup_on_open;
37089 - cbTbl.videoStart = ultracam_video_start;
37090 - cbTbl.videoStop = ultracam_video_stop;
37091 - cbTbl.processData = ultracam_ProcessIsocData;
37092 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37093 - cbTbl.adjustPicture = ultracam_adjust_picture;
37094 - cbTbl.getFPS = ultracam_calculate_fps;
37095 + *(void **)&cbTbl.probe = ultracam_probe;
37096 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37097 + *(void **)&cbTbl.videoStart = ultracam_video_start;
37098 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
37099 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37100 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37101 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37102 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37103 return usbvideo_register(
37104 &cams,
37105 MAX_CAMERAS,
37106 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37107 index dea8b32..34f6878 100644
37108 --- a/drivers/media/video/usbvideo/usbvideo.c
37109 +++ b/drivers/media/video/usbvideo/usbvideo.c
37110 @@ -697,15 +697,15 @@ int usbvideo_register(
37111 __func__, cams, base_size, num_cams);
37112
37113 /* Copy callbacks, apply defaults for those that are not set */
37114 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37115 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37116 if (cams->cb.getFrame == NULL)
37117 - cams->cb.getFrame = usbvideo_GetFrame;
37118 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37119 if (cams->cb.disconnect == NULL)
37120 - cams->cb.disconnect = usbvideo_Disconnect;
37121 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37122 if (cams->cb.startDataPump == NULL)
37123 - cams->cb.startDataPump = usbvideo_StartDataPump;
37124 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37125 if (cams->cb.stopDataPump == NULL)
37126 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37127 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37128
37129 cams->num_cameras = num_cams;
37130 cams->cam = (struct uvd *) &cams[1];
37131 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37132 index c66985b..7fa143a 100644
37133 --- a/drivers/media/video/usbvideo/usbvideo.h
37134 +++ b/drivers/media/video/usbvideo/usbvideo.h
37135 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37136 int (*startDataPump)(struct uvd *uvd);
37137 void (*stopDataPump)(struct uvd *uvd);
37138 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37139 -};
37140 +} __no_const;
37141
37142 struct usbvideo {
37143 int num_cameras; /* As allocated */
37144 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37145 index e0f91e4..37554ea 100644
37146 --- a/drivers/media/video/usbvision/usbvision-core.c
37147 +++ b/drivers/media/video/usbvision/usbvision-core.c
37148 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37149 unsigned char rv, gv, bv;
37150 static unsigned char *Y, *U, *V;
37151
37152 + pax_track_stack();
37153 +
37154 frame = usbvision->curFrame;
37155 imageSize = frame->frmwidth * frame->frmheight;
37156 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37157 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37158 index 0d06e7c..3d17d24 100644
37159 --- a/drivers/media/video/v4l2-device.c
37160 +++ b/drivers/media/video/v4l2-device.c
37161 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37162 EXPORT_SYMBOL_GPL(v4l2_device_register);
37163
37164 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37165 - atomic_t *instance)
37166 + atomic_unchecked_t *instance)
37167 {
37168 - int num = atomic_inc_return(instance) - 1;
37169 + int num = atomic_inc_return_unchecked(instance) - 1;
37170 int len = strlen(basename);
37171
37172 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37173 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37174 index 032ebae..6a3532c 100644
37175 --- a/drivers/media/video/videobuf-dma-sg.c
37176 +++ b/drivers/media/video/videobuf-dma-sg.c
37177 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37178 {
37179 struct videobuf_queue q;
37180
37181 + pax_track_stack();
37182 +
37183 /* Required to make generic handler to call __videobuf_alloc */
37184 q.int_ops = &sg_ops;
37185
37186 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37187 index b6992b7..9fa7547 100644
37188 --- a/drivers/message/fusion/mptbase.c
37189 +++ b/drivers/message/fusion/mptbase.c
37190 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37191 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37192 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37193
37194 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37195 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37196 + NULL, NULL);
37197 +#else
37198 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37199 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37200 +#endif
37201 +
37202 /*
37203 * Rounding UP to nearest 4-kB boundary here...
37204 */
37205 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37206 index 83873e3..e360e9a 100644
37207 --- a/drivers/message/fusion/mptsas.c
37208 +++ b/drivers/message/fusion/mptsas.c
37209 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37210 return 0;
37211 }
37212
37213 +static inline void
37214 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37215 +{
37216 + if (phy_info->port_details) {
37217 + phy_info->port_details->rphy = rphy;
37218 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37219 + ioc->name, rphy));
37220 + }
37221 +
37222 + if (rphy) {
37223 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37224 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37225 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37226 + ioc->name, rphy, rphy->dev.release));
37227 + }
37228 +}
37229 +
37230 /* no mutex */
37231 static void
37232 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37233 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37234 return NULL;
37235 }
37236
37237 -static inline void
37238 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37239 -{
37240 - if (phy_info->port_details) {
37241 - phy_info->port_details->rphy = rphy;
37242 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37243 - ioc->name, rphy));
37244 - }
37245 -
37246 - if (rphy) {
37247 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37248 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37249 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37250 - ioc->name, rphy, rphy->dev.release));
37251 - }
37252 -}
37253 -
37254 static inline struct sas_port *
37255 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37256 {
37257 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37258 index bd096ca..332cf76 100644
37259 --- a/drivers/message/fusion/mptscsih.c
37260 +++ b/drivers/message/fusion/mptscsih.c
37261 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37262
37263 h = shost_priv(SChost);
37264
37265 - if (h) {
37266 - if (h->info_kbuf == NULL)
37267 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37268 - return h->info_kbuf;
37269 - h->info_kbuf[0] = '\0';
37270 + if (!h)
37271 + return NULL;
37272
37273 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37274 - h->info_kbuf[size-1] = '\0';
37275 - }
37276 + if (h->info_kbuf == NULL)
37277 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37278 + return h->info_kbuf;
37279 + h->info_kbuf[0] = '\0';
37280 +
37281 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37282 + h->info_kbuf[size-1] = '\0';
37283
37284 return h->info_kbuf;
37285 }
37286 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37287 index efba702..59b2c0f 100644
37288 --- a/drivers/message/i2o/i2o_config.c
37289 +++ b/drivers/message/i2o/i2o_config.c
37290 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37291 struct i2o_message *msg;
37292 unsigned int iop;
37293
37294 + pax_track_stack();
37295 +
37296 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37297 return -EFAULT;
37298
37299 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37300 index 7045c45..c07b170 100644
37301 --- a/drivers/message/i2o/i2o_proc.c
37302 +++ b/drivers/message/i2o/i2o_proc.c
37303 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37304 "Array Controller Device"
37305 };
37306
37307 -static char *chtostr(u8 * chars, int n)
37308 -{
37309 - char tmp[256];
37310 - tmp[0] = 0;
37311 - return strncat(tmp, (char *)chars, n);
37312 -}
37313 -
37314 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37315 char *group)
37316 {
37317 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37318
37319 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37320 seq_printf(seq, "%-#8x", ddm_table.module_id);
37321 - seq_printf(seq, "%-29s",
37322 - chtostr(ddm_table.module_name_version, 28));
37323 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37324 seq_printf(seq, "%9d ", ddm_table.data_size);
37325 seq_printf(seq, "%8d", ddm_table.code_size);
37326
37327 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37328
37329 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37330 seq_printf(seq, "%-#8x", dst->module_id);
37331 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37332 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37333 + seq_printf(seq, "%-.28s", dst->module_name_version);
37334 + seq_printf(seq, "%-.8s", dst->date);
37335 seq_printf(seq, "%8d ", dst->module_size);
37336 seq_printf(seq, "%8d ", dst->mpb_size);
37337 seq_printf(seq, "0x%04x", dst->module_flags);
37338 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37339 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37340 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37341 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37342 - seq_printf(seq, "Vendor info : %s\n",
37343 - chtostr((u8 *) (work32 + 2), 16));
37344 - seq_printf(seq, "Product info : %s\n",
37345 - chtostr((u8 *) (work32 + 6), 16));
37346 - seq_printf(seq, "Description : %s\n",
37347 - chtostr((u8 *) (work32 + 10), 16));
37348 - seq_printf(seq, "Product rev. : %s\n",
37349 - chtostr((u8 *) (work32 + 14), 8));
37350 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37351 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37352 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37353 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37354
37355 seq_printf(seq, "Serial number : ");
37356 print_serial_number(seq, (u8 *) (work32 + 16),
37357 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37358 }
37359
37360 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37361 - seq_printf(seq, "Module name : %s\n",
37362 - chtostr(result.module_name, 24));
37363 - seq_printf(seq, "Module revision : %s\n",
37364 - chtostr(result.module_rev, 8));
37365 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37366 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37367
37368 seq_printf(seq, "Serial number : ");
37369 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37370 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37371 return 0;
37372 }
37373
37374 - seq_printf(seq, "Device name : %s\n",
37375 - chtostr(result.device_name, 64));
37376 - seq_printf(seq, "Service name : %s\n",
37377 - chtostr(result.service_name, 64));
37378 - seq_printf(seq, "Physical name : %s\n",
37379 - chtostr(result.physical_location, 64));
37380 - seq_printf(seq, "Instance number : %s\n",
37381 - chtostr(result.instance_number, 4));
37382 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37383 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37384 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37385 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37386
37387 return 0;
37388 }
37389 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37390 index 27cf4af..b1205b8 100644
37391 --- a/drivers/message/i2o/iop.c
37392 +++ b/drivers/message/i2o/iop.c
37393 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37394
37395 spin_lock_irqsave(&c->context_list_lock, flags);
37396
37397 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37398 - atomic_inc(&c->context_list_counter);
37399 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37400 + atomic_inc_unchecked(&c->context_list_counter);
37401
37402 - entry->context = atomic_read(&c->context_list_counter);
37403 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37404
37405 list_add(&entry->list, &c->context_list);
37406
37407 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37408
37409 #if BITS_PER_LONG == 64
37410 spin_lock_init(&c->context_list_lock);
37411 - atomic_set(&c->context_list_counter, 0);
37412 + atomic_set_unchecked(&c->context_list_counter, 0);
37413 INIT_LIST_HEAD(&c->context_list);
37414 #endif
37415
37416 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37417 index 78e3e85..66c9a0d 100644
37418 --- a/drivers/mfd/ab3100-core.c
37419 +++ b/drivers/mfd/ab3100-core.c
37420 @@ -777,7 +777,7 @@ struct ab_family_id {
37421 char *name;
37422 };
37423
37424 -static const struct ab_family_id ids[] __initdata = {
37425 +static const struct ab_family_id ids[] __initconst = {
37426 /* AB3100 */
37427 {
37428 .id = 0xc0,
37429 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37430 index 8d8c932..8104515 100644
37431 --- a/drivers/mfd/wm8350-i2c.c
37432 +++ b/drivers/mfd/wm8350-i2c.c
37433 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37434 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37435 int ret;
37436
37437 + pax_track_stack();
37438 +
37439 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37440 return -EINVAL;
37441
37442 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37443 index e4ff50b..4cc3f04 100644
37444 --- a/drivers/misc/kgdbts.c
37445 +++ b/drivers/misc/kgdbts.c
37446 @@ -118,7 +118,7 @@
37447 } while (0)
37448 #define MAX_CONFIG_LEN 40
37449
37450 -static struct kgdb_io kgdbts_io_ops;
37451 +static const struct kgdb_io kgdbts_io_ops;
37452 static char get_buf[BUFMAX];
37453 static int get_buf_cnt;
37454 static char put_buf[BUFMAX];
37455 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37456 module_put(THIS_MODULE);
37457 }
37458
37459 -static struct kgdb_io kgdbts_io_ops = {
37460 +static const struct kgdb_io kgdbts_io_ops = {
37461 .name = "kgdbts",
37462 .read_char = kgdbts_get_char,
37463 .write_char = kgdbts_put_char,
37464 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37465 index 37e7cfc..67cfb76 100644
37466 --- a/drivers/misc/sgi-gru/gruhandles.c
37467 +++ b/drivers/misc/sgi-gru/gruhandles.c
37468 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37469
37470 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37471 {
37472 - atomic_long_inc(&mcs_op_statistics[op].count);
37473 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37474 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37475 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37476 if (mcs_op_statistics[op].max < clks)
37477 mcs_op_statistics[op].max = clks;
37478 }
37479 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37480 index 3f2375c..467c6e6 100644
37481 --- a/drivers/misc/sgi-gru/gruprocfs.c
37482 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37483 @@ -32,9 +32,9 @@
37484
37485 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37486
37487 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37488 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37489 {
37490 - unsigned long val = atomic_long_read(v);
37491 + unsigned long val = atomic_long_read_unchecked(v);
37492
37493 if (val)
37494 seq_printf(s, "%16lu %s\n", val, id);
37495 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37496 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37497
37498 for (op = 0; op < mcsop_last; op++) {
37499 - count = atomic_long_read(&mcs_op_statistics[op].count);
37500 - total = atomic_long_read(&mcs_op_statistics[op].total);
37501 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37502 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37503 max = mcs_op_statistics[op].max;
37504 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37505 count ? total / count : 0, max);
37506 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37507 index 46990bc..4a251b5 100644
37508 --- a/drivers/misc/sgi-gru/grutables.h
37509 +++ b/drivers/misc/sgi-gru/grutables.h
37510 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37511 * GRU statistics.
37512 */
37513 struct gru_stats_s {
37514 - atomic_long_t vdata_alloc;
37515 - atomic_long_t vdata_free;
37516 - atomic_long_t gts_alloc;
37517 - atomic_long_t gts_free;
37518 - atomic_long_t vdata_double_alloc;
37519 - atomic_long_t gts_double_allocate;
37520 - atomic_long_t assign_context;
37521 - atomic_long_t assign_context_failed;
37522 - atomic_long_t free_context;
37523 - atomic_long_t load_user_context;
37524 - atomic_long_t load_kernel_context;
37525 - atomic_long_t lock_kernel_context;
37526 - atomic_long_t unlock_kernel_context;
37527 - atomic_long_t steal_user_context;
37528 - atomic_long_t steal_kernel_context;
37529 - atomic_long_t steal_context_failed;
37530 - atomic_long_t nopfn;
37531 - atomic_long_t break_cow;
37532 - atomic_long_t asid_new;
37533 - atomic_long_t asid_next;
37534 - atomic_long_t asid_wrap;
37535 - atomic_long_t asid_reuse;
37536 - atomic_long_t intr;
37537 - atomic_long_t intr_mm_lock_failed;
37538 - atomic_long_t call_os;
37539 - atomic_long_t call_os_offnode_reference;
37540 - atomic_long_t call_os_check_for_bug;
37541 - atomic_long_t call_os_wait_queue;
37542 - atomic_long_t user_flush_tlb;
37543 - atomic_long_t user_unload_context;
37544 - atomic_long_t user_exception;
37545 - atomic_long_t set_context_option;
37546 - atomic_long_t migrate_check;
37547 - atomic_long_t migrated_retarget;
37548 - atomic_long_t migrated_unload;
37549 - atomic_long_t migrated_unload_delay;
37550 - atomic_long_t migrated_nopfn_retarget;
37551 - atomic_long_t migrated_nopfn_unload;
37552 - atomic_long_t tlb_dropin;
37553 - atomic_long_t tlb_dropin_fail_no_asid;
37554 - atomic_long_t tlb_dropin_fail_upm;
37555 - atomic_long_t tlb_dropin_fail_invalid;
37556 - atomic_long_t tlb_dropin_fail_range_active;
37557 - atomic_long_t tlb_dropin_fail_idle;
37558 - atomic_long_t tlb_dropin_fail_fmm;
37559 - atomic_long_t tlb_dropin_fail_no_exception;
37560 - atomic_long_t tlb_dropin_fail_no_exception_war;
37561 - atomic_long_t tfh_stale_on_fault;
37562 - atomic_long_t mmu_invalidate_range;
37563 - atomic_long_t mmu_invalidate_page;
37564 - atomic_long_t mmu_clear_flush_young;
37565 - atomic_long_t flush_tlb;
37566 - atomic_long_t flush_tlb_gru;
37567 - atomic_long_t flush_tlb_gru_tgh;
37568 - atomic_long_t flush_tlb_gru_zero_asid;
37569 + atomic_long_unchecked_t vdata_alloc;
37570 + atomic_long_unchecked_t vdata_free;
37571 + atomic_long_unchecked_t gts_alloc;
37572 + atomic_long_unchecked_t gts_free;
37573 + atomic_long_unchecked_t vdata_double_alloc;
37574 + atomic_long_unchecked_t gts_double_allocate;
37575 + atomic_long_unchecked_t assign_context;
37576 + atomic_long_unchecked_t assign_context_failed;
37577 + atomic_long_unchecked_t free_context;
37578 + atomic_long_unchecked_t load_user_context;
37579 + atomic_long_unchecked_t load_kernel_context;
37580 + atomic_long_unchecked_t lock_kernel_context;
37581 + atomic_long_unchecked_t unlock_kernel_context;
37582 + atomic_long_unchecked_t steal_user_context;
37583 + atomic_long_unchecked_t steal_kernel_context;
37584 + atomic_long_unchecked_t steal_context_failed;
37585 + atomic_long_unchecked_t nopfn;
37586 + atomic_long_unchecked_t break_cow;
37587 + atomic_long_unchecked_t asid_new;
37588 + atomic_long_unchecked_t asid_next;
37589 + atomic_long_unchecked_t asid_wrap;
37590 + atomic_long_unchecked_t asid_reuse;
37591 + atomic_long_unchecked_t intr;
37592 + atomic_long_unchecked_t intr_mm_lock_failed;
37593 + atomic_long_unchecked_t call_os;
37594 + atomic_long_unchecked_t call_os_offnode_reference;
37595 + atomic_long_unchecked_t call_os_check_for_bug;
37596 + atomic_long_unchecked_t call_os_wait_queue;
37597 + atomic_long_unchecked_t user_flush_tlb;
37598 + atomic_long_unchecked_t user_unload_context;
37599 + atomic_long_unchecked_t user_exception;
37600 + atomic_long_unchecked_t set_context_option;
37601 + atomic_long_unchecked_t migrate_check;
37602 + atomic_long_unchecked_t migrated_retarget;
37603 + atomic_long_unchecked_t migrated_unload;
37604 + atomic_long_unchecked_t migrated_unload_delay;
37605 + atomic_long_unchecked_t migrated_nopfn_retarget;
37606 + atomic_long_unchecked_t migrated_nopfn_unload;
37607 + atomic_long_unchecked_t tlb_dropin;
37608 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37609 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37610 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37611 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37612 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37613 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37614 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37615 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37616 + atomic_long_unchecked_t tfh_stale_on_fault;
37617 + atomic_long_unchecked_t mmu_invalidate_range;
37618 + atomic_long_unchecked_t mmu_invalidate_page;
37619 + atomic_long_unchecked_t mmu_clear_flush_young;
37620 + atomic_long_unchecked_t flush_tlb;
37621 + atomic_long_unchecked_t flush_tlb_gru;
37622 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37623 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37624
37625 - atomic_long_t copy_gpa;
37626 + atomic_long_unchecked_t copy_gpa;
37627
37628 - atomic_long_t mesq_receive;
37629 - atomic_long_t mesq_receive_none;
37630 - atomic_long_t mesq_send;
37631 - atomic_long_t mesq_send_failed;
37632 - atomic_long_t mesq_noop;
37633 - atomic_long_t mesq_send_unexpected_error;
37634 - atomic_long_t mesq_send_lb_overflow;
37635 - atomic_long_t mesq_send_qlimit_reached;
37636 - atomic_long_t mesq_send_amo_nacked;
37637 - atomic_long_t mesq_send_put_nacked;
37638 - atomic_long_t mesq_qf_not_full;
37639 - atomic_long_t mesq_qf_locked;
37640 - atomic_long_t mesq_qf_noop_not_full;
37641 - atomic_long_t mesq_qf_switch_head_failed;
37642 - atomic_long_t mesq_qf_unexpected_error;
37643 - atomic_long_t mesq_noop_unexpected_error;
37644 - atomic_long_t mesq_noop_lb_overflow;
37645 - atomic_long_t mesq_noop_qlimit_reached;
37646 - atomic_long_t mesq_noop_amo_nacked;
37647 - atomic_long_t mesq_noop_put_nacked;
37648 + atomic_long_unchecked_t mesq_receive;
37649 + atomic_long_unchecked_t mesq_receive_none;
37650 + atomic_long_unchecked_t mesq_send;
37651 + atomic_long_unchecked_t mesq_send_failed;
37652 + atomic_long_unchecked_t mesq_noop;
37653 + atomic_long_unchecked_t mesq_send_unexpected_error;
37654 + atomic_long_unchecked_t mesq_send_lb_overflow;
37655 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37656 + atomic_long_unchecked_t mesq_send_amo_nacked;
37657 + atomic_long_unchecked_t mesq_send_put_nacked;
37658 + atomic_long_unchecked_t mesq_qf_not_full;
37659 + atomic_long_unchecked_t mesq_qf_locked;
37660 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37661 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37662 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37663 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37664 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37665 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37666 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37667 + atomic_long_unchecked_t mesq_noop_put_nacked;
37668
37669 };
37670
37671 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37672 cchop_deallocate, tghop_invalidate, mcsop_last};
37673
37674 struct mcs_op_statistic {
37675 - atomic_long_t count;
37676 - atomic_long_t total;
37677 + atomic_long_unchecked_t count;
37678 + atomic_long_unchecked_t total;
37679 unsigned long max;
37680 };
37681
37682 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37683
37684 #define STAT(id) do { \
37685 if (gru_options & OPT_STATS) \
37686 - atomic_long_inc(&gru_stats.id); \
37687 + atomic_long_inc_unchecked(&gru_stats.id); \
37688 } while (0)
37689
37690 #ifdef CONFIG_SGI_GRU_DEBUG
37691 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37692 index 2275126..12a9dbfb 100644
37693 --- a/drivers/misc/sgi-xp/xp.h
37694 +++ b/drivers/misc/sgi-xp/xp.h
37695 @@ -289,7 +289,7 @@ struct xpc_interface {
37696 xpc_notify_func, void *);
37697 void (*received) (short, int, void *);
37698 enum xp_retval (*partid_to_nasids) (short, void *);
37699 -};
37700 +} __no_const;
37701
37702 extern struct xpc_interface xpc_interface;
37703
37704 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37705 index b94d5f7..7f494c5 100644
37706 --- a/drivers/misc/sgi-xp/xpc.h
37707 +++ b/drivers/misc/sgi-xp/xpc.h
37708 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37709 void (*received_payload) (struct xpc_channel *, void *);
37710 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37711 };
37712 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37713
37714 /* struct xpc_partition act_state values (for XPC HB) */
37715
37716 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37717 /* found in xpc_main.c */
37718 extern struct device *xpc_part;
37719 extern struct device *xpc_chan;
37720 -extern struct xpc_arch_operations xpc_arch_ops;
37721 +extern xpc_arch_operations_no_const xpc_arch_ops;
37722 extern int xpc_disengage_timelimit;
37723 extern int xpc_disengage_timedout;
37724 extern int xpc_activate_IRQ_rcvd;
37725 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37726 index fd3688a..7e211a4 100644
37727 --- a/drivers/misc/sgi-xp/xpc_main.c
37728 +++ b/drivers/misc/sgi-xp/xpc_main.c
37729 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37730 .notifier_call = xpc_system_die,
37731 };
37732
37733 -struct xpc_arch_operations xpc_arch_ops;
37734 +xpc_arch_operations_no_const xpc_arch_ops;
37735
37736 /*
37737 * Timer function to enforce the timelimit on the partition disengage.
37738 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37739 index 8b70e03..700bda6 100644
37740 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37741 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37742 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37743 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37744 }
37745
37746 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37747 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37748 .setup_partitions = xpc_setup_partitions_sn2,
37749 .teardown_partitions = xpc_teardown_partitions_sn2,
37750 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37751 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37752 int ret;
37753 size_t buf_size;
37754
37755 - xpc_arch_ops = xpc_arch_ops_sn2;
37756 + pax_open_kernel();
37757 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37758 + pax_close_kernel();
37759
37760 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37761 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37762 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37763 index 8e08d71..7cb8c9b 100644
37764 --- a/drivers/misc/sgi-xp/xpc_uv.c
37765 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37766 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37767 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37768 }
37769
37770 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37771 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37772 .setup_partitions = xpc_setup_partitions_uv,
37773 .teardown_partitions = xpc_teardown_partitions_uv,
37774 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37775 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37776 int
37777 xpc_init_uv(void)
37778 {
37779 - xpc_arch_ops = xpc_arch_ops_uv;
37780 + pax_open_kernel();
37781 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37782 + pax_close_kernel();
37783
37784 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37785 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37786 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37787 index 6fd20b42..650efe3 100644
37788 --- a/drivers/mmc/host/sdhci-pci.c
37789 +++ b/drivers/mmc/host/sdhci-pci.c
37790 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37791 .probe = via_probe,
37792 };
37793
37794 -static const struct pci_device_id pci_ids[] __devinitdata = {
37795 +static const struct pci_device_id pci_ids[] __devinitconst = {
37796 {
37797 .vendor = PCI_VENDOR_ID_RICOH,
37798 .device = PCI_DEVICE_ID_RICOH_R5C822,
37799 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37800 index e7563a9..5f90ce5 100644
37801 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37802 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37803 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37804 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37805 unsigned long timeo = jiffies + HZ;
37806
37807 + pax_track_stack();
37808 +
37809 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37810 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37811 goto sleep;
37812 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37813 unsigned long initial_adr;
37814 int initial_len = len;
37815
37816 + pax_track_stack();
37817 +
37818 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37819 adr += chip->start;
37820 initial_adr = adr;
37821 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37822 int retries = 3;
37823 int ret;
37824
37825 + pax_track_stack();
37826 +
37827 adr += chip->start;
37828
37829 retry:
37830 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37831 index 0667a67..3ab97ed 100644
37832 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37833 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37834 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37835 unsigned long cmd_addr;
37836 struct cfi_private *cfi = map->fldrv_priv;
37837
37838 + pax_track_stack();
37839 +
37840 adr += chip->start;
37841
37842 /* Ensure cmd read/writes are aligned. */
37843 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37844 DECLARE_WAITQUEUE(wait, current);
37845 int wbufsize, z;
37846
37847 + pax_track_stack();
37848 +
37849 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37850 if (adr & (map_bankwidth(map)-1))
37851 return -EINVAL;
37852 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37853 DECLARE_WAITQUEUE(wait, current);
37854 int ret = 0;
37855
37856 + pax_track_stack();
37857 +
37858 adr += chip->start;
37859
37860 /* Let's determine this according to the interleave only once */
37861 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37862 unsigned long timeo = jiffies + HZ;
37863 DECLARE_WAITQUEUE(wait, current);
37864
37865 + pax_track_stack();
37866 +
37867 adr += chip->start;
37868
37869 /* Let's determine this according to the interleave only once */
37870 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37871 unsigned long timeo = jiffies + HZ;
37872 DECLARE_WAITQUEUE(wait, current);
37873
37874 + pax_track_stack();
37875 +
37876 adr += chip->start;
37877
37878 /* Let's determine this according to the interleave only once */
37879 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37880 index 5bf5f46..c5de373 100644
37881 --- a/drivers/mtd/devices/doc2000.c
37882 +++ b/drivers/mtd/devices/doc2000.c
37883 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37884
37885 /* The ECC will not be calculated correctly if less than 512 is written */
37886 /* DBB-
37887 - if (len != 0x200 && eccbuf)
37888 + if (len != 0x200)
37889 printk(KERN_WARNING
37890 "ECC needs a full sector write (adr: %lx size %lx)\n",
37891 (long) to, (long) len);
37892 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37893 index 0990f78..bb4e8a4 100644
37894 --- a/drivers/mtd/devices/doc2001.c
37895 +++ b/drivers/mtd/devices/doc2001.c
37896 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37897 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37898
37899 /* Don't allow read past end of device */
37900 - if (from >= this->totlen)
37901 + if (from >= this->totlen || !len)
37902 return -EINVAL;
37903
37904 /* Don't allow a single read to cross a 512-byte block boundary */
37905 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37906 index e56d6b4..f07e6cf 100644
37907 --- a/drivers/mtd/ftl.c
37908 +++ b/drivers/mtd/ftl.c
37909 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37910 loff_t offset;
37911 uint16_t srcunitswap = cpu_to_le16(srcunit);
37912
37913 + pax_track_stack();
37914 +
37915 eun = &part->EUNInfo[srcunit];
37916 xfer = &part->XferInfo[xferunit];
37917 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37918 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37919 index 8aca552..146446e 100755
37920 --- a/drivers/mtd/inftlcore.c
37921 +++ b/drivers/mtd/inftlcore.c
37922 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37923 struct inftl_oob oob;
37924 size_t retlen;
37925
37926 + pax_track_stack();
37927 +
37928 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37929 "pending=%d)\n", inftl, thisVUC, pendingblock);
37930
37931 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37932 index 32e82ae..ed50953 100644
37933 --- a/drivers/mtd/inftlmount.c
37934 +++ b/drivers/mtd/inftlmount.c
37935 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37936 struct INFTLPartition *ip;
37937 size_t retlen;
37938
37939 + pax_track_stack();
37940 +
37941 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37942
37943 /*
37944 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37945 index 79bf40f..fe5f8fd 100644
37946 --- a/drivers/mtd/lpddr/qinfo_probe.c
37947 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37948 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37949 {
37950 map_word pfow_val[4];
37951
37952 + pax_track_stack();
37953 +
37954 /* Check identification string */
37955 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37956 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37957 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37958 index 726a1b8..f46b460 100644
37959 --- a/drivers/mtd/mtdchar.c
37960 +++ b/drivers/mtd/mtdchar.c
37961 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37962 u_long size;
37963 struct mtd_info_user info;
37964
37965 + pax_track_stack();
37966 +
37967 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37968
37969 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37970 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37971 index 1002e18..26d82d5 100644
37972 --- a/drivers/mtd/nftlcore.c
37973 +++ b/drivers/mtd/nftlcore.c
37974 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37975 int inplace = 1;
37976 size_t retlen;
37977
37978 + pax_track_stack();
37979 +
37980 memset(BlockMap, 0xff, sizeof(BlockMap));
37981 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37982
37983 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37984 index 8b22b18..6fada85 100644
37985 --- a/drivers/mtd/nftlmount.c
37986 +++ b/drivers/mtd/nftlmount.c
37987 @@ -23,6 +23,7 @@
37988 #include <asm/errno.h>
37989 #include <linux/delay.h>
37990 #include <linux/slab.h>
37991 +#include <linux/sched.h>
37992 #include <linux/mtd/mtd.h>
37993 #include <linux/mtd/nand.h>
37994 #include <linux/mtd/nftl.h>
37995 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37996 struct mtd_info *mtd = nftl->mbd.mtd;
37997 unsigned int i;
37998
37999 + pax_track_stack();
38000 +
38001 /* Assume logical EraseSize == physical erasesize for starting the scan.
38002 We'll sort it out later if we find a MediaHeader which says otherwise */
38003 /* Actually, we won't. The new DiskOnChip driver has already scanned
38004 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38005 index 14cec04..d775b87 100644
38006 --- a/drivers/mtd/ubi/build.c
38007 +++ b/drivers/mtd/ubi/build.c
38008 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38009 static int __init bytes_str_to_int(const char *str)
38010 {
38011 char *endp;
38012 - unsigned long result;
38013 + unsigned long result, scale = 1;
38014
38015 result = simple_strtoul(str, &endp, 0);
38016 if (str == endp || result >= INT_MAX) {
38017 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38018
38019 switch (*endp) {
38020 case 'G':
38021 - result *= 1024;
38022 + scale *= 1024;
38023 case 'M':
38024 - result *= 1024;
38025 + scale *= 1024;
38026 case 'K':
38027 - result *= 1024;
38028 + scale *= 1024;
38029 if (endp[1] == 'i' && endp[2] == 'B')
38030 endp += 2;
38031 case '\0':
38032 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38033 return -EINVAL;
38034 }
38035
38036 - return result;
38037 + if ((intoverflow_t)result*scale >= INT_MAX) {
38038 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38039 + str);
38040 + return -EINVAL;
38041 + }
38042 +
38043 + return result*scale;
38044 }
38045
38046 /**
38047 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38048 index ab68886..ca405e8 100644
38049 --- a/drivers/net/atlx/atl2.c
38050 +++ b/drivers/net/atlx/atl2.c
38051 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38052 */
38053
38054 #define ATL2_PARAM(X, desc) \
38055 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38056 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38057 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38058 MODULE_PARM_DESC(X, desc);
38059 #else
38060 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38061 index 4874b2b..67f8526 100644
38062 --- a/drivers/net/bnx2.c
38063 +++ b/drivers/net/bnx2.c
38064 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38065 int rc = 0;
38066 u32 magic, csum;
38067
38068 + pax_track_stack();
38069 +
38070 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38071 goto test_nvram_done;
38072
38073 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38074 index fd3eb07..8a6978d 100644
38075 --- a/drivers/net/cxgb3/l2t.h
38076 +++ b/drivers/net/cxgb3/l2t.h
38077 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38078 */
38079 struct l2t_skb_cb {
38080 arp_failure_handler_func arp_failure_handler;
38081 -};
38082 +} __no_const;
38083
38084 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38085
38086 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38087 index 032cfe0..411af379 100644
38088 --- a/drivers/net/cxgb3/t3_hw.c
38089 +++ b/drivers/net/cxgb3/t3_hw.c
38090 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38091 int i, addr, ret;
38092 struct t3_vpd vpd;
38093
38094 + pax_track_stack();
38095 +
38096 /*
38097 * Card information is normally at VPD_BASE but some early cards had
38098 * it at 0.
38099 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38100 index d1e0563..b9e129c 100644
38101 --- a/drivers/net/e1000e/82571.c
38102 +++ b/drivers/net/e1000e/82571.c
38103 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38104 {
38105 struct e1000_hw *hw = &adapter->hw;
38106 struct e1000_mac_info *mac = &hw->mac;
38107 - struct e1000_mac_operations *func = &mac->ops;
38108 + e1000_mac_operations_no_const *func = &mac->ops;
38109 u32 swsm = 0;
38110 u32 swsm2 = 0;
38111 bool force_clear_smbi = false;
38112 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38113 temp = er32(ICRXDMTC);
38114 }
38115
38116 -static struct e1000_mac_operations e82571_mac_ops = {
38117 +static const struct e1000_mac_operations e82571_mac_ops = {
38118 /* .check_mng_mode: mac type dependent */
38119 /* .check_for_link: media type dependent */
38120 .id_led_init = e1000e_id_led_init,
38121 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38122 .setup_led = e1000e_setup_led_generic,
38123 };
38124
38125 -static struct e1000_phy_operations e82_phy_ops_igp = {
38126 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38127 .acquire_phy = e1000_get_hw_semaphore_82571,
38128 .check_reset_block = e1000e_check_reset_block_generic,
38129 .commit_phy = NULL,
38130 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38131 .cfg_on_link_up = NULL,
38132 };
38133
38134 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38135 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38136 .acquire_phy = e1000_get_hw_semaphore_82571,
38137 .check_reset_block = e1000e_check_reset_block_generic,
38138 .commit_phy = e1000e_phy_sw_reset,
38139 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38140 .cfg_on_link_up = NULL,
38141 };
38142
38143 -static struct e1000_phy_operations e82_phy_ops_bm = {
38144 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38145 .acquire_phy = e1000_get_hw_semaphore_82571,
38146 .check_reset_block = e1000e_check_reset_block_generic,
38147 .commit_phy = e1000e_phy_sw_reset,
38148 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38149 .cfg_on_link_up = NULL,
38150 };
38151
38152 -static struct e1000_nvm_operations e82571_nvm_ops = {
38153 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38154 .acquire_nvm = e1000_acquire_nvm_82571,
38155 .read_nvm = e1000e_read_nvm_eerd,
38156 .release_nvm = e1000_release_nvm_82571,
38157 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38158 index 47db9bd..fa58ccd 100644
38159 --- a/drivers/net/e1000e/e1000.h
38160 +++ b/drivers/net/e1000e/e1000.h
38161 @@ -375,9 +375,9 @@ struct e1000_info {
38162 u32 pba;
38163 u32 max_hw_frame_size;
38164 s32 (*get_variants)(struct e1000_adapter *);
38165 - struct e1000_mac_operations *mac_ops;
38166 - struct e1000_phy_operations *phy_ops;
38167 - struct e1000_nvm_operations *nvm_ops;
38168 + const struct e1000_mac_operations *mac_ops;
38169 + const struct e1000_phy_operations *phy_ops;
38170 + const struct e1000_nvm_operations *nvm_ops;
38171 };
38172
38173 /* hardware capability, feature, and workaround flags */
38174 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38175 index ae5d736..e9a93a1 100644
38176 --- a/drivers/net/e1000e/es2lan.c
38177 +++ b/drivers/net/e1000e/es2lan.c
38178 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38179 {
38180 struct e1000_hw *hw = &adapter->hw;
38181 struct e1000_mac_info *mac = &hw->mac;
38182 - struct e1000_mac_operations *func = &mac->ops;
38183 + e1000_mac_operations_no_const *func = &mac->ops;
38184
38185 /* Set media type */
38186 switch (adapter->pdev->device) {
38187 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38188 temp = er32(ICRXDMTC);
38189 }
38190
38191 -static struct e1000_mac_operations es2_mac_ops = {
38192 +static const struct e1000_mac_operations es2_mac_ops = {
38193 .id_led_init = e1000e_id_led_init,
38194 .check_mng_mode = e1000e_check_mng_mode_generic,
38195 /* check_for_link dependent on media type */
38196 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38197 .setup_led = e1000e_setup_led_generic,
38198 };
38199
38200 -static struct e1000_phy_operations es2_phy_ops = {
38201 +static const struct e1000_phy_operations es2_phy_ops = {
38202 .acquire_phy = e1000_acquire_phy_80003es2lan,
38203 .check_reset_block = e1000e_check_reset_block_generic,
38204 .commit_phy = e1000e_phy_sw_reset,
38205 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38206 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38207 };
38208
38209 -static struct e1000_nvm_operations es2_nvm_ops = {
38210 +static const struct e1000_nvm_operations es2_nvm_ops = {
38211 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38212 .read_nvm = e1000e_read_nvm_eerd,
38213 .release_nvm = e1000_release_nvm_80003es2lan,
38214 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38215 index 11f3b7c..6381887 100644
38216 --- a/drivers/net/e1000e/hw.h
38217 +++ b/drivers/net/e1000e/hw.h
38218 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38219 s32 (*setup_physical_interface)(struct e1000_hw *);
38220 s32 (*setup_led)(struct e1000_hw *);
38221 };
38222 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38223
38224 /* Function pointers for the PHY. */
38225 struct e1000_phy_operations {
38226 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38227 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38228 s32 (*cfg_on_link_up)(struct e1000_hw *);
38229 };
38230 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38231
38232 /* Function pointers for the NVM. */
38233 struct e1000_nvm_operations {
38234 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38235 s32 (*validate_nvm)(struct e1000_hw *);
38236 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38237 };
38238 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38239
38240 struct e1000_mac_info {
38241 - struct e1000_mac_operations ops;
38242 + e1000_mac_operations_no_const ops;
38243
38244 u8 addr[6];
38245 u8 perm_addr[6];
38246 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38247 };
38248
38249 struct e1000_phy_info {
38250 - struct e1000_phy_operations ops;
38251 + e1000_phy_operations_no_const ops;
38252
38253 enum e1000_phy_type type;
38254
38255 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38256 };
38257
38258 struct e1000_nvm_info {
38259 - struct e1000_nvm_operations ops;
38260 + e1000_nvm_operations_no_const ops;
38261
38262 enum e1000_nvm_type type;
38263 enum e1000_nvm_override override;
38264 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38265 index de39f9a..e28d3e0 100644
38266 --- a/drivers/net/e1000e/ich8lan.c
38267 +++ b/drivers/net/e1000e/ich8lan.c
38268 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38269 }
38270 }
38271
38272 -static struct e1000_mac_operations ich8_mac_ops = {
38273 +static const struct e1000_mac_operations ich8_mac_ops = {
38274 .id_led_init = e1000e_id_led_init,
38275 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38276 .check_for_link = e1000_check_for_copper_link_ich8lan,
38277 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38278 /* id_led_init dependent on mac type */
38279 };
38280
38281 -static struct e1000_phy_operations ich8_phy_ops = {
38282 +static const struct e1000_phy_operations ich8_phy_ops = {
38283 .acquire_phy = e1000_acquire_swflag_ich8lan,
38284 .check_reset_block = e1000_check_reset_block_ich8lan,
38285 .commit_phy = NULL,
38286 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38287 .write_phy_reg = e1000e_write_phy_reg_igp,
38288 };
38289
38290 -static struct e1000_nvm_operations ich8_nvm_ops = {
38291 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38292 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38293 .read_nvm = e1000_read_nvm_ich8lan,
38294 .release_nvm = e1000_release_nvm_ich8lan,
38295 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38296 index 18d5fbb..542d96d 100644
38297 --- a/drivers/net/fealnx.c
38298 +++ b/drivers/net/fealnx.c
38299 @@ -151,7 +151,7 @@ struct chip_info {
38300 int flags;
38301 };
38302
38303 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38304 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38305 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38306 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38307 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38308 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38309 index 0e5b54b..b503f82 100644
38310 --- a/drivers/net/hamradio/6pack.c
38311 +++ b/drivers/net/hamradio/6pack.c
38312 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38313 unsigned char buf[512];
38314 int count1;
38315
38316 + pax_track_stack();
38317 +
38318 if (!count)
38319 return;
38320
38321 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38322 index 5862282..7cce8cb 100644
38323 --- a/drivers/net/ibmveth.c
38324 +++ b/drivers/net/ibmveth.c
38325 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38326 NULL,
38327 };
38328
38329 -static struct sysfs_ops veth_pool_ops = {
38330 +static const struct sysfs_ops veth_pool_ops = {
38331 .show = veth_pool_show,
38332 .store = veth_pool_store,
38333 };
38334 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38335 index d617f2d..57b5309 100644
38336 --- a/drivers/net/igb/e1000_82575.c
38337 +++ b/drivers/net/igb/e1000_82575.c
38338 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38339 wr32(E1000_VT_CTL, vt_ctl);
38340 }
38341
38342 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38343 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38344 .reset_hw = igb_reset_hw_82575,
38345 .init_hw = igb_init_hw_82575,
38346 .check_for_link = igb_check_for_link_82575,
38347 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38348 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38349 };
38350
38351 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38352 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38353 .acquire = igb_acquire_phy_82575,
38354 .get_cfg_done = igb_get_cfg_done_82575,
38355 .release = igb_release_phy_82575,
38356 };
38357
38358 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38359 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38360 .acquire = igb_acquire_nvm_82575,
38361 .read = igb_read_nvm_eerd,
38362 .release = igb_release_nvm_82575,
38363 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38364 index 72081df..d855cf5 100644
38365 --- a/drivers/net/igb/e1000_hw.h
38366 +++ b/drivers/net/igb/e1000_hw.h
38367 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38368 s32 (*read_mac_addr)(struct e1000_hw *);
38369 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38370 };
38371 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38372
38373 struct e1000_phy_operations {
38374 s32 (*acquire)(struct e1000_hw *);
38375 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38376 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38377 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38378 };
38379 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38380
38381 struct e1000_nvm_operations {
38382 s32 (*acquire)(struct e1000_hw *);
38383 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38384 void (*release)(struct e1000_hw *);
38385 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38386 };
38387 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38388
38389 struct e1000_info {
38390 s32 (*get_invariants)(struct e1000_hw *);
38391 @@ -321,7 +324,7 @@ struct e1000_info {
38392 extern const struct e1000_info e1000_82575_info;
38393
38394 struct e1000_mac_info {
38395 - struct e1000_mac_operations ops;
38396 + e1000_mac_operations_no_const ops;
38397
38398 u8 addr[6];
38399 u8 perm_addr[6];
38400 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38401 };
38402
38403 struct e1000_phy_info {
38404 - struct e1000_phy_operations ops;
38405 + e1000_phy_operations_no_const ops;
38406
38407 enum e1000_phy_type type;
38408
38409 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38410 };
38411
38412 struct e1000_nvm_info {
38413 - struct e1000_nvm_operations ops;
38414 + e1000_nvm_operations_no_const ops;
38415
38416 enum e1000_nvm_type type;
38417 enum e1000_nvm_override override;
38418 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38419 s32 (*check_for_ack)(struct e1000_hw *, u16);
38420 s32 (*check_for_rst)(struct e1000_hw *, u16);
38421 };
38422 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38423
38424 struct e1000_mbx_stats {
38425 u32 msgs_tx;
38426 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38427 };
38428
38429 struct e1000_mbx_info {
38430 - struct e1000_mbx_operations ops;
38431 + e1000_mbx_operations_no_const ops;
38432 struct e1000_mbx_stats stats;
38433 u32 timeout;
38434 u32 usec_delay;
38435 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38436 index 1e8ce37..549c453 100644
38437 --- a/drivers/net/igbvf/vf.h
38438 +++ b/drivers/net/igbvf/vf.h
38439 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38440 s32 (*read_mac_addr)(struct e1000_hw *);
38441 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38442 };
38443 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38444
38445 struct e1000_mac_info {
38446 - struct e1000_mac_operations ops;
38447 + e1000_mac_operations_no_const ops;
38448 u8 addr[6];
38449 u8 perm_addr[6];
38450
38451 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38452 s32 (*check_for_ack)(struct e1000_hw *);
38453 s32 (*check_for_rst)(struct e1000_hw *);
38454 };
38455 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38456
38457 struct e1000_mbx_stats {
38458 u32 msgs_tx;
38459 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38460 };
38461
38462 struct e1000_mbx_info {
38463 - struct e1000_mbx_operations ops;
38464 + e1000_mbx_operations_no_const ops;
38465 struct e1000_mbx_stats stats;
38466 u32 timeout;
38467 u32 usec_delay;
38468 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38469 index aa7286b..a61394f 100644
38470 --- a/drivers/net/iseries_veth.c
38471 +++ b/drivers/net/iseries_veth.c
38472 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38473 NULL
38474 };
38475
38476 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38477 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38478 .show = veth_cnx_attribute_show
38479 };
38480
38481 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38482 NULL
38483 };
38484
38485 -static struct sysfs_ops veth_port_sysfs_ops = {
38486 +static const struct sysfs_ops veth_port_sysfs_ops = {
38487 .show = veth_port_attribute_show
38488 };
38489
38490 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38491 index 8aa44dc..fa1e797 100644
38492 --- a/drivers/net/ixgb/ixgb_main.c
38493 +++ b/drivers/net/ixgb/ixgb_main.c
38494 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38495 u32 rctl;
38496 int i;
38497
38498 + pax_track_stack();
38499 +
38500 /* Check for Promiscuous and All Multicast modes */
38501
38502 rctl = IXGB_READ_REG(hw, RCTL);
38503 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38504 index af35e1d..8781785 100644
38505 --- a/drivers/net/ixgb/ixgb_param.c
38506 +++ b/drivers/net/ixgb/ixgb_param.c
38507 @@ -260,6 +260,9 @@ void __devinit
38508 ixgb_check_options(struct ixgb_adapter *adapter)
38509 {
38510 int bd = adapter->bd_number;
38511 +
38512 + pax_track_stack();
38513 +
38514 if (bd >= IXGB_MAX_NIC) {
38515 printk(KERN_NOTICE
38516 "Warning: no configuration for board #%i\n", bd);
38517 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38518 index b17aa73..ed74540 100644
38519 --- a/drivers/net/ixgbe/ixgbe_type.h
38520 +++ b/drivers/net/ixgbe/ixgbe_type.h
38521 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38522 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38523 s32 (*update_checksum)(struct ixgbe_hw *);
38524 };
38525 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38526
38527 struct ixgbe_mac_operations {
38528 s32 (*init_hw)(struct ixgbe_hw *);
38529 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38530 /* Flow Control */
38531 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38532 };
38533 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38534
38535 struct ixgbe_phy_operations {
38536 s32 (*identify)(struct ixgbe_hw *);
38537 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38538 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38539 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38540 };
38541 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38542
38543 struct ixgbe_eeprom_info {
38544 - struct ixgbe_eeprom_operations ops;
38545 + ixgbe_eeprom_operations_no_const ops;
38546 enum ixgbe_eeprom_type type;
38547 u32 semaphore_delay;
38548 u16 word_size;
38549 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38550 };
38551
38552 struct ixgbe_mac_info {
38553 - struct ixgbe_mac_operations ops;
38554 + ixgbe_mac_operations_no_const ops;
38555 enum ixgbe_mac_type type;
38556 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38557 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38558 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38559 };
38560
38561 struct ixgbe_phy_info {
38562 - struct ixgbe_phy_operations ops;
38563 + ixgbe_phy_operations_no_const ops;
38564 struct mdio_if_info mdio;
38565 enum ixgbe_phy_type type;
38566 u32 id;
38567 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38568 index 291a505..2543756 100644
38569 --- a/drivers/net/mlx4/main.c
38570 +++ b/drivers/net/mlx4/main.c
38571 @@ -38,6 +38,7 @@
38572 #include <linux/errno.h>
38573 #include <linux/pci.h>
38574 #include <linux/dma-mapping.h>
38575 +#include <linux/sched.h>
38576
38577 #include <linux/mlx4/device.h>
38578 #include <linux/mlx4/doorbell.h>
38579 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38580 u64 icm_size;
38581 int err;
38582
38583 + pax_track_stack();
38584 +
38585 err = mlx4_QUERY_FW(dev);
38586 if (err) {
38587 if (err == -EACCES)
38588 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38589 index 2dce134..fa5ce75 100644
38590 --- a/drivers/net/niu.c
38591 +++ b/drivers/net/niu.c
38592 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38593 int i, num_irqs, err;
38594 u8 first_ldg;
38595
38596 + pax_track_stack();
38597 +
38598 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38599 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38600 ldg_num_map[i] = first_ldg + i;
38601 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38602 index c1b3f09..97cd8c4 100644
38603 --- a/drivers/net/pcnet32.c
38604 +++ b/drivers/net/pcnet32.c
38605 @@ -79,7 +79,7 @@ static int cards_found;
38606 /*
38607 * VLB I/O addresses
38608 */
38609 -static unsigned int pcnet32_portlist[] __initdata =
38610 +static unsigned int pcnet32_portlist[] __devinitdata =
38611 { 0x300, 0x320, 0x340, 0x360, 0 };
38612
38613 static int pcnet32_debug = 0;
38614 @@ -267,7 +267,7 @@ struct pcnet32_private {
38615 struct sk_buff **rx_skbuff;
38616 dma_addr_t *tx_dma_addr;
38617 dma_addr_t *rx_dma_addr;
38618 - struct pcnet32_access a;
38619 + struct pcnet32_access *a;
38620 spinlock_t lock; /* Guard lock */
38621 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38622 unsigned int rx_ring_size; /* current rx ring size */
38623 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38624 u16 val;
38625
38626 netif_wake_queue(dev);
38627 - val = lp->a.read_csr(ioaddr, CSR3);
38628 + val = lp->a->read_csr(ioaddr, CSR3);
38629 val &= 0x00ff;
38630 - lp->a.write_csr(ioaddr, CSR3, val);
38631 + lp->a->write_csr(ioaddr, CSR3, val);
38632 napi_enable(&lp->napi);
38633 }
38634
38635 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38636 r = mii_link_ok(&lp->mii_if);
38637 } else if (lp->chip_version >= PCNET32_79C970A) {
38638 ulong ioaddr = dev->base_addr; /* card base I/O address */
38639 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38640 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38641 } else { /* can not detect link on really old chips */
38642 r = 1;
38643 }
38644 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38645 pcnet32_netif_stop(dev);
38646
38647 spin_lock_irqsave(&lp->lock, flags);
38648 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38649 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38650
38651 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38652
38653 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38654 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38655 {
38656 struct pcnet32_private *lp = netdev_priv(dev);
38657 - struct pcnet32_access *a = &lp->a; /* access to registers */
38658 + struct pcnet32_access *a = lp->a; /* access to registers */
38659 ulong ioaddr = dev->base_addr; /* card base I/O address */
38660 struct sk_buff *skb; /* sk buff */
38661 int x, i; /* counters */
38662 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38663 pcnet32_netif_stop(dev);
38664
38665 spin_lock_irqsave(&lp->lock, flags);
38666 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38667 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38668
38669 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38670
38671 /* Reset the PCNET32 */
38672 - lp->a.reset(ioaddr);
38673 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38674 + lp->a->reset(ioaddr);
38675 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38676
38677 /* switch pcnet32 to 32bit mode */
38678 - lp->a.write_bcr(ioaddr, 20, 2);
38679 + lp->a->write_bcr(ioaddr, 20, 2);
38680
38681 /* purge & init rings but don't actually restart */
38682 pcnet32_restart(dev, 0x0000);
38683
38684 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38685 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38686
38687 /* Initialize Transmit buffers. */
38688 size = data_len + 15;
38689 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38690
38691 /* set int loopback in CSR15 */
38692 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38693 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38694 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38695
38696 teststatus = cpu_to_le16(0x8000);
38697 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38698 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38699
38700 /* Check status of descriptors */
38701 for (x = 0; x < numbuffs; x++) {
38702 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38703 }
38704 }
38705
38706 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38707 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38708 wmb();
38709 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38710 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38711 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38712 pcnet32_restart(dev, CSR0_NORMAL);
38713 } else {
38714 pcnet32_purge_rx_ring(dev);
38715 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38716 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38717 }
38718 spin_unlock_irqrestore(&lp->lock, flags);
38719
38720 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38721 static void pcnet32_led_blink_callback(struct net_device *dev)
38722 {
38723 struct pcnet32_private *lp = netdev_priv(dev);
38724 - struct pcnet32_access *a = &lp->a;
38725 + struct pcnet32_access *a = lp->a;
38726 ulong ioaddr = dev->base_addr;
38727 unsigned long flags;
38728 int i;
38729 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38730 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38731 {
38732 struct pcnet32_private *lp = netdev_priv(dev);
38733 - struct pcnet32_access *a = &lp->a;
38734 + struct pcnet32_access *a = lp->a;
38735 ulong ioaddr = dev->base_addr;
38736 unsigned long flags;
38737 int i, regs[4];
38738 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38739 {
38740 int csr5;
38741 struct pcnet32_private *lp = netdev_priv(dev);
38742 - struct pcnet32_access *a = &lp->a;
38743 + struct pcnet32_access *a = lp->a;
38744 ulong ioaddr = dev->base_addr;
38745 int ticks;
38746
38747 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38748 spin_lock_irqsave(&lp->lock, flags);
38749 if (pcnet32_tx(dev)) {
38750 /* reset the chip to clear the error condition, then restart */
38751 - lp->a.reset(ioaddr);
38752 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38753 + lp->a->reset(ioaddr);
38754 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38755 pcnet32_restart(dev, CSR0_START);
38756 netif_wake_queue(dev);
38757 }
38758 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38759 __napi_complete(napi);
38760
38761 /* clear interrupt masks */
38762 - val = lp->a.read_csr(ioaddr, CSR3);
38763 + val = lp->a->read_csr(ioaddr, CSR3);
38764 val &= 0x00ff;
38765 - lp->a.write_csr(ioaddr, CSR3, val);
38766 + lp->a->write_csr(ioaddr, CSR3, val);
38767
38768 /* Set interrupt enable. */
38769 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38770 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38771
38772 spin_unlock_irqrestore(&lp->lock, flags);
38773 }
38774 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38775 int i, csr0;
38776 u16 *buff = ptr;
38777 struct pcnet32_private *lp = netdev_priv(dev);
38778 - struct pcnet32_access *a = &lp->a;
38779 + struct pcnet32_access *a = lp->a;
38780 ulong ioaddr = dev->base_addr;
38781 unsigned long flags;
38782
38783 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38784 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38785 if (lp->phymask & (1 << j)) {
38786 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38787 - lp->a.write_bcr(ioaddr, 33,
38788 + lp->a->write_bcr(ioaddr, 33,
38789 (j << 5) | i);
38790 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38791 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38792 }
38793 }
38794 }
38795 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38796 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38797 lp->options |= PCNET32_PORT_FD;
38798
38799 - lp->a = *a;
38800 + lp->a = a;
38801
38802 /* prior to register_netdev, dev->name is not yet correct */
38803 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38804 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38805 if (lp->mii) {
38806 /* lp->phycount and lp->phymask are set to 0 by memset above */
38807
38808 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38809 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38810 /* scan for PHYs */
38811 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38812 unsigned short id1, id2;
38813 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38814 "Found PHY %04x:%04x at address %d.\n",
38815 id1, id2, i);
38816 }
38817 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38818 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38819 if (lp->phycount > 1) {
38820 lp->options |= PCNET32_PORT_MII;
38821 }
38822 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38823 }
38824
38825 /* Reset the PCNET32 */
38826 - lp->a.reset(ioaddr);
38827 + lp->a->reset(ioaddr);
38828
38829 /* switch pcnet32 to 32bit mode */
38830 - lp->a.write_bcr(ioaddr, 20, 2);
38831 + lp->a->write_bcr(ioaddr, 20, 2);
38832
38833 if (netif_msg_ifup(lp))
38834 printk(KERN_DEBUG
38835 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38836 (u32) (lp->init_dma_addr));
38837
38838 /* set/reset autoselect bit */
38839 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38840 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38841 if (lp->options & PCNET32_PORT_ASEL)
38842 val |= 2;
38843 - lp->a.write_bcr(ioaddr, 2, val);
38844 + lp->a->write_bcr(ioaddr, 2, val);
38845
38846 /* handle full duplex setting */
38847 if (lp->mii_if.full_duplex) {
38848 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38849 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38850 if (lp->options & PCNET32_PORT_FD) {
38851 val |= 1;
38852 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38853 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38854 if (lp->chip_version == 0x2627)
38855 val |= 3;
38856 }
38857 - lp->a.write_bcr(ioaddr, 9, val);
38858 + lp->a->write_bcr(ioaddr, 9, val);
38859 }
38860
38861 /* set/reset GPSI bit in test register */
38862 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38863 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38864 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38865 val |= 0x10;
38866 - lp->a.write_csr(ioaddr, 124, val);
38867 + lp->a->write_csr(ioaddr, 124, val);
38868
38869 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38870 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38871 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38872 * duplex, and/or enable auto negotiation, and clear DANAS
38873 */
38874 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38875 - lp->a.write_bcr(ioaddr, 32,
38876 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38877 + lp->a->write_bcr(ioaddr, 32,
38878 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38879 /* disable Auto Negotiation, set 10Mpbs, HD */
38880 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38881 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38882 if (lp->options & PCNET32_PORT_FD)
38883 val |= 0x10;
38884 if (lp->options & PCNET32_PORT_100)
38885 val |= 0x08;
38886 - lp->a.write_bcr(ioaddr, 32, val);
38887 + lp->a->write_bcr(ioaddr, 32, val);
38888 } else {
38889 if (lp->options & PCNET32_PORT_ASEL) {
38890 - lp->a.write_bcr(ioaddr, 32,
38891 - lp->a.read_bcr(ioaddr,
38892 + lp->a->write_bcr(ioaddr, 32,
38893 + lp->a->read_bcr(ioaddr,
38894 32) | 0x0080);
38895 /* enable auto negotiate, setup, disable fd */
38896 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38897 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38898 val |= 0x20;
38899 - lp->a.write_bcr(ioaddr, 32, val);
38900 + lp->a->write_bcr(ioaddr, 32, val);
38901 }
38902 }
38903 } else {
38904 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38905 * There is really no good other way to handle multiple PHYs
38906 * other than turning off all automatics
38907 */
38908 - val = lp->a.read_bcr(ioaddr, 2);
38909 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38910 - val = lp->a.read_bcr(ioaddr, 32);
38911 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38912 + val = lp->a->read_bcr(ioaddr, 2);
38913 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38914 + val = lp->a->read_bcr(ioaddr, 32);
38915 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38916
38917 if (!(lp->options & PCNET32_PORT_ASEL)) {
38918 /* setup ecmd */
38919 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38920 ecmd.speed =
38921 lp->
38922 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38923 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38924 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38925
38926 if (lp->options & PCNET32_PORT_FD) {
38927 ecmd.duplex = DUPLEX_FULL;
38928 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38929 ecmd.duplex = DUPLEX_HALF;
38930 bcr9 |= ~(1 << 0);
38931 }
38932 - lp->a.write_bcr(ioaddr, 9, bcr9);
38933 + lp->a->write_bcr(ioaddr, 9, bcr9);
38934 }
38935
38936 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38937 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38938
38939 #ifdef DO_DXSUFLO
38940 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38941 - val = lp->a.read_csr(ioaddr, CSR3);
38942 + val = lp->a->read_csr(ioaddr, CSR3);
38943 val |= 0x40;
38944 - lp->a.write_csr(ioaddr, CSR3, val);
38945 + lp->a->write_csr(ioaddr, CSR3, val);
38946 }
38947 #endif
38948
38949 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38950 napi_enable(&lp->napi);
38951
38952 /* Re-initialize the PCNET32, and start it when done. */
38953 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38954 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38955 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38956 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38957
38958 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38959 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38960 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38961 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38962
38963 netif_start_queue(dev);
38964
38965 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38966
38967 i = 0;
38968 while (i++ < 100)
38969 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38970 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38971 break;
38972 /*
38973 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38974 * reports that doing so triggers a bug in the '974.
38975 */
38976 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38977 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38978
38979 if (netif_msg_ifup(lp))
38980 printk(KERN_DEBUG
38981 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38982 dev->name, i,
38983 (u32) (lp->init_dma_addr),
38984 - lp->a.read_csr(ioaddr, CSR0));
38985 + lp->a->read_csr(ioaddr, CSR0));
38986
38987 spin_unlock_irqrestore(&lp->lock, flags);
38988
38989 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38990 * Switch back to 16bit mode to avoid problems with dumb
38991 * DOS packet driver after a warm reboot
38992 */
38993 - lp->a.write_bcr(ioaddr, 20, 4);
38994 + lp->a->write_bcr(ioaddr, 20, 4);
38995
38996 err_free_irq:
38997 spin_unlock_irqrestore(&lp->lock, flags);
38998 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38999
39000 /* wait for stop */
39001 for (i = 0; i < 100; i++)
39002 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39003 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39004 break;
39005
39006 if (i >= 100 && netif_msg_drv(lp))
39007 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39008 return;
39009
39010 /* ReInit Ring */
39011 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39012 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39013 i = 0;
39014 while (i++ < 1000)
39015 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39016 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39017 break;
39018
39019 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39020 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39021 }
39022
39023 static void pcnet32_tx_timeout(struct net_device *dev)
39024 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39025 if (pcnet32_debug & NETIF_MSG_DRV)
39026 printk(KERN_ERR
39027 "%s: transmit timed out, status %4.4x, resetting.\n",
39028 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39029 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39030 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39031 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39032 dev->stats.tx_errors++;
39033 if (netif_msg_tx_err(lp)) {
39034 int i;
39035 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39036 if (netif_msg_tx_queued(lp)) {
39037 printk(KERN_DEBUG
39038 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39039 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39040 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39041 }
39042
39043 /* Default status -- will not enable Successful-TxDone
39044 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39045 dev->stats.tx_bytes += skb->len;
39046
39047 /* Trigger an immediate send poll. */
39048 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39049 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39050
39051 dev->trans_start = jiffies;
39052
39053 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39054
39055 spin_lock(&lp->lock);
39056
39057 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39058 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39059 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39060 if (csr0 == 0xffff) {
39061 break; /* PCMCIA remove happened */
39062 }
39063 /* Acknowledge all of the current interrupt sources ASAP. */
39064 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39065 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39066
39067 if (netif_msg_intr(lp))
39068 printk(KERN_DEBUG
39069 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39070 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39071 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39072
39073 /* Log misc errors. */
39074 if (csr0 & 0x4000)
39075 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39076 if (napi_schedule_prep(&lp->napi)) {
39077 u16 val;
39078 /* set interrupt masks */
39079 - val = lp->a.read_csr(ioaddr, CSR3);
39080 + val = lp->a->read_csr(ioaddr, CSR3);
39081 val |= 0x5f00;
39082 - lp->a.write_csr(ioaddr, CSR3, val);
39083 + lp->a->write_csr(ioaddr, CSR3, val);
39084
39085 __napi_schedule(&lp->napi);
39086 break;
39087 }
39088 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39089 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39090 }
39091
39092 if (netif_msg_intr(lp))
39093 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39094 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39095 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39096
39097 spin_unlock(&lp->lock);
39098
39099 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39100
39101 spin_lock_irqsave(&lp->lock, flags);
39102
39103 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39104 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39105
39106 if (netif_msg_ifdown(lp))
39107 printk(KERN_DEBUG
39108 "%s: Shutting down ethercard, status was %2.2x.\n",
39109 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39110 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39111
39112 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39113 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39114 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39115
39116 /*
39117 * Switch back to 16bit mode to avoid problems with dumb
39118 * DOS packet driver after a warm reboot
39119 */
39120 - lp->a.write_bcr(ioaddr, 20, 4);
39121 + lp->a->write_bcr(ioaddr, 20, 4);
39122
39123 spin_unlock_irqrestore(&lp->lock, flags);
39124
39125 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39126 unsigned long flags;
39127
39128 spin_lock_irqsave(&lp->lock, flags);
39129 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39130 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39131 spin_unlock_irqrestore(&lp->lock, flags);
39132
39133 return &dev->stats;
39134 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39135 if (dev->flags & IFF_ALLMULTI) {
39136 ib->filter[0] = cpu_to_le32(~0U);
39137 ib->filter[1] = cpu_to_le32(~0U);
39138 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39139 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39140 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39141 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39142 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39143 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39144 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39145 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39146 return;
39147 }
39148 /* clear the multicast filter */
39149 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39150 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39151 }
39152 for (i = 0; i < 4; i++)
39153 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39154 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39155 le16_to_cpu(mcast_table[i]));
39156 return;
39157 }
39158 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39159
39160 spin_lock_irqsave(&lp->lock, flags);
39161 suspended = pcnet32_suspend(dev, &flags, 0);
39162 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39163 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39164 if (dev->flags & IFF_PROMISC) {
39165 /* Log any net taps. */
39166 if (netif_msg_hw(lp))
39167 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39168 lp->init_block->mode =
39169 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39170 7);
39171 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39172 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39173 } else {
39174 lp->init_block->mode =
39175 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39176 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39177 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39178 pcnet32_load_multicast(dev);
39179 }
39180
39181 if (suspended) {
39182 int csr5;
39183 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39184 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39185 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39186 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39187 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39188 } else {
39189 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39190 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39191 pcnet32_restart(dev, CSR0_NORMAL);
39192 netif_wake_queue(dev);
39193 }
39194 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39195 if (!lp->mii)
39196 return 0;
39197
39198 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39199 - val_out = lp->a.read_bcr(ioaddr, 34);
39200 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39201 + val_out = lp->a->read_bcr(ioaddr, 34);
39202
39203 return val_out;
39204 }
39205 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39206 if (!lp->mii)
39207 return;
39208
39209 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39210 - lp->a.write_bcr(ioaddr, 34, val);
39211 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39212 + lp->a->write_bcr(ioaddr, 34, val);
39213 }
39214
39215 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39216 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39217 curr_link = mii_link_ok(&lp->mii_if);
39218 } else {
39219 ulong ioaddr = dev->base_addr; /* card base I/O address */
39220 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39221 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39222 }
39223 if (!curr_link) {
39224 if (prev_link || verbose) {
39225 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39226 (ecmd.duplex ==
39227 DUPLEX_FULL) ? "full" : "half");
39228 }
39229 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39230 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39231 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39232 if (lp->mii_if.full_duplex)
39233 bcr9 |= (1 << 0);
39234 else
39235 bcr9 &= ~(1 << 0);
39236 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39237 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39238 }
39239 } else {
39240 if (netif_msg_link(lp))
39241 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39242 index 7cc9898..6eb50d3 100644
39243 --- a/drivers/net/sis190.c
39244 +++ b/drivers/net/sis190.c
39245 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39246 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39247 struct net_device *dev)
39248 {
39249 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39250 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39251 struct sis190_private *tp = netdev_priv(dev);
39252 struct pci_dev *isa_bridge;
39253 u8 reg, tmp8;
39254 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39255 index e13685a..60c948c 100644
39256 --- a/drivers/net/sundance.c
39257 +++ b/drivers/net/sundance.c
39258 @@ -225,7 +225,7 @@ enum {
39259 struct pci_id_info {
39260 const char *name;
39261 };
39262 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39263 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39264 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39265 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39266 {"D-Link DFE-580TX 4 port Server Adapter"},
39267 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39268 index 529f55a..cccaa18 100644
39269 --- a/drivers/net/tg3.h
39270 +++ b/drivers/net/tg3.h
39271 @@ -95,6 +95,7 @@
39272 #define CHIPREV_ID_5750_A0 0x4000
39273 #define CHIPREV_ID_5750_A1 0x4001
39274 #define CHIPREV_ID_5750_A3 0x4003
39275 +#define CHIPREV_ID_5750_C1 0x4201
39276 #define CHIPREV_ID_5750_C2 0x4202
39277 #define CHIPREV_ID_5752_A0_HW 0x5000
39278 #define CHIPREV_ID_5752_A0 0x6000
39279 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39280 index b9db1b5..720f9ce 100644
39281 --- a/drivers/net/tokenring/abyss.c
39282 +++ b/drivers/net/tokenring/abyss.c
39283 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39284
39285 static int __init abyss_init (void)
39286 {
39287 - abyss_netdev_ops = tms380tr_netdev_ops;
39288 + pax_open_kernel();
39289 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39290
39291 - abyss_netdev_ops.ndo_open = abyss_open;
39292 - abyss_netdev_ops.ndo_stop = abyss_close;
39293 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39294 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39295 + pax_close_kernel();
39296
39297 return pci_register_driver(&abyss_driver);
39298 }
39299 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39300 index 456f8bf..373e56d 100644
39301 --- a/drivers/net/tokenring/madgemc.c
39302 +++ b/drivers/net/tokenring/madgemc.c
39303 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39304
39305 static int __init madgemc_init (void)
39306 {
39307 - madgemc_netdev_ops = tms380tr_netdev_ops;
39308 - madgemc_netdev_ops.ndo_open = madgemc_open;
39309 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39310 + pax_open_kernel();
39311 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39312 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39313 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39314 + pax_close_kernel();
39315
39316 return mca_register_driver (&madgemc_driver);
39317 }
39318 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39319 index 16e8783..925bd49 100644
39320 --- a/drivers/net/tokenring/proteon.c
39321 +++ b/drivers/net/tokenring/proteon.c
39322 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39323 struct platform_device *pdev;
39324 int i, num = 0, err = 0;
39325
39326 - proteon_netdev_ops = tms380tr_netdev_ops;
39327 - proteon_netdev_ops.ndo_open = proteon_open;
39328 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39329 + pax_open_kernel();
39330 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39331 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39332 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39333 + pax_close_kernel();
39334
39335 err = platform_driver_register(&proteon_driver);
39336 if (err)
39337 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39338 index 46db5c5..37c1536 100644
39339 --- a/drivers/net/tokenring/skisa.c
39340 +++ b/drivers/net/tokenring/skisa.c
39341 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39342 struct platform_device *pdev;
39343 int i, num = 0, err = 0;
39344
39345 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39346 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39347 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39348 + pax_open_kernel();
39349 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39350 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39351 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39352 + pax_close_kernel();
39353
39354 err = platform_driver_register(&sk_isa_driver);
39355 if (err)
39356 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39357 index 74e5ba4..5cf6bc9 100644
39358 --- a/drivers/net/tulip/de2104x.c
39359 +++ b/drivers/net/tulip/de2104x.c
39360 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39361 struct de_srom_info_leaf *il;
39362 void *bufp;
39363
39364 + pax_track_stack();
39365 +
39366 /* download entire eeprom */
39367 for (i = 0; i < DE_EEPROM_WORDS; i++)
39368 ((__le16 *)ee_data)[i] =
39369 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39370 index a8349b7..90f9dfe 100644
39371 --- a/drivers/net/tulip/de4x5.c
39372 +++ b/drivers/net/tulip/de4x5.c
39373 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39374 for (i=0; i<ETH_ALEN; i++) {
39375 tmp.addr[i] = dev->dev_addr[i];
39376 }
39377 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39378 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39379 break;
39380
39381 case DE4X5_SET_HWADDR: /* Set the hardware address */
39382 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39383 spin_lock_irqsave(&lp->lock, flags);
39384 memcpy(&statbuf, &lp->pktStats, ioc->len);
39385 spin_unlock_irqrestore(&lp->lock, flags);
39386 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39387 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39388 return -EFAULT;
39389 break;
39390 }
39391 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39392 index 391acd3..56d11cd 100644
39393 --- a/drivers/net/tulip/eeprom.c
39394 +++ b/drivers/net/tulip/eeprom.c
39395 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39396 {NULL}};
39397
39398
39399 -static const char *block_name[] __devinitdata = {
39400 +static const char *block_name[] __devinitconst = {
39401 "21140 non-MII",
39402 "21140 MII PHY",
39403 "21142 Serial PHY",
39404 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39405 index b38d3b7..b1cff23 100644
39406 --- a/drivers/net/tulip/winbond-840.c
39407 +++ b/drivers/net/tulip/winbond-840.c
39408 @@ -235,7 +235,7 @@ struct pci_id_info {
39409 int drv_flags; /* Driver use, intended as capability flags. */
39410 };
39411
39412 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39413 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39414 { /* Sometime a Level-One switch card. */
39415 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39416 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39417 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39418 index f450bc9..2b747c8 100644
39419 --- a/drivers/net/usb/hso.c
39420 +++ b/drivers/net/usb/hso.c
39421 @@ -71,7 +71,7 @@
39422 #include <asm/byteorder.h>
39423 #include <linux/serial_core.h>
39424 #include <linux/serial.h>
39425 -
39426 +#include <asm/local.h>
39427
39428 #define DRIVER_VERSION "1.2"
39429 #define MOD_AUTHOR "Option Wireless"
39430 @@ -258,7 +258,7 @@ struct hso_serial {
39431
39432 /* from usb_serial_port */
39433 struct tty_struct *tty;
39434 - int open_count;
39435 + local_t open_count;
39436 spinlock_t serial_lock;
39437
39438 int (*write_data) (struct hso_serial *serial);
39439 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39440 struct urb *urb;
39441
39442 urb = serial->rx_urb[0];
39443 - if (serial->open_count > 0) {
39444 + if (local_read(&serial->open_count) > 0) {
39445 count = put_rxbuf_data(urb, serial);
39446 if (count == -1)
39447 return;
39448 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39449 DUMP1(urb->transfer_buffer, urb->actual_length);
39450
39451 /* Anyone listening? */
39452 - if (serial->open_count == 0)
39453 + if (local_read(&serial->open_count) == 0)
39454 return;
39455
39456 if (status == 0) {
39457 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39458 spin_unlock_irq(&serial->serial_lock);
39459
39460 /* check for port already opened, if not set the termios */
39461 - serial->open_count++;
39462 - if (serial->open_count == 1) {
39463 + if (local_inc_return(&serial->open_count) == 1) {
39464 tty->low_latency = 1;
39465 serial->rx_state = RX_IDLE;
39466 /* Force default termio settings */
39467 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39468 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39469 if (result) {
39470 hso_stop_serial_device(serial->parent);
39471 - serial->open_count--;
39472 + local_dec(&serial->open_count);
39473 kref_put(&serial->parent->ref, hso_serial_ref_free);
39474 }
39475 } else {
39476 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39477
39478 /* reset the rts and dtr */
39479 /* do the actual close */
39480 - serial->open_count--;
39481 + local_dec(&serial->open_count);
39482
39483 - if (serial->open_count <= 0) {
39484 - serial->open_count = 0;
39485 + if (local_read(&serial->open_count) <= 0) {
39486 + local_set(&serial->open_count, 0);
39487 spin_lock_irq(&serial->serial_lock);
39488 if (serial->tty == tty) {
39489 serial->tty->driver_data = NULL;
39490 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39491
39492 /* the actual setup */
39493 spin_lock_irqsave(&serial->serial_lock, flags);
39494 - if (serial->open_count)
39495 + if (local_read(&serial->open_count))
39496 _hso_serial_set_termios(tty, old);
39497 else
39498 tty->termios = old;
39499 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39500 /* Start all serial ports */
39501 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39502 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39503 - if (dev2ser(serial_table[i])->open_count) {
39504 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39505 result =
39506 hso_start_serial_device(serial_table[i], GFP_NOIO);
39507 hso_kick_transmit(dev2ser(serial_table[i]));
39508 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39509 index 3e94f0c..ffdd926 100644
39510 --- a/drivers/net/vxge/vxge-config.h
39511 +++ b/drivers/net/vxge/vxge-config.h
39512 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39513 void (*link_down)(struct __vxge_hw_device *devh);
39514 void (*crit_err)(struct __vxge_hw_device *devh,
39515 enum vxge_hw_event type, u64 ext_data);
39516 -};
39517 +} __no_const;
39518
39519 /*
39520 * struct __vxge_hw_blockpool_entry - Block private data structure
39521 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39522 index 068d7a9..35293de 100644
39523 --- a/drivers/net/vxge/vxge-main.c
39524 +++ b/drivers/net/vxge/vxge-main.c
39525 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39526 struct sk_buff *completed[NR_SKB_COMPLETED];
39527 int more;
39528
39529 + pax_track_stack();
39530 +
39531 do {
39532 more = 0;
39533 skb_ptr = completed;
39534 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39535 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39536 int index;
39537
39538 + pax_track_stack();
39539 +
39540 /*
39541 * Filling
39542 * - itable with bucket numbers
39543 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39544 index 461742b..81be42e 100644
39545 --- a/drivers/net/vxge/vxge-traffic.h
39546 +++ b/drivers/net/vxge/vxge-traffic.h
39547 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39548 struct vxge_hw_mempool_dma *dma_object,
39549 u32 index,
39550 u32 is_last);
39551 -};
39552 +} __no_const;
39553
39554 void
39555 __vxge_hw_mempool_destroy(
39556 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39557 index cd8cb95..4153b79 100644
39558 --- a/drivers/net/wan/cycx_x25.c
39559 +++ b/drivers/net/wan/cycx_x25.c
39560 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39561 unsigned char hex[1024],
39562 * phex = hex;
39563
39564 + pax_track_stack();
39565 +
39566 if (len >= (sizeof(hex) / 2))
39567 len = (sizeof(hex) / 2) - 1;
39568
39569 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39570 index aa9248f..a4e3c3b 100644
39571 --- a/drivers/net/wan/hdlc_x25.c
39572 +++ b/drivers/net/wan/hdlc_x25.c
39573 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39574
39575 static int x25_open(struct net_device *dev)
39576 {
39577 - struct lapb_register_struct cb;
39578 + static struct lapb_register_struct cb = {
39579 + .connect_confirmation = x25_connected,
39580 + .connect_indication = x25_connected,
39581 + .disconnect_confirmation = x25_disconnected,
39582 + .disconnect_indication = x25_disconnected,
39583 + .data_indication = x25_data_indication,
39584 + .data_transmit = x25_data_transmit
39585 + };
39586 int result;
39587
39588 - cb.connect_confirmation = x25_connected;
39589 - cb.connect_indication = x25_connected;
39590 - cb.disconnect_confirmation = x25_disconnected;
39591 - cb.disconnect_indication = x25_disconnected;
39592 - cb.data_indication = x25_data_indication;
39593 - cb.data_transmit = x25_data_transmit;
39594 -
39595 result = lapb_register(dev, &cb);
39596 if (result != LAPB_OK)
39597 return result;
39598 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39599 index 5ad287c..783b020 100644
39600 --- a/drivers/net/wimax/i2400m/usb-fw.c
39601 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39602 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39603 int do_autopm = 1;
39604 DECLARE_COMPLETION_ONSTACK(notif_completion);
39605
39606 + pax_track_stack();
39607 +
39608 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39609 i2400m, ack, ack_size);
39610 BUG_ON(_ack == i2400m->bm_ack_buf);
39611 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39612 index 6c26840..62c97c3 100644
39613 --- a/drivers/net/wireless/airo.c
39614 +++ b/drivers/net/wireless/airo.c
39615 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39616 BSSListElement * loop_net;
39617 BSSListElement * tmp_net;
39618
39619 + pax_track_stack();
39620 +
39621 /* Blow away current list of scan results */
39622 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39623 list_move_tail (&loop_net->list, &ai->network_free_list);
39624 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39625 WepKeyRid wkr;
39626 int rc;
39627
39628 + pax_track_stack();
39629 +
39630 memset( &mySsid, 0, sizeof( mySsid ) );
39631 kfree (ai->flash);
39632 ai->flash = NULL;
39633 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39634 __le32 *vals = stats.vals;
39635 int len;
39636
39637 + pax_track_stack();
39638 +
39639 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39640 return -ENOMEM;
39641 data = (struct proc_data *)file->private_data;
39642 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39643 /* If doLoseSync is not 1, we won't do a Lose Sync */
39644 int doLoseSync = -1;
39645
39646 + pax_track_stack();
39647 +
39648 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39649 return -ENOMEM;
39650 data = (struct proc_data *)file->private_data;
39651 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39652 int i;
39653 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39654
39655 + pax_track_stack();
39656 +
39657 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39658 if (!qual)
39659 return -ENOMEM;
39660 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39661 CapabilityRid cap_rid;
39662 __le32 *vals = stats_rid.vals;
39663
39664 + pax_track_stack();
39665 +
39666 /* Get stats out of the card */
39667 clear_bit(JOB_WSTATS, &local->jobs);
39668 if (local->power.event) {
39669 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39670 index 747508c..82e965d 100644
39671 --- a/drivers/net/wireless/ath/ath5k/debug.c
39672 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39673 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39674 unsigned int v;
39675 u64 tsf;
39676
39677 + pax_track_stack();
39678 +
39679 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39680 len += snprintf(buf+len, sizeof(buf)-len,
39681 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39682 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39683 unsigned int len = 0;
39684 unsigned int i;
39685
39686 + pax_track_stack();
39687 +
39688 len += snprintf(buf+len, sizeof(buf)-len,
39689 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39690
39691 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39692 index 2be4c22..593b1eb 100644
39693 --- a/drivers/net/wireless/ath/ath9k/debug.c
39694 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39695 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39696 char buf[512];
39697 unsigned int len = 0;
39698
39699 + pax_track_stack();
39700 +
39701 len += snprintf(buf + len, sizeof(buf) - len,
39702 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39703 len += snprintf(buf + len, sizeof(buf) - len,
39704 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39705 int i;
39706 u8 addr[ETH_ALEN];
39707
39708 + pax_track_stack();
39709 +
39710 len += snprintf(buf + len, sizeof(buf) - len,
39711 "primary: %s (%s chan=%d ht=%d)\n",
39712 wiphy_name(sc->pri_wiphy->hw->wiphy),
39713 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39714 index 80b19a4..dab3a45 100644
39715 --- a/drivers/net/wireless/b43/debugfs.c
39716 +++ b/drivers/net/wireless/b43/debugfs.c
39717 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39718 struct b43_debugfs_fops {
39719 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39720 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39721 - struct file_operations fops;
39722 + const struct file_operations fops;
39723 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39724 size_t file_struct_offset;
39725 };
39726 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39727 index 1f85ac5..c99b4b4 100644
39728 --- a/drivers/net/wireless/b43legacy/debugfs.c
39729 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39730 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39731 struct b43legacy_debugfs_fops {
39732 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39733 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39734 - struct file_operations fops;
39735 + const struct file_operations fops;
39736 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39737 size_t file_struct_offset;
39738 /* Take wl->irq_lock before calling read/write? */
39739 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39740 index 43102bf..3b569c3 100644
39741 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39742 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39743 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39744 int err;
39745 DECLARE_SSID_BUF(ssid);
39746
39747 + pax_track_stack();
39748 +
39749 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39750
39751 if (ssid_len)
39752 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39753 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39754 int err;
39755
39756 + pax_track_stack();
39757 +
39758 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39759 idx, keylen, len);
39760
39761 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39762 index 282b1f7..169f0cf 100644
39763 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39764 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39765 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39766 unsigned long flags;
39767 DECLARE_SSID_BUF(ssid);
39768
39769 + pax_track_stack();
39770 +
39771 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39772 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39773 print_ssid(ssid, info_element->data, info_element->len),
39774 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39775 index 950267a..80d5fd2 100644
39776 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39777 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39778 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39779 },
39780 };
39781
39782 -static struct iwl_ops iwl1000_ops = {
39783 +static const struct iwl_ops iwl1000_ops = {
39784 .ucode = &iwl5000_ucode,
39785 .lib = &iwl1000_lib,
39786 .hcmd = &iwl5000_hcmd,
39787 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39788 index 56bfcc3..b348020 100644
39789 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39790 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39791 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39792 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39793 };
39794
39795 -static struct iwl_ops iwl3945_ops = {
39796 +static const struct iwl_ops iwl3945_ops = {
39797 .ucode = &iwl3945_ucode,
39798 .lib = &iwl3945_lib,
39799 .hcmd = &iwl3945_hcmd,
39800 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39801 index 585b8d4..e142963 100644
39802 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39803 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39804 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39805 },
39806 };
39807
39808 -static struct iwl_ops iwl4965_ops = {
39809 +static const struct iwl_ops iwl4965_ops = {
39810 .ucode = &iwl4965_ucode,
39811 .lib = &iwl4965_lib,
39812 .hcmd = &iwl4965_hcmd,
39813 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39814 index 1f423f2..e37c192 100644
39815 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39816 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39817 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39818 },
39819 };
39820
39821 -struct iwl_ops iwl5000_ops = {
39822 +const struct iwl_ops iwl5000_ops = {
39823 .ucode = &iwl5000_ucode,
39824 .lib = &iwl5000_lib,
39825 .hcmd = &iwl5000_hcmd,
39826 .utils = &iwl5000_hcmd_utils,
39827 };
39828
39829 -static struct iwl_ops iwl5150_ops = {
39830 +static const struct iwl_ops iwl5150_ops = {
39831 .ucode = &iwl5000_ucode,
39832 .lib = &iwl5150_lib,
39833 .hcmd = &iwl5000_hcmd,
39834 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39835 index 1473452..f07d5e1 100644
39836 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39837 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39838 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39839 .calc_rssi = iwl5000_calc_rssi,
39840 };
39841
39842 -static struct iwl_ops iwl6000_ops = {
39843 +static const struct iwl_ops iwl6000_ops = {
39844 .ucode = &iwl5000_ucode,
39845 .lib = &iwl6000_lib,
39846 .hcmd = &iwl5000_hcmd,
39847 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39848 index 1a3dfa2..b3e0a61 100644
39849 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39850 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39851 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39852 u8 active_index = 0;
39853 s32 tpt = 0;
39854
39855 + pax_track_stack();
39856 +
39857 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39858
39859 if (!ieee80211_is_data(hdr->frame_control) ||
39860 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39861 u8 valid_tx_ant = 0;
39862 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39863
39864 + pax_track_stack();
39865 +
39866 /* Override starting rate (index 0) if needed for debug purposes */
39867 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39868
39869 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39870 index 0e56d78..6a3c107 100644
39871 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39872 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39873 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39874 if (iwl_debug_level & IWL_DL_INFO)
39875 dev_printk(KERN_DEBUG, &(pdev->dev),
39876 "Disabling hw_scan\n");
39877 - iwl_hw_ops.hw_scan = NULL;
39878 + pax_open_kernel();
39879 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39880 + pax_close_kernel();
39881 }
39882
39883 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39884 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39885 index cbc6290..eb323d7 100644
39886 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39887 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39888 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39889 #endif
39890
39891 #else
39892 -#define IWL_DEBUG(__priv, level, fmt, args...)
39893 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39894 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39895 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39896 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39897 void *p, u32 len)
39898 {}
39899 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39900 index a198bcf..8e68233 100644
39901 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39902 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39903 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39904 int pos = 0;
39905 const size_t bufsz = sizeof(buf);
39906
39907 + pax_track_stack();
39908 +
39909 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39910 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39911 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39912 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39913 const size_t bufsz = sizeof(buf);
39914 ssize_t ret;
39915
39916 + pax_track_stack();
39917 +
39918 for (i = 0; i < AC_NUM; i++) {
39919 pos += scnprintf(buf + pos, bufsz - pos,
39920 "\tcw_min\tcw_max\taifsn\ttxop\n");
39921 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39922 index 3539ea4..b174bfa 100644
39923 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39924 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39925 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39926
39927 /* shared structures from iwl-5000.c */
39928 extern struct iwl_mod_params iwl50_mod_params;
39929 -extern struct iwl_ops iwl5000_ops;
39930 +extern const struct iwl_ops iwl5000_ops;
39931 extern struct iwl_ucode_ops iwl5000_ucode;
39932 extern struct iwl_lib_ops iwl5000_lib;
39933 extern struct iwl_hcmd_ops iwl5000_hcmd;
39934 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39935 index 619590d..69235ee 100644
39936 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39937 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39938 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39939 */
39940 if (iwl3945_mod_params.disable_hw_scan) {
39941 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39942 - iwl3945_hw_ops.hw_scan = NULL;
39943 + pax_open_kernel();
39944 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39945 + pax_close_kernel();
39946 }
39947
39948
39949 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39950 index 1465379..fe4d78b 100644
39951 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39952 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39953 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39954 int buf_len = 512;
39955 size_t len = 0;
39956
39957 + pax_track_stack();
39958 +
39959 if (*ppos != 0)
39960 return 0;
39961 if (count < sizeof(buf))
39962 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39963 index 893a55c..7f66a50 100644
39964 --- a/drivers/net/wireless/libertas/debugfs.c
39965 +++ b/drivers/net/wireless/libertas/debugfs.c
39966 @@ -708,7 +708,7 @@ out_unlock:
39967 struct lbs_debugfs_files {
39968 const char *name;
39969 int perm;
39970 - struct file_operations fops;
39971 + const struct file_operations fops;
39972 };
39973
39974 static const struct lbs_debugfs_files debugfs_files[] = {
39975 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39976 index 2ecbedb..42704f0 100644
39977 --- a/drivers/net/wireless/rndis_wlan.c
39978 +++ b/drivers/net/wireless/rndis_wlan.c
39979 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39980
39981 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39982
39983 - if (rts_threshold < 0 || rts_threshold > 2347)
39984 + if (rts_threshold > 2347)
39985 rts_threshold = 2347;
39986
39987 tmp = cpu_to_le32(rts_threshold);
39988 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39989 index 334ccd6..47f8944 100644
39990 --- a/drivers/oprofile/buffer_sync.c
39991 +++ b/drivers/oprofile/buffer_sync.c
39992 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39993 if (cookie == NO_COOKIE)
39994 offset = pc;
39995 if (cookie == INVALID_COOKIE) {
39996 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39997 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39998 offset = pc;
39999 }
40000 if (cookie != last_cookie) {
40001 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40002 /* add userspace sample */
40003
40004 if (!mm) {
40005 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40006 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40007 return 0;
40008 }
40009
40010 cookie = lookup_dcookie(mm, s->eip, &offset);
40011
40012 if (cookie == INVALID_COOKIE) {
40013 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40014 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40015 return 0;
40016 }
40017
40018 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40019 /* ignore backtraces if failed to add a sample */
40020 if (state == sb_bt_start) {
40021 state = sb_bt_ignore;
40022 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40023 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40024 }
40025 }
40026 release_mm(mm);
40027 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40028 index 5df60a6..72f5c1c 100644
40029 --- a/drivers/oprofile/event_buffer.c
40030 +++ b/drivers/oprofile/event_buffer.c
40031 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40032 }
40033
40034 if (buffer_pos == buffer_size) {
40035 - atomic_inc(&oprofile_stats.event_lost_overflow);
40036 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40037 return;
40038 }
40039
40040 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40041 index dc8a042..fe5f315 100644
40042 --- a/drivers/oprofile/oprof.c
40043 +++ b/drivers/oprofile/oprof.c
40044 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40045 if (oprofile_ops.switch_events())
40046 return;
40047
40048 - atomic_inc(&oprofile_stats.multiplex_counter);
40049 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40050 start_switch_worker();
40051 }
40052
40053 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40054 index 61689e8..387f7f8 100644
40055 --- a/drivers/oprofile/oprofile_stats.c
40056 +++ b/drivers/oprofile/oprofile_stats.c
40057 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40058 cpu_buf->sample_invalid_eip = 0;
40059 }
40060
40061 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40062 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40063 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40064 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40065 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40066 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40067 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40068 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40069 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40070 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40071 }
40072
40073
40074 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40075 index 0b54e46..a37c527 100644
40076 --- a/drivers/oprofile/oprofile_stats.h
40077 +++ b/drivers/oprofile/oprofile_stats.h
40078 @@ -13,11 +13,11 @@
40079 #include <asm/atomic.h>
40080
40081 struct oprofile_stat_struct {
40082 - atomic_t sample_lost_no_mm;
40083 - atomic_t sample_lost_no_mapping;
40084 - atomic_t bt_lost_no_mapping;
40085 - atomic_t event_lost_overflow;
40086 - atomic_t multiplex_counter;
40087 + atomic_unchecked_t sample_lost_no_mm;
40088 + atomic_unchecked_t sample_lost_no_mapping;
40089 + atomic_unchecked_t bt_lost_no_mapping;
40090 + atomic_unchecked_t event_lost_overflow;
40091 + atomic_unchecked_t multiplex_counter;
40092 };
40093
40094 extern struct oprofile_stat_struct oprofile_stats;
40095 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40096 index 2766a6d..80c77e2 100644
40097 --- a/drivers/oprofile/oprofilefs.c
40098 +++ b/drivers/oprofile/oprofilefs.c
40099 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40100
40101
40102 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40103 - char const *name, atomic_t *val)
40104 + char const *name, atomic_unchecked_t *val)
40105 {
40106 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40107 &atomic_ro_fops, 0444);
40108 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40109 index 13a64bc..ad62835 100644
40110 --- a/drivers/parisc/pdc_stable.c
40111 +++ b/drivers/parisc/pdc_stable.c
40112 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40113 return ret;
40114 }
40115
40116 -static struct sysfs_ops pdcspath_attr_ops = {
40117 +static const struct sysfs_ops pdcspath_attr_ops = {
40118 .show = pdcspath_attr_show,
40119 .store = pdcspath_attr_store,
40120 };
40121 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40122 index 8eefe56..40751a7 100644
40123 --- a/drivers/parport/procfs.c
40124 +++ b/drivers/parport/procfs.c
40125 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40126
40127 *ppos += len;
40128
40129 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40130 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40131 }
40132
40133 #ifdef CONFIG_PARPORT_1284
40134 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40135
40136 *ppos += len;
40137
40138 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40139 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40140 }
40141 #endif /* IEEE1284.3 support. */
40142
40143 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40144 index 73e7d8e..c80f3d2 100644
40145 --- a/drivers/pci/hotplug/acpiphp_glue.c
40146 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40147 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40148 }
40149
40150
40151 -static struct acpi_dock_ops acpiphp_dock_ops = {
40152 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40153 .handler = handle_hotplug_event_func,
40154 };
40155
40156 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40157 index 9fff878..ad0ad53 100644
40158 --- a/drivers/pci/hotplug/cpci_hotplug.h
40159 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40160 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40161 int (*hardware_test) (struct slot* slot, u32 value);
40162 u8 (*get_power) (struct slot* slot);
40163 int (*set_power) (struct slot* slot, int value);
40164 -};
40165 +} __no_const;
40166
40167 struct cpci_hp_controller {
40168 unsigned int irq;
40169 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40170 index 76ba8a1..20ca857 100644
40171 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40172 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40173 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40174
40175 void compaq_nvram_init (void __iomem *rom_start)
40176 {
40177 +
40178 +#ifndef CONFIG_PAX_KERNEXEC
40179 if (rom_start) {
40180 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40181 }
40182 +#endif
40183 +
40184 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40185
40186 /* initialize our int15 lock */
40187 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40188 index 6151389..0a894ef 100644
40189 --- a/drivers/pci/hotplug/fakephp.c
40190 +++ b/drivers/pci/hotplug/fakephp.c
40191 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40192 }
40193
40194 static struct kobj_type legacy_ktype = {
40195 - .sysfs_ops = &(struct sysfs_ops){
40196 + .sysfs_ops = &(const struct sysfs_ops){
40197 .store = legacy_store, .show = legacy_show
40198 },
40199 .release = &legacy_release,
40200 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40201 index 5b680df..fe05b7e 100644
40202 --- a/drivers/pci/intel-iommu.c
40203 +++ b/drivers/pci/intel-iommu.c
40204 @@ -2643,7 +2643,7 @@ error:
40205 return 0;
40206 }
40207
40208 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40209 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40210 unsigned long offset, size_t size,
40211 enum dma_data_direction dir,
40212 struct dma_attrs *attrs)
40213 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40214 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40215 }
40216
40217 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40218 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40219 size_t size, enum dma_data_direction dir,
40220 struct dma_attrs *attrs)
40221 {
40222 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40223 }
40224 }
40225
40226 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40227 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40228 dma_addr_t *dma_handle, gfp_t flags)
40229 {
40230 void *vaddr;
40231 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40232 return NULL;
40233 }
40234
40235 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40236 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40237 dma_addr_t dma_handle)
40238 {
40239 int order;
40240 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40241 free_pages((unsigned long)vaddr, order);
40242 }
40243
40244 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40245 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40246 int nelems, enum dma_data_direction dir,
40247 struct dma_attrs *attrs)
40248 {
40249 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40250 return nelems;
40251 }
40252
40253 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40254 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40255 enum dma_data_direction dir, struct dma_attrs *attrs)
40256 {
40257 int i;
40258 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40259 return nelems;
40260 }
40261
40262 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40263 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40264 {
40265 return !dma_addr;
40266 }
40267
40268 -struct dma_map_ops intel_dma_ops = {
40269 +const struct dma_map_ops intel_dma_ops = {
40270 .alloc_coherent = intel_alloc_coherent,
40271 .free_coherent = intel_free_coherent,
40272 .map_sg = intel_map_sg,
40273 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40274 index 5b7056c..607bc94 100644
40275 --- a/drivers/pci/pcie/aspm.c
40276 +++ b/drivers/pci/pcie/aspm.c
40277 @@ -27,9 +27,9 @@
40278 #define MODULE_PARAM_PREFIX "pcie_aspm."
40279
40280 /* Note: those are not register definitions */
40281 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40282 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40283 -#define ASPM_STATE_L1 (4) /* L1 state */
40284 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40285 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40286 +#define ASPM_STATE_L1 (4U) /* L1 state */
40287 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40288 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40289
40290 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40291 index 8105e32..ca10419 100644
40292 --- a/drivers/pci/probe.c
40293 +++ b/drivers/pci/probe.c
40294 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40295 return ret;
40296 }
40297
40298 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40299 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40300 struct device_attribute *attr,
40301 char *buf)
40302 {
40303 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40304 }
40305
40306 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40307 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40308 struct device_attribute *attr,
40309 char *buf)
40310 {
40311 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40312 index a03ad8c..024b0da 100644
40313 --- a/drivers/pci/proc.c
40314 +++ b/drivers/pci/proc.c
40315 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40316 static int __init pci_proc_init(void)
40317 {
40318 struct pci_dev *dev = NULL;
40319 +
40320 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40321 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40322 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40323 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40324 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40325 +#endif
40326 +#else
40327 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40328 +#endif
40329 proc_create("devices", 0, proc_bus_pci_dir,
40330 &proc_bus_pci_dev_operations);
40331 proc_initialized = 1;
40332 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40333 index 8c02b6c..5584d8e 100644
40334 --- a/drivers/pci/slot.c
40335 +++ b/drivers/pci/slot.c
40336 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40337 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40338 }
40339
40340 -static struct sysfs_ops pci_slot_sysfs_ops = {
40341 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40342 .show = pci_slot_attr_show,
40343 .store = pci_slot_attr_store,
40344 };
40345 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40346 index 30cf71d2..50938f1 100644
40347 --- a/drivers/pcmcia/pcmcia_ioctl.c
40348 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40349 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40350 return -EFAULT;
40351 }
40352 }
40353 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40354 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40355 if (!buf)
40356 return -ENOMEM;
40357
40358 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40359 index 52183c4..b224c69 100644
40360 --- a/drivers/platform/x86/acer-wmi.c
40361 +++ b/drivers/platform/x86/acer-wmi.c
40362 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40363 return 0;
40364 }
40365
40366 -static struct backlight_ops acer_bl_ops = {
40367 +static const struct backlight_ops acer_bl_ops = {
40368 .get_brightness = read_brightness,
40369 .update_status = update_bl_status,
40370 };
40371 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40372 index 767cb61..a87380b 100644
40373 --- a/drivers/platform/x86/asus-laptop.c
40374 +++ b/drivers/platform/x86/asus-laptop.c
40375 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40376 */
40377 static int read_brightness(struct backlight_device *bd);
40378 static int update_bl_status(struct backlight_device *bd);
40379 -static struct backlight_ops asusbl_ops = {
40380 +static const struct backlight_ops asusbl_ops = {
40381 .get_brightness = read_brightness,
40382 .update_status = update_bl_status,
40383 };
40384 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40385 index d66c07a..a4abaac 100644
40386 --- a/drivers/platform/x86/asus_acpi.c
40387 +++ b/drivers/platform/x86/asus_acpi.c
40388 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40389 return 0;
40390 }
40391
40392 -static struct backlight_ops asus_backlight_data = {
40393 +static const struct backlight_ops asus_backlight_data = {
40394 .get_brightness = read_brightness,
40395 .update_status = set_brightness_status,
40396 };
40397 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40398 index 11003bb..550ff1b 100644
40399 --- a/drivers/platform/x86/compal-laptop.c
40400 +++ b/drivers/platform/x86/compal-laptop.c
40401 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40402 return set_lcd_level(b->props.brightness);
40403 }
40404
40405 -static struct backlight_ops compalbl_ops = {
40406 +static const struct backlight_ops compalbl_ops = {
40407 .get_brightness = bl_get_brightness,
40408 .update_status = bl_update_status,
40409 };
40410 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40411 index 07a74da..9dc99fa 100644
40412 --- a/drivers/platform/x86/dell-laptop.c
40413 +++ b/drivers/platform/x86/dell-laptop.c
40414 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40415 return buffer.output[1];
40416 }
40417
40418 -static struct backlight_ops dell_ops = {
40419 +static const struct backlight_ops dell_ops = {
40420 .get_brightness = dell_get_intensity,
40421 .update_status = dell_send_intensity,
40422 };
40423 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40424 index c533b1c..5c81f22 100644
40425 --- a/drivers/platform/x86/eeepc-laptop.c
40426 +++ b/drivers/platform/x86/eeepc-laptop.c
40427 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40428 */
40429 static int read_brightness(struct backlight_device *bd);
40430 static int update_bl_status(struct backlight_device *bd);
40431 -static struct backlight_ops eeepcbl_ops = {
40432 +static const struct backlight_ops eeepcbl_ops = {
40433 .get_brightness = read_brightness,
40434 .update_status = update_bl_status,
40435 };
40436 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40437 index bcd4ba8..a249b35 100644
40438 --- a/drivers/platform/x86/fujitsu-laptop.c
40439 +++ b/drivers/platform/x86/fujitsu-laptop.c
40440 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40441 return ret;
40442 }
40443
40444 -static struct backlight_ops fujitsubl_ops = {
40445 +static const struct backlight_ops fujitsubl_ops = {
40446 .get_brightness = bl_get_brightness,
40447 .update_status = bl_update_status,
40448 };
40449 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40450 index 759763d..1093ba2 100644
40451 --- a/drivers/platform/x86/msi-laptop.c
40452 +++ b/drivers/platform/x86/msi-laptop.c
40453 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40454 return set_lcd_level(b->props.brightness);
40455 }
40456
40457 -static struct backlight_ops msibl_ops = {
40458 +static const struct backlight_ops msibl_ops = {
40459 .get_brightness = bl_get_brightness,
40460 .update_status = bl_update_status,
40461 };
40462 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40463 index fe7cf01..9012d8d 100644
40464 --- a/drivers/platform/x86/panasonic-laptop.c
40465 +++ b/drivers/platform/x86/panasonic-laptop.c
40466 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40467 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40468 }
40469
40470 -static struct backlight_ops pcc_backlight_ops = {
40471 +static const struct backlight_ops pcc_backlight_ops = {
40472 .get_brightness = bl_get,
40473 .update_status = bl_set_status,
40474 };
40475 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40476 index a2a742c..b37e25e 100644
40477 --- a/drivers/platform/x86/sony-laptop.c
40478 +++ b/drivers/platform/x86/sony-laptop.c
40479 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40480 }
40481
40482 static struct backlight_device *sony_backlight_device;
40483 -static struct backlight_ops sony_backlight_ops = {
40484 +static const struct backlight_ops sony_backlight_ops = {
40485 .update_status = sony_backlight_update_status,
40486 .get_brightness = sony_backlight_get_brightness,
40487 };
40488 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40489 index 68271ae..5e8fb10 100644
40490 --- a/drivers/platform/x86/thinkpad_acpi.c
40491 +++ b/drivers/platform/x86/thinkpad_acpi.c
40492 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40493 return 0;
40494 }
40495
40496 -void static hotkey_mask_warn_incomplete_mask(void)
40497 +static void hotkey_mask_warn_incomplete_mask(void)
40498 {
40499 /* log only what the user can fix... */
40500 const u32 wantedmask = hotkey_driver_mask &
40501 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40502 BACKLIGHT_UPDATE_HOTKEY);
40503 }
40504
40505 -static struct backlight_ops ibm_backlight_data = {
40506 +static const struct backlight_ops ibm_backlight_data = {
40507 .get_brightness = brightness_get,
40508 .update_status = brightness_update_status,
40509 };
40510 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40511 index 51c0a8b..0786629 100644
40512 --- a/drivers/platform/x86/toshiba_acpi.c
40513 +++ b/drivers/platform/x86/toshiba_acpi.c
40514 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40515 return AE_OK;
40516 }
40517
40518 -static struct backlight_ops toshiba_backlight_data = {
40519 +static const struct backlight_ops toshiba_backlight_data = {
40520 .get_brightness = get_lcd,
40521 .update_status = set_lcd_status,
40522 };
40523 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40524 index fc83783c..cf370d7 100644
40525 --- a/drivers/pnp/pnpbios/bioscalls.c
40526 +++ b/drivers/pnp/pnpbios/bioscalls.c
40527 @@ -60,7 +60,7 @@ do { \
40528 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40529 } while(0)
40530
40531 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40532 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40533 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40534
40535 /*
40536 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40537
40538 cpu = get_cpu();
40539 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40540 +
40541 + pax_open_kernel();
40542 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40543 + pax_close_kernel();
40544
40545 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40546 spin_lock_irqsave(&pnp_bios_lock, flags);
40547 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40548 :"memory");
40549 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40550
40551 + pax_open_kernel();
40552 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40553 + pax_close_kernel();
40554 +
40555 put_cpu();
40556
40557 /* If we get here and this is set then the PnP BIOS faulted on us. */
40558 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40559 return status;
40560 }
40561
40562 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40563 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40564 {
40565 int i;
40566
40567 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40568 pnp_bios_callpoint.offset = header->fields.pm16offset;
40569 pnp_bios_callpoint.segment = PNP_CS16;
40570
40571 + pax_open_kernel();
40572 +
40573 for_each_possible_cpu(i) {
40574 struct desc_struct *gdt = get_cpu_gdt_table(i);
40575 if (!gdt)
40576 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40577 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40578 (unsigned long)__va(header->fields.pm16dseg));
40579 }
40580 +
40581 + pax_close_kernel();
40582 }
40583 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40584 index ba97654..66b99d4 100644
40585 --- a/drivers/pnp/resource.c
40586 +++ b/drivers/pnp/resource.c
40587 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40588 return 1;
40589
40590 /* check if the resource is valid */
40591 - if (*irq < 0 || *irq > 15)
40592 + if (*irq > 15)
40593 return 0;
40594
40595 /* check if the resource is reserved */
40596 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40597 return 1;
40598
40599 /* check if the resource is valid */
40600 - if (*dma < 0 || *dma == 4 || *dma > 7)
40601 + if (*dma == 4 || *dma > 7)
40602 return 0;
40603
40604 /* check if the resource is reserved */
40605 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40606 index 62bb981..24a2dc9 100644
40607 --- a/drivers/power/bq27x00_battery.c
40608 +++ b/drivers/power/bq27x00_battery.c
40609 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40610 struct bq27x00_access_methods {
40611 int (*read)(u8 reg, int *rt_value, int b_single,
40612 struct bq27x00_device_info *di);
40613 -};
40614 +} __no_const;
40615
40616 struct bq27x00_device_info {
40617 struct device *dev;
40618 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40619 index 62227cd..b5b538b 100644
40620 --- a/drivers/rtc/rtc-dev.c
40621 +++ b/drivers/rtc/rtc-dev.c
40622 @@ -14,6 +14,7 @@
40623 #include <linux/module.h>
40624 #include <linux/rtc.h>
40625 #include <linux/sched.h>
40626 +#include <linux/grsecurity.h>
40627 #include "rtc-core.h"
40628
40629 static dev_t rtc_devt;
40630 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40631 if (copy_from_user(&tm, uarg, sizeof(tm)))
40632 return -EFAULT;
40633
40634 + gr_log_timechange();
40635 +
40636 return rtc_set_time(rtc, &tm);
40637
40638 case RTC_PIE_ON:
40639 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40640 index 968e3c7..fbc637a 100644
40641 --- a/drivers/s390/cio/qdio_perf.c
40642 +++ b/drivers/s390/cio/qdio_perf.c
40643 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40644 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40645 {
40646 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40647 - (long)atomic_long_read(&perf_stats.qdio_int));
40648 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40649 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40650 - (long)atomic_long_read(&perf_stats.pci_int));
40651 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40652 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40653 - (long)atomic_long_read(&perf_stats.thin_int));
40654 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40655 seq_printf(m, "\n");
40656 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40657 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40658 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40659 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40660 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40661 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40662 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40663 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40664 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40665 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40666 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40667 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40668 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40669 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40670 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40671 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40672 seq_printf(m, "\n");
40673 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40674 - (long)atomic_long_read(&perf_stats.siga_in));
40675 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40676 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40677 - (long)atomic_long_read(&perf_stats.siga_out));
40678 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40679 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40680 - (long)atomic_long_read(&perf_stats.siga_sync));
40681 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40682 seq_printf(m, "\n");
40683 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40684 - (long)atomic_long_read(&perf_stats.inbound_handler));
40685 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40686 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40687 - (long)atomic_long_read(&perf_stats.outbound_handler));
40688 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40689 seq_printf(m, "\n");
40690 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40691 - (long)atomic_long_read(&perf_stats.fast_requeue));
40692 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40693 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40694 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40695 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40696 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40697 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40698 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40699 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40700 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40701 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40702 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40703 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40704 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40705 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40706 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40707 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40708 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40709 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40710 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40711 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40712 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40713 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40714 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40715 seq_printf(m, "\n");
40716 return 0;
40717 }
40718 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40719 index ff4504c..b3604c3 100644
40720 --- a/drivers/s390/cio/qdio_perf.h
40721 +++ b/drivers/s390/cio/qdio_perf.h
40722 @@ -13,46 +13,46 @@
40723
40724 struct qdio_perf_stats {
40725 /* interrupt handler calls */
40726 - atomic_long_t qdio_int;
40727 - atomic_long_t pci_int;
40728 - atomic_long_t thin_int;
40729 + atomic_long_unchecked_t qdio_int;
40730 + atomic_long_unchecked_t pci_int;
40731 + atomic_long_unchecked_t thin_int;
40732
40733 /* tasklet runs */
40734 - atomic_long_t tasklet_inbound;
40735 - atomic_long_t tasklet_outbound;
40736 - atomic_long_t tasklet_thinint;
40737 - atomic_long_t tasklet_thinint_loop;
40738 - atomic_long_t thinint_inbound;
40739 - atomic_long_t thinint_inbound_loop;
40740 - atomic_long_t thinint_inbound_loop2;
40741 + atomic_long_unchecked_t tasklet_inbound;
40742 + atomic_long_unchecked_t tasklet_outbound;
40743 + atomic_long_unchecked_t tasklet_thinint;
40744 + atomic_long_unchecked_t tasklet_thinint_loop;
40745 + atomic_long_unchecked_t thinint_inbound;
40746 + atomic_long_unchecked_t thinint_inbound_loop;
40747 + atomic_long_unchecked_t thinint_inbound_loop2;
40748
40749 /* signal adapter calls */
40750 - atomic_long_t siga_out;
40751 - atomic_long_t siga_in;
40752 - atomic_long_t siga_sync;
40753 + atomic_long_unchecked_t siga_out;
40754 + atomic_long_unchecked_t siga_in;
40755 + atomic_long_unchecked_t siga_sync;
40756
40757 /* misc */
40758 - atomic_long_t inbound_handler;
40759 - atomic_long_t outbound_handler;
40760 - atomic_long_t fast_requeue;
40761 - atomic_long_t outbound_target_full;
40762 + atomic_long_unchecked_t inbound_handler;
40763 + atomic_long_unchecked_t outbound_handler;
40764 + atomic_long_unchecked_t fast_requeue;
40765 + atomic_long_unchecked_t outbound_target_full;
40766
40767 /* for debugging */
40768 - atomic_long_t debug_tl_out_timer;
40769 - atomic_long_t debug_stop_polling;
40770 - atomic_long_t debug_eqbs_all;
40771 - atomic_long_t debug_eqbs_incomplete;
40772 - atomic_long_t debug_sqbs_all;
40773 - atomic_long_t debug_sqbs_incomplete;
40774 + atomic_long_unchecked_t debug_tl_out_timer;
40775 + atomic_long_unchecked_t debug_stop_polling;
40776 + atomic_long_unchecked_t debug_eqbs_all;
40777 + atomic_long_unchecked_t debug_eqbs_incomplete;
40778 + atomic_long_unchecked_t debug_sqbs_all;
40779 + atomic_long_unchecked_t debug_sqbs_incomplete;
40780 };
40781
40782 extern struct qdio_perf_stats perf_stats;
40783 extern int qdio_performance_stats;
40784
40785 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40786 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40787 {
40788 if (qdio_performance_stats)
40789 - atomic_long_inc(count);
40790 + atomic_long_inc_unchecked(count);
40791 }
40792
40793 int qdio_setup_perf_stats(void);
40794 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40795 index 1ddcf40..a85f062 100644
40796 --- a/drivers/scsi/BusLogic.c
40797 +++ b/drivers/scsi/BusLogic.c
40798 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40799 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40800 *PrototypeHostAdapter)
40801 {
40802 + pax_track_stack();
40803 +
40804 /*
40805 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40806 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40807 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40808 index cdbdec9..b7d560b 100644
40809 --- a/drivers/scsi/aacraid/aacraid.h
40810 +++ b/drivers/scsi/aacraid/aacraid.h
40811 @@ -471,7 +471,7 @@ struct adapter_ops
40812 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40813 /* Administrative operations */
40814 int (*adapter_comm)(struct aac_dev * dev, int comm);
40815 -};
40816 +} __no_const;
40817
40818 /*
40819 * Define which interrupt handler needs to be installed
40820 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40821 index a5b8e7b..a6a0e43 100644
40822 --- a/drivers/scsi/aacraid/commctrl.c
40823 +++ b/drivers/scsi/aacraid/commctrl.c
40824 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40825 u32 actual_fibsize64, actual_fibsize = 0;
40826 int i;
40827
40828 + pax_track_stack();
40829
40830 if (dev->in_reset) {
40831 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40832 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40833 index 9b97c3e..f099725 100644
40834 --- a/drivers/scsi/aacraid/linit.c
40835 +++ b/drivers/scsi/aacraid/linit.c
40836 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40837 #elif defined(__devinitconst)
40838 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40839 #else
40840 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40841 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40842 #endif
40843 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40844 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40845 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40846 index 996f722..9127845 100644
40847 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40848 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40849 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40850 flash_error_table[i].reason);
40851 }
40852
40853 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40854 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40855 asd_show_update_bios, asd_store_update_bios);
40856
40857 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40858 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40859 .lldd_control_phy = asd_control_phy,
40860 };
40861
40862 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40863 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40864 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40865 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40866 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40867 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40868 index 58efd4b..cb48dc7 100644
40869 --- a/drivers/scsi/bfa/bfa_ioc.h
40870 +++ b/drivers/scsi/bfa/bfa_ioc.h
40871 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40872 bfa_ioc_disable_cbfn_t disable_cbfn;
40873 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40874 bfa_ioc_reset_cbfn_t reset_cbfn;
40875 -};
40876 +} __no_const;
40877
40878 /**
40879 * Heartbeat failure notification queue element.
40880 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40881 index 7ad177e..5503586 100644
40882 --- a/drivers/scsi/bfa/bfa_iocfc.h
40883 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40884 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40885 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40886 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40887 u32 *nvecs, u32 *maxvec);
40888 -};
40889 +} __no_const;
40890 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40891
40892 struct bfa_iocfc_s {
40893 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40894 index 4967643..cbec06b 100644
40895 --- a/drivers/scsi/dpt_i2o.c
40896 +++ b/drivers/scsi/dpt_i2o.c
40897 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40898 dma_addr_t addr;
40899 ulong flags = 0;
40900
40901 + pax_track_stack();
40902 +
40903 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40904 // get user msg size in u32s
40905 if(get_user(size, &user_msg[0])){
40906 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40907 s32 rcode;
40908 dma_addr_t addr;
40909
40910 + pax_track_stack();
40911 +
40912 memset(msg, 0 , sizeof(msg));
40913 len = scsi_bufflen(cmd);
40914 direction = 0x00000000;
40915 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40916 index c7076ce..e20c67c 100644
40917 --- a/drivers/scsi/eata.c
40918 +++ b/drivers/scsi/eata.c
40919 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40920 struct hostdata *ha;
40921 char name[16];
40922
40923 + pax_track_stack();
40924 +
40925 sprintf(name, "%s%d", driver_name, j);
40926
40927 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40928 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40929 index 11ae5c9..891daec 100644
40930 --- a/drivers/scsi/fcoe/libfcoe.c
40931 +++ b/drivers/scsi/fcoe/libfcoe.c
40932 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40933 size_t rlen;
40934 size_t dlen;
40935
40936 + pax_track_stack();
40937 +
40938 fiph = (struct fip_header *)skb->data;
40939 sub = fiph->fip_subcode;
40940 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40941 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40942 index 71c7bbe..e93088a 100644
40943 --- a/drivers/scsi/fnic/fnic_main.c
40944 +++ b/drivers/scsi/fnic/fnic_main.c
40945 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40946 /* Start local port initiatialization */
40947
40948 lp->link_up = 0;
40949 - lp->tt = fnic_transport_template;
40950 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40951
40952 lp->max_retry_count = fnic->config.flogi_retries;
40953 lp->max_rport_retry_count = fnic->config.plogi_retries;
40954 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40955 index bb96d74..9ec3ce4 100644
40956 --- a/drivers/scsi/gdth.c
40957 +++ b/drivers/scsi/gdth.c
40958 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40959 ulong flags;
40960 gdth_ha_str *ha;
40961
40962 + pax_track_stack();
40963 +
40964 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40965 return -EFAULT;
40966 ha = gdth_find_ha(ldrv.ionode);
40967 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40968 gdth_ha_str *ha;
40969 int rval;
40970
40971 + pax_track_stack();
40972 +
40973 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40974 res.number >= MAX_HDRIVES)
40975 return -EFAULT;
40976 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40977 gdth_ha_str *ha;
40978 int rval;
40979
40980 + pax_track_stack();
40981 +
40982 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40983 return -EFAULT;
40984 ha = gdth_find_ha(gen.ionode);
40985 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40986 int i;
40987 gdth_cmd_str gdtcmd;
40988 char cmnd[MAX_COMMAND_SIZE];
40989 +
40990 + pax_track_stack();
40991 +
40992 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40993
40994 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40995 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40996 index 1258da3..20d8ae6 100644
40997 --- a/drivers/scsi/gdth_proc.c
40998 +++ b/drivers/scsi/gdth_proc.c
40999 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41000 ulong64 paddr;
41001
41002 char cmnd[MAX_COMMAND_SIZE];
41003 +
41004 + pax_track_stack();
41005 +
41006 memset(cmnd, 0xff, 12);
41007 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41008
41009 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41010 gdth_hget_str *phg;
41011 char cmnd[MAX_COMMAND_SIZE];
41012
41013 + pax_track_stack();
41014 +
41015 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41016 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41017 if (!gdtcmd || !estr)
41018 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41019 index d03a926..f324286 100644
41020 --- a/drivers/scsi/hosts.c
41021 +++ b/drivers/scsi/hosts.c
41022 @@ -40,7 +40,7 @@
41023 #include "scsi_logging.h"
41024
41025
41026 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
41027 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41028
41029
41030 static void scsi_host_cls_release(struct device *dev)
41031 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41032 * subtract one because we increment first then return, but we need to
41033 * know what the next host number was before increment
41034 */
41035 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41036 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41037 shost->dma_channel = 0xff;
41038
41039 /* These three are default values which can be overridden */
41040 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41041 index a601159..55e19d2 100644
41042 --- a/drivers/scsi/ipr.c
41043 +++ b/drivers/scsi/ipr.c
41044 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41045 return true;
41046 }
41047
41048 -static struct ata_port_operations ipr_sata_ops = {
41049 +static const struct ata_port_operations ipr_sata_ops = {
41050 .phy_reset = ipr_ata_phy_reset,
41051 .hardreset = ipr_sata_reset,
41052 .post_internal_cmd = ipr_ata_post_internal,
41053 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41054 index 4e49fbc..97907ff 100644
41055 --- a/drivers/scsi/ips.h
41056 +++ b/drivers/scsi/ips.h
41057 @@ -1027,7 +1027,7 @@ typedef struct {
41058 int (*intr)(struct ips_ha *);
41059 void (*enableint)(struct ips_ha *);
41060 uint32_t (*statupd)(struct ips_ha *);
41061 -} ips_hw_func_t;
41062 +} __no_const ips_hw_func_t;
41063
41064 typedef struct ips_ha {
41065 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41066 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41067 index c1c1574..a9c9348 100644
41068 --- a/drivers/scsi/libfc/fc_exch.c
41069 +++ b/drivers/scsi/libfc/fc_exch.c
41070 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
41071 * all together if not used XXX
41072 */
41073 struct {
41074 - atomic_t no_free_exch;
41075 - atomic_t no_free_exch_xid;
41076 - atomic_t xid_not_found;
41077 - atomic_t xid_busy;
41078 - atomic_t seq_not_found;
41079 - atomic_t non_bls_resp;
41080 + atomic_unchecked_t no_free_exch;
41081 + atomic_unchecked_t no_free_exch_xid;
41082 + atomic_unchecked_t xid_not_found;
41083 + atomic_unchecked_t xid_busy;
41084 + atomic_unchecked_t seq_not_found;
41085 + atomic_unchecked_t non_bls_resp;
41086 } stats;
41087 };
41088 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41089 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41090 /* allocate memory for exchange */
41091 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41092 if (!ep) {
41093 - atomic_inc(&mp->stats.no_free_exch);
41094 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41095 goto out;
41096 }
41097 memset(ep, 0, sizeof(*ep));
41098 @@ -557,7 +557,7 @@ out:
41099 return ep;
41100 err:
41101 spin_unlock_bh(&pool->lock);
41102 - atomic_inc(&mp->stats.no_free_exch_xid);
41103 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41104 mempool_free(ep, mp->ep_pool);
41105 return NULL;
41106 }
41107 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41108 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41109 ep = fc_exch_find(mp, xid);
41110 if (!ep) {
41111 - atomic_inc(&mp->stats.xid_not_found);
41112 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41113 reject = FC_RJT_OX_ID;
41114 goto out;
41115 }
41116 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41117 ep = fc_exch_find(mp, xid);
41118 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41119 if (ep) {
41120 - atomic_inc(&mp->stats.xid_busy);
41121 + atomic_inc_unchecked(&mp->stats.xid_busy);
41122 reject = FC_RJT_RX_ID;
41123 goto rel;
41124 }
41125 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41126 }
41127 xid = ep->xid; /* get our XID */
41128 } else if (!ep) {
41129 - atomic_inc(&mp->stats.xid_not_found);
41130 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41131 reject = FC_RJT_RX_ID; /* XID not found */
41132 goto out;
41133 }
41134 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41135 } else {
41136 sp = &ep->seq;
41137 if (sp->id != fh->fh_seq_id) {
41138 - atomic_inc(&mp->stats.seq_not_found);
41139 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41140 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41141 goto rel;
41142 }
41143 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41144
41145 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41146 if (!ep) {
41147 - atomic_inc(&mp->stats.xid_not_found);
41148 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41149 goto out;
41150 }
41151 if (ep->esb_stat & ESB_ST_COMPLETE) {
41152 - atomic_inc(&mp->stats.xid_not_found);
41153 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41154 goto out;
41155 }
41156 if (ep->rxid == FC_XID_UNKNOWN)
41157 ep->rxid = ntohs(fh->fh_rx_id);
41158 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41159 - atomic_inc(&mp->stats.xid_not_found);
41160 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41161 goto rel;
41162 }
41163 if (ep->did != ntoh24(fh->fh_s_id) &&
41164 ep->did != FC_FID_FLOGI) {
41165 - atomic_inc(&mp->stats.xid_not_found);
41166 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41167 goto rel;
41168 }
41169 sof = fr_sof(fp);
41170 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41171 } else {
41172 sp = &ep->seq;
41173 if (sp->id != fh->fh_seq_id) {
41174 - atomic_inc(&mp->stats.seq_not_found);
41175 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41176 goto rel;
41177 }
41178 }
41179 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41180 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41181
41182 if (!sp)
41183 - atomic_inc(&mp->stats.xid_not_found);
41184 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41185 else
41186 - atomic_inc(&mp->stats.non_bls_resp);
41187 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41188
41189 fc_frame_free(fp);
41190 }
41191 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41192 index 0ee989f..a582241 100644
41193 --- a/drivers/scsi/libsas/sas_ata.c
41194 +++ b/drivers/scsi/libsas/sas_ata.c
41195 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41196 }
41197 }
41198
41199 -static struct ata_port_operations sas_sata_ops = {
41200 +static const struct ata_port_operations sas_sata_ops = {
41201 .phy_reset = sas_ata_phy_reset,
41202 .post_internal_cmd = sas_ata_post_internal,
41203 .qc_defer = ata_std_qc_defer,
41204 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41205 index aa10f79..5cc79e4 100644
41206 --- a/drivers/scsi/lpfc/lpfc.h
41207 +++ b/drivers/scsi/lpfc/lpfc.h
41208 @@ -400,7 +400,7 @@ struct lpfc_vport {
41209 struct dentry *debug_nodelist;
41210 struct dentry *vport_debugfs_root;
41211 struct lpfc_debugfs_trc *disc_trc;
41212 - atomic_t disc_trc_cnt;
41213 + atomic_unchecked_t disc_trc_cnt;
41214 #endif
41215 uint8_t stat_data_enabled;
41216 uint8_t stat_data_blocked;
41217 @@ -725,8 +725,8 @@ struct lpfc_hba {
41218 struct timer_list fabric_block_timer;
41219 unsigned long bit_flags;
41220 #define FABRIC_COMANDS_BLOCKED 0
41221 - atomic_t num_rsrc_err;
41222 - atomic_t num_cmd_success;
41223 + atomic_unchecked_t num_rsrc_err;
41224 + atomic_unchecked_t num_cmd_success;
41225 unsigned long last_rsrc_error_time;
41226 unsigned long last_ramp_down_time;
41227 unsigned long last_ramp_up_time;
41228 @@ -740,7 +740,7 @@ struct lpfc_hba {
41229 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41230 struct dentry *debug_slow_ring_trc;
41231 struct lpfc_debugfs_trc *slow_ring_trc;
41232 - atomic_t slow_ring_trc_cnt;
41233 + atomic_unchecked_t slow_ring_trc_cnt;
41234 #endif
41235
41236 /* Used for deferred freeing of ELS data buffers */
41237 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41238 index 8d0f0de..7c77a62 100644
41239 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41240 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41241 @@ -124,7 +124,7 @@ struct lpfc_debug {
41242 int len;
41243 };
41244
41245 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41246 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41247 static unsigned long lpfc_debugfs_start_time = 0L;
41248
41249 /**
41250 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41251 lpfc_debugfs_enable = 0;
41252
41253 len = 0;
41254 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41255 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41256 (lpfc_debugfs_max_disc_trc - 1);
41257 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41258 dtp = vport->disc_trc + i;
41259 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41260 lpfc_debugfs_enable = 0;
41261
41262 len = 0;
41263 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41264 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41265 (lpfc_debugfs_max_slow_ring_trc - 1);
41266 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41267 dtp = phba->slow_ring_trc + i;
41268 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41269 uint32_t *ptr;
41270 char buffer[1024];
41271
41272 + pax_track_stack();
41273 +
41274 off = 0;
41275 spin_lock_irq(&phba->hbalock);
41276
41277 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41278 !vport || !vport->disc_trc)
41279 return;
41280
41281 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41282 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41283 (lpfc_debugfs_max_disc_trc - 1);
41284 dtp = vport->disc_trc + index;
41285 dtp->fmt = fmt;
41286 dtp->data1 = data1;
41287 dtp->data2 = data2;
41288 dtp->data3 = data3;
41289 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41290 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41291 dtp->jif = jiffies;
41292 #endif
41293 return;
41294 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41295 !phba || !phba->slow_ring_trc)
41296 return;
41297
41298 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41299 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41300 (lpfc_debugfs_max_slow_ring_trc - 1);
41301 dtp = phba->slow_ring_trc + index;
41302 dtp->fmt = fmt;
41303 dtp->data1 = data1;
41304 dtp->data2 = data2;
41305 dtp->data3 = data3;
41306 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41307 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41308 dtp->jif = jiffies;
41309 #endif
41310 return;
41311 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41312 "slow_ring buffer\n");
41313 goto debug_failed;
41314 }
41315 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41316 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41317 memset(phba->slow_ring_trc, 0,
41318 (sizeof(struct lpfc_debugfs_trc) *
41319 lpfc_debugfs_max_slow_ring_trc));
41320 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41321 "buffer\n");
41322 goto debug_failed;
41323 }
41324 - atomic_set(&vport->disc_trc_cnt, 0);
41325 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41326
41327 snprintf(name, sizeof(name), "discovery_trace");
41328 vport->debug_disc_trc =
41329 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41330 index 549bc7d..8189dbb 100644
41331 --- a/drivers/scsi/lpfc/lpfc_init.c
41332 +++ b/drivers/scsi/lpfc/lpfc_init.c
41333 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41334 printk(LPFC_COPYRIGHT "\n");
41335
41336 if (lpfc_enable_npiv) {
41337 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41338 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41339 + pax_open_kernel();
41340 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41341 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41342 + pax_close_kernel();
41343 }
41344 lpfc_transport_template =
41345 fc_attach_transport(&lpfc_transport_functions);
41346 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41347 index c88f59f..ff2a42f 100644
41348 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41349 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41350 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41351 uint32_t evt_posted;
41352
41353 spin_lock_irqsave(&phba->hbalock, flags);
41354 - atomic_inc(&phba->num_rsrc_err);
41355 + atomic_inc_unchecked(&phba->num_rsrc_err);
41356 phba->last_rsrc_error_time = jiffies;
41357
41358 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41359 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41360 unsigned long flags;
41361 struct lpfc_hba *phba = vport->phba;
41362 uint32_t evt_posted;
41363 - atomic_inc(&phba->num_cmd_success);
41364 + atomic_inc_unchecked(&phba->num_cmd_success);
41365
41366 if (vport->cfg_lun_queue_depth <= queue_depth)
41367 return;
41368 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41369 int i;
41370 struct lpfc_rport_data *rdata;
41371
41372 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41373 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41374 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41375 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41376
41377 vports = lpfc_create_vport_work_array(phba);
41378 if (vports != NULL)
41379 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41380 }
41381 }
41382 lpfc_destroy_vport_work_array(phba, vports);
41383 - atomic_set(&phba->num_rsrc_err, 0);
41384 - atomic_set(&phba->num_cmd_success, 0);
41385 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41386 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41387 }
41388
41389 /**
41390 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41391 }
41392 }
41393 lpfc_destroy_vport_work_array(phba, vports);
41394 - atomic_set(&phba->num_rsrc_err, 0);
41395 - atomic_set(&phba->num_cmd_success, 0);
41396 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41397 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41398 }
41399
41400 /**
41401 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41402 index 234f0b7..3020aea 100644
41403 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41404 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41405 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41406 int rval;
41407 int i;
41408
41409 + pax_track_stack();
41410 +
41411 // Allocate memory for the base list of scb for management module.
41412 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41413
41414 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41415 index 7a117c1..ee01e9e 100644
41416 --- a/drivers/scsi/osd/osd_initiator.c
41417 +++ b/drivers/scsi/osd/osd_initiator.c
41418 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41419 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41420 int ret;
41421
41422 + pax_track_stack();
41423 +
41424 or = osd_start_request(od, GFP_KERNEL);
41425 if (!or)
41426 return -ENOMEM;
41427 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41428 index 9ab8c86..9425ad3 100644
41429 --- a/drivers/scsi/pmcraid.c
41430 +++ b/drivers/scsi/pmcraid.c
41431 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41432 res->scsi_dev = scsi_dev;
41433 scsi_dev->hostdata = res;
41434 res->change_detected = 0;
41435 - atomic_set(&res->read_failures, 0);
41436 - atomic_set(&res->write_failures, 0);
41437 + atomic_set_unchecked(&res->read_failures, 0);
41438 + atomic_set_unchecked(&res->write_failures, 0);
41439 rc = 0;
41440 }
41441 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41442 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41443
41444 /* If this was a SCSI read/write command keep count of errors */
41445 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41446 - atomic_inc(&res->read_failures);
41447 + atomic_inc_unchecked(&res->read_failures);
41448 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41449 - atomic_inc(&res->write_failures);
41450 + atomic_inc_unchecked(&res->write_failures);
41451
41452 if (!RES_IS_GSCSI(res->cfg_entry) &&
41453 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41454 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41455
41456 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41457 /* add resources only after host is added into system */
41458 - if (!atomic_read(&pinstance->expose_resources))
41459 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41460 return;
41461
41462 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41463 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41464 init_waitqueue_head(&pinstance->reset_wait_q);
41465
41466 atomic_set(&pinstance->outstanding_cmds, 0);
41467 - atomic_set(&pinstance->expose_resources, 0);
41468 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41469
41470 INIT_LIST_HEAD(&pinstance->free_res_q);
41471 INIT_LIST_HEAD(&pinstance->used_res_q);
41472 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41473 /* Schedule worker thread to handle CCN and take care of adding and
41474 * removing devices to OS
41475 */
41476 - atomic_set(&pinstance->expose_resources, 1);
41477 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41478 schedule_work(&pinstance->worker_q);
41479 return rc;
41480
41481 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41482 index 3441b3f..6cbe8f7 100644
41483 --- a/drivers/scsi/pmcraid.h
41484 +++ b/drivers/scsi/pmcraid.h
41485 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41486 atomic_t outstanding_cmds;
41487
41488 /* should add/delete resources to mid-layer now ?*/
41489 - atomic_t expose_resources;
41490 + atomic_unchecked_t expose_resources;
41491
41492 /* Tasklet to handle deferred processing */
41493 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41494 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41495 struct list_head queue; /* link to "to be exposed" resources */
41496 struct pmcraid_config_table_entry cfg_entry;
41497 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41498 - atomic_t read_failures; /* count of failed READ commands */
41499 - atomic_t write_failures; /* count of failed WRITE commands */
41500 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41501 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41502
41503 /* To indicate add/delete/modify during CCN */
41504 u8 change_detected;
41505 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41506 index 2150618..7034215 100644
41507 --- a/drivers/scsi/qla2xxx/qla_def.h
41508 +++ b/drivers/scsi/qla2xxx/qla_def.h
41509 @@ -2089,7 +2089,7 @@ struct isp_operations {
41510
41511 int (*get_flash_version) (struct scsi_qla_host *, void *);
41512 int (*start_scsi) (srb_t *);
41513 -};
41514 +} __no_const;
41515
41516 /* MSI-X Support *************************************************************/
41517
41518 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41519 index 81b5f29..2ae1fad 100644
41520 --- a/drivers/scsi/qla4xxx/ql4_def.h
41521 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41522 @@ -240,7 +240,7 @@ struct ddb_entry {
41523 atomic_t retry_relogin_timer; /* Min Time between relogins
41524 * (4000 only) */
41525 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41526 - atomic_t relogin_retry_count; /* Num of times relogin has been
41527 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41528 * retried */
41529
41530 uint16_t port;
41531 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41532 index af8c323..515dd51 100644
41533 --- a/drivers/scsi/qla4xxx/ql4_init.c
41534 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41535 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41536 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41537 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41538 atomic_set(&ddb_entry->relogin_timer, 0);
41539 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41540 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41541 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41542 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41543 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41544 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41545 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41546 atomic_set(&ddb_entry->port_down_timer,
41547 ha->port_down_retry_count);
41548 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41549 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41550 atomic_set(&ddb_entry->relogin_timer, 0);
41551 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41552 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41553 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41554 index 83c8b5e..a82b348 100644
41555 --- a/drivers/scsi/qla4xxx/ql4_os.c
41556 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41557 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41558 ddb_entry->fw_ddb_device_state ==
41559 DDB_DS_SESSION_FAILED) {
41560 /* Reset retry relogin timer */
41561 - atomic_inc(&ddb_entry->relogin_retry_count);
41562 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41563 DEBUG2(printk("scsi%ld: index[%d] relogin"
41564 " timed out-retrying"
41565 " relogin (%d)\n",
41566 ha->host_no,
41567 ddb_entry->fw_ddb_index,
41568 - atomic_read(&ddb_entry->
41569 + atomic_read_unchecked(&ddb_entry->
41570 relogin_retry_count))
41571 );
41572 start_dpc++;
41573 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41574 index dd098ca..686ce01 100644
41575 --- a/drivers/scsi/scsi.c
41576 +++ b/drivers/scsi/scsi.c
41577 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41578 unsigned long timeout;
41579 int rtn = 0;
41580
41581 - atomic_inc(&cmd->device->iorequest_cnt);
41582 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41583
41584 /* check if the device is still usable */
41585 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41586 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41587 index bc3e363..e1a8e50 100644
41588 --- a/drivers/scsi/scsi_debug.c
41589 +++ b/drivers/scsi/scsi_debug.c
41590 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41591 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41592 unsigned char *cmd = (unsigned char *)scp->cmnd;
41593
41594 + pax_track_stack();
41595 +
41596 if ((errsts = check_readiness(scp, 1, devip)))
41597 return errsts;
41598 memset(arr, 0, sizeof(arr));
41599 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41600 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41601 unsigned char *cmd = (unsigned char *)scp->cmnd;
41602
41603 + pax_track_stack();
41604 +
41605 if ((errsts = check_readiness(scp, 1, devip)))
41606 return errsts;
41607 memset(arr, 0, sizeof(arr));
41608 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41609 index 8df12522..c4c1472 100644
41610 --- a/drivers/scsi/scsi_lib.c
41611 +++ b/drivers/scsi/scsi_lib.c
41612 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41613 shost = sdev->host;
41614 scsi_init_cmd_errh(cmd);
41615 cmd->result = DID_NO_CONNECT << 16;
41616 - atomic_inc(&cmd->device->iorequest_cnt);
41617 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41618
41619 /*
41620 * SCSI request completion path will do scsi_device_unbusy(),
41621 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41622 */
41623 cmd->serial_number = 0;
41624
41625 - atomic_inc(&cmd->device->iodone_cnt);
41626 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41627 if (cmd->result)
41628 - atomic_inc(&cmd->device->ioerr_cnt);
41629 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41630
41631 disposition = scsi_decide_disposition(cmd);
41632 if (disposition != SUCCESS &&
41633 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41634 index 91a93e0..eae0fe3 100644
41635 --- a/drivers/scsi/scsi_sysfs.c
41636 +++ b/drivers/scsi/scsi_sysfs.c
41637 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41638 char *buf) \
41639 { \
41640 struct scsi_device *sdev = to_scsi_device(dev); \
41641 - unsigned long long count = atomic_read(&sdev->field); \
41642 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41643 return snprintf(buf, 20, "0x%llx\n", count); \
41644 } \
41645 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41646 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41647 index 1030327..f91fd30 100644
41648 --- a/drivers/scsi/scsi_tgt_lib.c
41649 +++ b/drivers/scsi/scsi_tgt_lib.c
41650 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41651 int err;
41652
41653 dprintk("%lx %u\n", uaddr, len);
41654 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41655 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41656 if (err) {
41657 /*
41658 * TODO: need to fixup sg_tablesize, max_segment_size,
41659 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41660 index db02e31..1b42ea9 100644
41661 --- a/drivers/scsi/scsi_transport_fc.c
41662 +++ b/drivers/scsi/scsi_transport_fc.c
41663 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41664 * Netlink Infrastructure
41665 */
41666
41667 -static atomic_t fc_event_seq;
41668 +static atomic_unchecked_t fc_event_seq;
41669
41670 /**
41671 * fc_get_event_number - Obtain the next sequential FC event number
41672 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41673 u32
41674 fc_get_event_number(void)
41675 {
41676 - return atomic_add_return(1, &fc_event_seq);
41677 + return atomic_add_return_unchecked(1, &fc_event_seq);
41678 }
41679 EXPORT_SYMBOL(fc_get_event_number);
41680
41681 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41682 {
41683 int error;
41684
41685 - atomic_set(&fc_event_seq, 0);
41686 + atomic_set_unchecked(&fc_event_seq, 0);
41687
41688 error = transport_class_register(&fc_host_class);
41689 if (error)
41690 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41691 index de2f8c4..63c5278 100644
41692 --- a/drivers/scsi/scsi_transport_iscsi.c
41693 +++ b/drivers/scsi/scsi_transport_iscsi.c
41694 @@ -81,7 +81,7 @@ struct iscsi_internal {
41695 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41696 };
41697
41698 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41699 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41700 static struct workqueue_struct *iscsi_eh_timer_workq;
41701
41702 /*
41703 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41704 int err;
41705
41706 ihost = shost->shost_data;
41707 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41708 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41709
41710 if (id == ISCSI_MAX_TARGET) {
41711 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41712 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41713 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41714 ISCSI_TRANSPORT_VERSION);
41715
41716 - atomic_set(&iscsi_session_nr, 0);
41717 + atomic_set_unchecked(&iscsi_session_nr, 0);
41718
41719 err = class_register(&iscsi_transport_class);
41720 if (err)
41721 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41722 index 21a045e..ec89e03 100644
41723 --- a/drivers/scsi/scsi_transport_srp.c
41724 +++ b/drivers/scsi/scsi_transport_srp.c
41725 @@ -33,7 +33,7 @@
41726 #include "scsi_transport_srp_internal.h"
41727
41728 struct srp_host_attrs {
41729 - atomic_t next_port_id;
41730 + atomic_unchecked_t next_port_id;
41731 };
41732 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41733
41734 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41735 struct Scsi_Host *shost = dev_to_shost(dev);
41736 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41737
41738 - atomic_set(&srp_host->next_port_id, 0);
41739 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41740 return 0;
41741 }
41742
41743 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41744 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41745 rport->roles = ids->roles;
41746
41747 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41748 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41749 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41750
41751 transport_setup_device(&rport->dev);
41752 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41753 index 040f751..98a5ed2 100644
41754 --- a/drivers/scsi/sg.c
41755 +++ b/drivers/scsi/sg.c
41756 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41757 sdp->disk->disk_name,
41758 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41759 NULL,
41760 - (char *)arg);
41761 + (char __user *)arg);
41762 case BLKTRACESTART:
41763 return blk_trace_startstop(sdp->device->request_queue, 1);
41764 case BLKTRACESTOP:
41765 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41766 const struct file_operations * fops;
41767 };
41768
41769 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41770 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41771 {"allow_dio", &adio_fops},
41772 {"debug", &debug_fops},
41773 {"def_reserved_size", &dressz_fops},
41774 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41775 {
41776 int k, mask;
41777 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41778 - struct sg_proc_leaf * leaf;
41779 + const struct sg_proc_leaf * leaf;
41780
41781 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41782 if (!sg_proc_sgp)
41783 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41784 index c19ca5e..3eb5959 100644
41785 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41786 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41787 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41788 int do_iounmap = 0;
41789 int do_disable_device = 1;
41790
41791 + pax_track_stack();
41792 +
41793 memset(&sym_dev, 0, sizeof(sym_dev));
41794 memset(&nvram, 0, sizeof(nvram));
41795 sym_dev.pdev = pdev;
41796 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41797 index eadc1ab..2d81457 100644
41798 --- a/drivers/serial/kgdboc.c
41799 +++ b/drivers/serial/kgdboc.c
41800 @@ -18,7 +18,7 @@
41801
41802 #define MAX_CONFIG_LEN 40
41803
41804 -static struct kgdb_io kgdboc_io_ops;
41805 +static const struct kgdb_io kgdboc_io_ops;
41806
41807 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41808 static int configured = -1;
41809 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41810 module_put(THIS_MODULE);
41811 }
41812
41813 -static struct kgdb_io kgdboc_io_ops = {
41814 +static const struct kgdb_io kgdboc_io_ops = {
41815 .name = "kgdboc",
41816 .read_char = kgdboc_get_char,
41817 .write_char = kgdboc_put_char,
41818 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41819 index b76f246..7f41af7 100644
41820 --- a/drivers/spi/spi.c
41821 +++ b/drivers/spi/spi.c
41822 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41823 EXPORT_SYMBOL_GPL(spi_sync);
41824
41825 /* portable code must never pass more than 32 bytes */
41826 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41827 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41828
41829 static u8 *buf;
41830
41831 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41832 index b9b37ff..19dfa23 100644
41833 --- a/drivers/staging/android/binder.c
41834 +++ b/drivers/staging/android/binder.c
41835 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41836 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41837 }
41838
41839 -static struct vm_operations_struct binder_vm_ops = {
41840 +static const struct vm_operations_struct binder_vm_ops = {
41841 .open = binder_vma_open,
41842 .close = binder_vma_close,
41843 };
41844 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41845 index cda26bb..39fed3f 100644
41846 --- a/drivers/staging/b3dfg/b3dfg.c
41847 +++ b/drivers/staging/b3dfg/b3dfg.c
41848 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41849 return VM_FAULT_NOPAGE;
41850 }
41851
41852 -static struct vm_operations_struct b3dfg_vm_ops = {
41853 +static const struct vm_operations_struct b3dfg_vm_ops = {
41854 .fault = b3dfg_vma_fault,
41855 };
41856
41857 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41858 return r;
41859 }
41860
41861 -static struct file_operations b3dfg_fops = {
41862 +static const struct file_operations b3dfg_fops = {
41863 .owner = THIS_MODULE,
41864 .open = b3dfg_open,
41865 .release = b3dfg_release,
41866 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41867 index 908f25a..c9a579b 100644
41868 --- a/drivers/staging/comedi/comedi_fops.c
41869 +++ b/drivers/staging/comedi/comedi_fops.c
41870 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41871 mutex_unlock(&dev->mutex);
41872 }
41873
41874 -static struct vm_operations_struct comedi_vm_ops = {
41875 +static const struct vm_operations_struct comedi_vm_ops = {
41876 .close = comedi_unmap,
41877 };
41878
41879 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41880 index e55a0db..577b776 100644
41881 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41882 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41883 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41884 static dev_t adsp_devno;
41885 static struct class *adsp_class;
41886
41887 -static struct file_operations adsp_fops = {
41888 +static const struct file_operations adsp_fops = {
41889 .owner = THIS_MODULE,
41890 .open = adsp_open,
41891 .unlocked_ioctl = adsp_ioctl,
41892 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41893 index ad2390f..4116ee8 100644
41894 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41895 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41896 @@ -1022,7 +1022,7 @@ done:
41897 return rc;
41898 }
41899
41900 -static struct file_operations audio_aac_fops = {
41901 +static const struct file_operations audio_aac_fops = {
41902 .owner = THIS_MODULE,
41903 .open = audio_open,
41904 .release = audio_release,
41905 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41906 index cd818a5..870b37b 100644
41907 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41908 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41909 @@ -833,7 +833,7 @@ done:
41910 return rc;
41911 }
41912
41913 -static struct file_operations audio_amrnb_fops = {
41914 +static const struct file_operations audio_amrnb_fops = {
41915 .owner = THIS_MODULE,
41916 .open = audamrnb_open,
41917 .release = audamrnb_release,
41918 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41919 index 4b43e18..cedafda 100644
41920 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41921 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41922 @@ -805,7 +805,7 @@ dma_fail:
41923 return rc;
41924 }
41925
41926 -static struct file_operations audio_evrc_fops = {
41927 +static const struct file_operations audio_evrc_fops = {
41928 .owner = THIS_MODULE,
41929 .open = audevrc_open,
41930 .release = audevrc_release,
41931 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41932 index 3d950a2..9431118 100644
41933 --- a/drivers/staging/dream/qdsp5/audio_in.c
41934 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41935 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41936 return 0;
41937 }
41938
41939 -static struct file_operations audio_fops = {
41940 +static const struct file_operations audio_fops = {
41941 .owner = THIS_MODULE,
41942 .open = audio_in_open,
41943 .release = audio_in_release,
41944 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41945 .unlocked_ioctl = audio_in_ioctl,
41946 };
41947
41948 -static struct file_operations audpre_fops = {
41949 +static const struct file_operations audpre_fops = {
41950 .owner = THIS_MODULE,
41951 .open = audpre_open,
41952 .unlocked_ioctl = audpre_ioctl,
41953 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41954 index b95574f..286c2f4 100644
41955 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41956 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41957 @@ -941,7 +941,7 @@ done:
41958 return rc;
41959 }
41960
41961 -static struct file_operations audio_mp3_fops = {
41962 +static const struct file_operations audio_mp3_fops = {
41963 .owner = THIS_MODULE,
41964 .open = audio_open,
41965 .release = audio_release,
41966 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41967 index d1adcf6..f8f9833 100644
41968 --- a/drivers/staging/dream/qdsp5/audio_out.c
41969 +++ b/drivers/staging/dream/qdsp5/audio_out.c
41970 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41971 return 0;
41972 }
41973
41974 -static struct file_operations audio_fops = {
41975 +static const struct file_operations audio_fops = {
41976 .owner = THIS_MODULE,
41977 .open = audio_open,
41978 .release = audio_release,
41979 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41980 .unlocked_ioctl = audio_ioctl,
41981 };
41982
41983 -static struct file_operations audpp_fops = {
41984 +static const struct file_operations audpp_fops = {
41985 .owner = THIS_MODULE,
41986 .open = audpp_open,
41987 .unlocked_ioctl = audpp_ioctl,
41988 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41989 index f0f50e3..f6b9dbc 100644
41990 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41991 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41992 @@ -816,7 +816,7 @@ err:
41993 return rc;
41994 }
41995
41996 -static struct file_operations audio_qcelp_fops = {
41997 +static const struct file_operations audio_qcelp_fops = {
41998 .owner = THIS_MODULE,
41999 .open = audqcelp_open,
42000 .release = audqcelp_release,
42001 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42002 index 037d7ff..5469ec3 100644
42003 --- a/drivers/staging/dream/qdsp5/snd.c
42004 +++ b/drivers/staging/dream/qdsp5/snd.c
42005 @@ -242,7 +242,7 @@ err:
42006 return rc;
42007 }
42008
42009 -static struct file_operations snd_fops = {
42010 +static const struct file_operations snd_fops = {
42011 .owner = THIS_MODULE,
42012 .open = snd_open,
42013 .release = snd_release,
42014 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42015 index d4e7d88..0ea632a 100644
42016 --- a/drivers/staging/dream/smd/smd_qmi.c
42017 +++ b/drivers/staging/dream/smd/smd_qmi.c
42018 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42019 return 0;
42020 }
42021
42022 -static struct file_operations qmi_fops = {
42023 +static const struct file_operations qmi_fops = {
42024 .owner = THIS_MODULE,
42025 .read = qmi_read,
42026 .write = qmi_write,
42027 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42028 index cd3910b..ff053d3 100644
42029 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42030 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42031 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42032 return rc;
42033 }
42034
42035 -static struct file_operations rpcrouter_server_fops = {
42036 +static const struct file_operations rpcrouter_server_fops = {
42037 .owner = THIS_MODULE,
42038 .open = rpcrouter_open,
42039 .release = rpcrouter_release,
42040 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42041 .unlocked_ioctl = rpcrouter_ioctl,
42042 };
42043
42044 -static struct file_operations rpcrouter_router_fops = {
42045 +static const struct file_operations rpcrouter_router_fops = {
42046 .owner = THIS_MODULE,
42047 .open = rpcrouter_open,
42048 .release = rpcrouter_release,
42049 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42050 index c24e4e0..07665be 100644
42051 --- a/drivers/staging/dst/dcore.c
42052 +++ b/drivers/staging/dst/dcore.c
42053 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42054 return 0;
42055 }
42056
42057 -static struct block_device_operations dst_blk_ops = {
42058 +static const struct block_device_operations dst_blk_ops = {
42059 .open = dst_bdev_open,
42060 .release = dst_bdev_release,
42061 .owner = THIS_MODULE,
42062 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42063 n->size = ctl->size;
42064
42065 atomic_set(&n->refcnt, 1);
42066 - atomic_long_set(&n->gen, 0);
42067 + atomic_long_set_unchecked(&n->gen, 0);
42068 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42069
42070 err = dst_node_sysfs_init(n);
42071 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42072 index 557d372..8d84422 100644
42073 --- a/drivers/staging/dst/trans.c
42074 +++ b/drivers/staging/dst/trans.c
42075 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42076 t->error = 0;
42077 t->retries = 0;
42078 atomic_set(&t->refcnt, 1);
42079 - t->gen = atomic_long_inc_return(&n->gen);
42080 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
42081
42082 t->enc = bio_data_dir(bio);
42083 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42084 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42085 index 94f7752..d051514 100644
42086 --- a/drivers/staging/et131x/et1310_tx.c
42087 +++ b/drivers/staging/et131x/et1310_tx.c
42088 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42089 struct net_device_stats *stats = &etdev->net_stats;
42090
42091 if (pMpTcb->Flags & fMP_DEST_BROAD)
42092 - atomic_inc(&etdev->Stats.brdcstxmt);
42093 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42094 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42095 - atomic_inc(&etdev->Stats.multixmt);
42096 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42097 else
42098 - atomic_inc(&etdev->Stats.unixmt);
42099 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42100
42101 if (pMpTcb->Packet) {
42102 stats->tx_bytes += pMpTcb->Packet->len;
42103 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42104 index 1dfe06f..f469b4d 100644
42105 --- a/drivers/staging/et131x/et131x_adapter.h
42106 +++ b/drivers/staging/et131x/et131x_adapter.h
42107 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42108 * operations
42109 */
42110 u32 unircv; /* # multicast packets received */
42111 - atomic_t unixmt; /* # multicast packets for Tx */
42112 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42113 u32 multircv; /* # multicast packets received */
42114 - atomic_t multixmt; /* # multicast packets for Tx */
42115 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42116 u32 brdcstrcv; /* # broadcast packets received */
42117 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42118 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42119 u32 norcvbuf; /* # Rx packets discarded */
42120 u32 noxmtbuf; /* # Tx packets discarded */
42121
42122 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42123 index 4bd353a..e28f455 100644
42124 --- a/drivers/staging/go7007/go7007-v4l2.c
42125 +++ b/drivers/staging/go7007/go7007-v4l2.c
42126 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42127 return 0;
42128 }
42129
42130 -static struct vm_operations_struct go7007_vm_ops = {
42131 +static const struct vm_operations_struct go7007_vm_ops = {
42132 .open = go7007_vm_open,
42133 .close = go7007_vm_close,
42134 .fault = go7007_vm_fault,
42135 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42136 index 366dc95..b974d87 100644
42137 --- a/drivers/staging/hv/Channel.c
42138 +++ b/drivers/staging/hv/Channel.c
42139 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42140
42141 DPRINT_ENTER(VMBUS);
42142
42143 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42144 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42145 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42146 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42147
42148 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42149 ASSERT(msgInfo != NULL);
42150 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42151 index b12237f..01ae28a 100644
42152 --- a/drivers/staging/hv/Hv.c
42153 +++ b/drivers/staging/hv/Hv.c
42154 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42155 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42156 u32 outputAddressHi = outputAddress >> 32;
42157 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42158 - volatile void *hypercallPage = gHvContext.HypercallPage;
42159 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42160
42161 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42162 Control, Input, Output);
42163 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42164 index d089bb1..2ebc158 100644
42165 --- a/drivers/staging/hv/VmbusApi.h
42166 +++ b/drivers/staging/hv/VmbusApi.h
42167 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42168 u32 *GpadlHandle);
42169 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42170 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42171 -};
42172 +} __no_const;
42173
42174 /* Base driver object */
42175 struct hv_driver {
42176 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42177 index 5a37cce..6ecc88c 100644
42178 --- a/drivers/staging/hv/VmbusPrivate.h
42179 +++ b/drivers/staging/hv/VmbusPrivate.h
42180 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42181 struct VMBUS_CONNECTION {
42182 enum VMBUS_CONNECT_STATE ConnectState;
42183
42184 - atomic_t NextGpadlHandle;
42185 + atomic_unchecked_t NextGpadlHandle;
42186
42187 /*
42188 * Represents channel interrupts. Each bit position represents a
42189 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42190 index 871a202..ca50ddf 100644
42191 --- a/drivers/staging/hv/blkvsc_drv.c
42192 +++ b/drivers/staging/hv/blkvsc_drv.c
42193 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42194 /* The one and only one */
42195 static struct blkvsc_driver_context g_blkvsc_drv;
42196
42197 -static struct block_device_operations block_ops = {
42198 +static const struct block_device_operations block_ops = {
42199 .owner = THIS_MODULE,
42200 .open = blkvsc_open,
42201 .release = blkvsc_release,
42202 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42203 index 6acc49a..fbc8d46 100644
42204 --- a/drivers/staging/hv/vmbus_drv.c
42205 +++ b/drivers/staging/hv/vmbus_drv.c
42206 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42207 to_device_context(root_device_obj);
42208 struct device_context *child_device_ctx =
42209 to_device_context(child_device_obj);
42210 - static atomic_t device_num = ATOMIC_INIT(0);
42211 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42212
42213 DPRINT_ENTER(VMBUS_DRV);
42214
42215 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42216
42217 /* Set the device name. Otherwise, device_register() will fail. */
42218 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42219 - atomic_inc_return(&device_num));
42220 + atomic_inc_return_unchecked(&device_num));
42221
42222 /* The new device belongs to this bus */
42223 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42224 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42225 index d926189..17b19fd 100644
42226 --- a/drivers/staging/iio/ring_generic.h
42227 +++ b/drivers/staging/iio/ring_generic.h
42228 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42229
42230 int (*is_enabled)(struct iio_ring_buffer *ring);
42231 int (*enable)(struct iio_ring_buffer *ring);
42232 -};
42233 +} __no_const;
42234
42235 /**
42236 * struct iio_ring_buffer - general ring buffer structure
42237 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42238 index 1b237b7..88c624e 100644
42239 --- a/drivers/staging/octeon/ethernet-rx.c
42240 +++ b/drivers/staging/octeon/ethernet-rx.c
42241 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42242 /* Increment RX stats for virtual ports */
42243 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42244 #ifdef CONFIG_64BIT
42245 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42246 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42247 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42248 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42249 #else
42250 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42251 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42252 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42253 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42254 #endif
42255 }
42256 netif_receive_skb(skb);
42257 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42258 dev->name);
42259 */
42260 #ifdef CONFIG_64BIT
42261 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42262 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42263 #else
42264 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42265 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42266 #endif
42267 dev_kfree_skb_irq(skb);
42268 }
42269 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42270 index 492c502..d9909f1 100644
42271 --- a/drivers/staging/octeon/ethernet.c
42272 +++ b/drivers/staging/octeon/ethernet.c
42273 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42274 * since the RX tasklet also increments it.
42275 */
42276 #ifdef CONFIG_64BIT
42277 - atomic64_add(rx_status.dropped_packets,
42278 - (atomic64_t *)&priv->stats.rx_dropped);
42279 + atomic64_add_unchecked(rx_status.dropped_packets,
42280 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42281 #else
42282 - atomic_add(rx_status.dropped_packets,
42283 - (atomic_t *)&priv->stats.rx_dropped);
42284 + atomic_add_unchecked(rx_status.dropped_packets,
42285 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42286 #endif
42287 }
42288
42289 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42290 index a35bd5d..28fff45 100644
42291 --- a/drivers/staging/otus/80211core/pub_zfi.h
42292 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42293 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42294 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42295
42296 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42297 -};
42298 +} __no_const;
42299
42300 extern void zfZeroMemory(u8_t* va, u16_t length);
42301 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42302 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42303 index c39a25f..696f5aa 100644
42304 --- a/drivers/staging/panel/panel.c
42305 +++ b/drivers/staging/panel/panel.c
42306 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42307 return 0;
42308 }
42309
42310 -static struct file_operations lcd_fops = {
42311 +static const struct file_operations lcd_fops = {
42312 .write = lcd_write,
42313 .open = lcd_open,
42314 .release = lcd_release,
42315 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42316 return 0;
42317 }
42318
42319 -static struct file_operations keypad_fops = {
42320 +static const struct file_operations keypad_fops = {
42321 .read = keypad_read, /* read */
42322 .open = keypad_open, /* open */
42323 .release = keypad_release, /* close */
42324 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42325 index 270ebcb..37e46af 100644
42326 --- a/drivers/staging/phison/phison.c
42327 +++ b/drivers/staging/phison/phison.c
42328 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42329 ATA_BMDMA_SHT(DRV_NAME),
42330 };
42331
42332 -static struct ata_port_operations phison_ops = {
42333 +static const struct ata_port_operations phison_ops = {
42334 .inherits = &ata_bmdma_port_ops,
42335 .prereset = phison_pre_reset,
42336 };
42337 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42338 index 2eb8e3d..57616a7 100644
42339 --- a/drivers/staging/poch/poch.c
42340 +++ b/drivers/staging/poch/poch.c
42341 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42342 return 0;
42343 }
42344
42345 -static struct file_operations poch_fops = {
42346 +static const struct file_operations poch_fops = {
42347 .owner = THIS_MODULE,
42348 .open = poch_open,
42349 .release = poch_release,
42350 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42351 index c94de31..19402bc 100644
42352 --- a/drivers/staging/pohmelfs/inode.c
42353 +++ b/drivers/staging/pohmelfs/inode.c
42354 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42355 mutex_init(&psb->mcache_lock);
42356 psb->mcache_root = RB_ROOT;
42357 psb->mcache_timeout = msecs_to_jiffies(5000);
42358 - atomic_long_set(&psb->mcache_gen, 0);
42359 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42360
42361 psb->trans_max_pages = 100;
42362
42363 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42364 INIT_LIST_HEAD(&psb->crypto_ready_list);
42365 INIT_LIST_HEAD(&psb->crypto_active_list);
42366
42367 - atomic_set(&psb->trans_gen, 1);
42368 + atomic_set_unchecked(&psb->trans_gen, 1);
42369 atomic_long_set(&psb->total_inodes, 0);
42370
42371 mutex_init(&psb->state_lock);
42372 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42373 index e22665c..a2a9390 100644
42374 --- a/drivers/staging/pohmelfs/mcache.c
42375 +++ b/drivers/staging/pohmelfs/mcache.c
42376 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42377 m->data = data;
42378 m->start = start;
42379 m->size = size;
42380 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42381 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42382
42383 mutex_lock(&psb->mcache_lock);
42384 err = pohmelfs_mcache_insert(psb, m);
42385 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42386 index 623a07d..4035c19 100644
42387 --- a/drivers/staging/pohmelfs/netfs.h
42388 +++ b/drivers/staging/pohmelfs/netfs.h
42389 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42390 struct pohmelfs_sb {
42391 struct rb_root mcache_root;
42392 struct mutex mcache_lock;
42393 - atomic_long_t mcache_gen;
42394 + atomic_long_unchecked_t mcache_gen;
42395 unsigned long mcache_timeout;
42396
42397 unsigned int idx;
42398
42399 unsigned int trans_retries;
42400
42401 - atomic_t trans_gen;
42402 + atomic_unchecked_t trans_gen;
42403
42404 unsigned int crypto_attached_size;
42405 unsigned int crypto_align_size;
42406 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42407 index 36a2535..0591bf4 100644
42408 --- a/drivers/staging/pohmelfs/trans.c
42409 +++ b/drivers/staging/pohmelfs/trans.c
42410 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42411 int err;
42412 struct netfs_cmd *cmd = t->iovec.iov_base;
42413
42414 - t->gen = atomic_inc_return(&psb->trans_gen);
42415 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42416
42417 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42418 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42419 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42420 index f890a16..509ece8 100644
42421 --- a/drivers/staging/sep/sep_driver.c
42422 +++ b/drivers/staging/sep/sep_driver.c
42423 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42424 static dev_t sep_devno;
42425
42426 /* the files operations structure of the driver */
42427 -static struct file_operations sep_file_operations = {
42428 +static const struct file_operations sep_file_operations = {
42429 .owner = THIS_MODULE,
42430 .ioctl = sep_ioctl,
42431 .poll = sep_poll,
42432 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42433 index 5e16bc3..7655b10 100644
42434 --- a/drivers/staging/usbip/usbip_common.h
42435 +++ b/drivers/staging/usbip/usbip_common.h
42436 @@ -374,7 +374,7 @@ struct usbip_device {
42437 void (*shutdown)(struct usbip_device *);
42438 void (*reset)(struct usbip_device *);
42439 void (*unusable)(struct usbip_device *);
42440 - } eh_ops;
42441 + } __no_const eh_ops;
42442 };
42443
42444
42445 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42446 index 57f7946..d9df23d 100644
42447 --- a/drivers/staging/usbip/vhci.h
42448 +++ b/drivers/staging/usbip/vhci.h
42449 @@ -92,7 +92,7 @@ struct vhci_hcd {
42450 unsigned resuming:1;
42451 unsigned long re_timeout;
42452
42453 - atomic_t seqnum;
42454 + atomic_unchecked_t seqnum;
42455
42456 /*
42457 * NOTE:
42458 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42459 index 20cd7db..c2693ff 100644
42460 --- a/drivers/staging/usbip/vhci_hcd.c
42461 +++ b/drivers/staging/usbip/vhci_hcd.c
42462 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42463 return;
42464 }
42465
42466 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42467 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42468 if (priv->seqnum == 0xffff)
42469 usbip_uinfo("seqnum max\n");
42470
42471 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42472 return -ENOMEM;
42473 }
42474
42475 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42476 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42477 if (unlink->seqnum == 0xffff)
42478 usbip_uinfo("seqnum max\n");
42479
42480 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42481 vdev->rhport = rhport;
42482 }
42483
42484 - atomic_set(&vhci->seqnum, 0);
42485 + atomic_set_unchecked(&vhci->seqnum, 0);
42486 spin_lock_init(&vhci->lock);
42487
42488
42489 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42490 index 7fd76fe..673695a 100644
42491 --- a/drivers/staging/usbip/vhci_rx.c
42492 +++ b/drivers/staging/usbip/vhci_rx.c
42493 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42494 usbip_uerr("cannot find a urb of seqnum %u\n",
42495 pdu->base.seqnum);
42496 usbip_uinfo("max seqnum %d\n",
42497 - atomic_read(&the_controller->seqnum));
42498 + atomic_read_unchecked(&the_controller->seqnum));
42499 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42500 return;
42501 }
42502 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42503 index 7891288..8e31300 100644
42504 --- a/drivers/staging/vme/devices/vme_user.c
42505 +++ b/drivers/staging/vme/devices/vme_user.c
42506 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42507 static int __init vme_user_probe(struct device *, int, int);
42508 static int __exit vme_user_remove(struct device *, int, int);
42509
42510 -static struct file_operations vme_user_fops = {
42511 +static const struct file_operations vme_user_fops = {
42512 .open = vme_user_open,
42513 .release = vme_user_release,
42514 .read = vme_user_read,
42515 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42516 index 58abf44..00c1fc8 100644
42517 --- a/drivers/staging/vt6655/hostap.c
42518 +++ b/drivers/staging/vt6655/hostap.c
42519 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42520 PSDevice apdev_priv;
42521 struct net_device *dev = pDevice->dev;
42522 int ret;
42523 - const struct net_device_ops apdev_netdev_ops = {
42524 + net_device_ops_no_const apdev_netdev_ops = {
42525 .ndo_start_xmit = pDevice->tx_80211,
42526 };
42527
42528 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42529 index 0c8267a..db1f363 100644
42530 --- a/drivers/staging/vt6656/hostap.c
42531 +++ b/drivers/staging/vt6656/hostap.c
42532 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42533 PSDevice apdev_priv;
42534 struct net_device *dev = pDevice->dev;
42535 int ret;
42536 - const struct net_device_ops apdev_netdev_ops = {
42537 + net_device_ops_no_const apdev_netdev_ops = {
42538 .ndo_start_xmit = pDevice->tx_80211,
42539 };
42540
42541 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42542 index 925678b..da7f5ed 100644
42543 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42544 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42545 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42546
42547 struct usbctlx_completor {
42548 int (*complete) (struct usbctlx_completor *);
42549 -};
42550 +} __no_const;
42551 typedef struct usbctlx_completor usbctlx_completor_t;
42552
42553 static int
42554 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42555 index 40de151..924f268 100644
42556 --- a/drivers/telephony/ixj.c
42557 +++ b/drivers/telephony/ixj.c
42558 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42559 bool mContinue;
42560 char *pIn, *pOut;
42561
42562 + pax_track_stack();
42563 +
42564 if (!SCI_Prepare(j))
42565 return 0;
42566
42567 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42568 index e941367..b631f5a 100644
42569 --- a/drivers/uio/uio.c
42570 +++ b/drivers/uio/uio.c
42571 @@ -23,6 +23,7 @@
42572 #include <linux/string.h>
42573 #include <linux/kobject.h>
42574 #include <linux/uio_driver.h>
42575 +#include <asm/local.h>
42576
42577 #define UIO_MAX_DEVICES 255
42578
42579 @@ -30,10 +31,10 @@ struct uio_device {
42580 struct module *owner;
42581 struct device *dev;
42582 int minor;
42583 - atomic_t event;
42584 + atomic_unchecked_t event;
42585 struct fasync_struct *async_queue;
42586 wait_queue_head_t wait;
42587 - int vma_count;
42588 + local_t vma_count;
42589 struct uio_info *info;
42590 struct kobject *map_dir;
42591 struct kobject *portio_dir;
42592 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42593 return entry->show(mem, buf);
42594 }
42595
42596 -static struct sysfs_ops map_sysfs_ops = {
42597 +static const struct sysfs_ops map_sysfs_ops = {
42598 .show = map_type_show,
42599 };
42600
42601 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42602 return entry->show(port, buf);
42603 }
42604
42605 -static struct sysfs_ops portio_sysfs_ops = {
42606 +static const struct sysfs_ops portio_sysfs_ops = {
42607 .show = portio_type_show,
42608 };
42609
42610 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42611 struct uio_device *idev = dev_get_drvdata(dev);
42612 if (idev)
42613 return sprintf(buf, "%u\n",
42614 - (unsigned int)atomic_read(&idev->event));
42615 + (unsigned int)atomic_read_unchecked(&idev->event));
42616 else
42617 return -ENODEV;
42618 }
42619 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42620 {
42621 struct uio_device *idev = info->uio_dev;
42622
42623 - atomic_inc(&idev->event);
42624 + atomic_inc_unchecked(&idev->event);
42625 wake_up_interruptible(&idev->wait);
42626 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42627 }
42628 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42629 }
42630
42631 listener->dev = idev;
42632 - listener->event_count = atomic_read(&idev->event);
42633 + listener->event_count = atomic_read_unchecked(&idev->event);
42634 filep->private_data = listener;
42635
42636 if (idev->info->open) {
42637 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42638 return -EIO;
42639
42640 poll_wait(filep, &idev->wait, wait);
42641 - if (listener->event_count != atomic_read(&idev->event))
42642 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42643 return POLLIN | POLLRDNORM;
42644 return 0;
42645 }
42646 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42647 do {
42648 set_current_state(TASK_INTERRUPTIBLE);
42649
42650 - event_count = atomic_read(&idev->event);
42651 + event_count = atomic_read_unchecked(&idev->event);
42652 if (event_count != listener->event_count) {
42653 if (copy_to_user(buf, &event_count, count))
42654 retval = -EFAULT;
42655 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42656 static void uio_vma_open(struct vm_area_struct *vma)
42657 {
42658 struct uio_device *idev = vma->vm_private_data;
42659 - idev->vma_count++;
42660 + local_inc(&idev->vma_count);
42661 }
42662
42663 static void uio_vma_close(struct vm_area_struct *vma)
42664 {
42665 struct uio_device *idev = vma->vm_private_data;
42666 - idev->vma_count--;
42667 + local_dec(&idev->vma_count);
42668 }
42669
42670 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42671 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42672 idev->owner = owner;
42673 idev->info = info;
42674 init_waitqueue_head(&idev->wait);
42675 - atomic_set(&idev->event, 0);
42676 + atomic_set_unchecked(&idev->event, 0);
42677
42678 ret = uio_get_minor(idev);
42679 if (ret)
42680 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42681 index fbea856..06efea6 100644
42682 --- a/drivers/usb/atm/usbatm.c
42683 +++ b/drivers/usb/atm/usbatm.c
42684 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42685 if (printk_ratelimit())
42686 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42687 __func__, vpi, vci);
42688 - atomic_inc(&vcc->stats->rx_err);
42689 + atomic_inc_unchecked(&vcc->stats->rx_err);
42690 return;
42691 }
42692
42693 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42694 if (length > ATM_MAX_AAL5_PDU) {
42695 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42696 __func__, length, vcc);
42697 - atomic_inc(&vcc->stats->rx_err);
42698 + atomic_inc_unchecked(&vcc->stats->rx_err);
42699 goto out;
42700 }
42701
42702 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42703 if (sarb->len < pdu_length) {
42704 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42705 __func__, pdu_length, sarb->len, vcc);
42706 - atomic_inc(&vcc->stats->rx_err);
42707 + atomic_inc_unchecked(&vcc->stats->rx_err);
42708 goto out;
42709 }
42710
42711 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42712 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42713 __func__, vcc);
42714 - atomic_inc(&vcc->stats->rx_err);
42715 + atomic_inc_unchecked(&vcc->stats->rx_err);
42716 goto out;
42717 }
42718
42719 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42720 if (printk_ratelimit())
42721 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42722 __func__, length);
42723 - atomic_inc(&vcc->stats->rx_drop);
42724 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42725 goto out;
42726 }
42727
42728 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42729
42730 vcc->push(vcc, skb);
42731
42732 - atomic_inc(&vcc->stats->rx);
42733 + atomic_inc_unchecked(&vcc->stats->rx);
42734 out:
42735 skb_trim(sarb, 0);
42736 }
42737 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42738 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42739
42740 usbatm_pop(vcc, skb);
42741 - atomic_inc(&vcc->stats->tx);
42742 + atomic_inc_unchecked(&vcc->stats->tx);
42743
42744 skb = skb_dequeue(&instance->sndqueue);
42745 }
42746 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42747 if (!left--)
42748 return sprintf(page,
42749 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42750 - atomic_read(&atm_dev->stats.aal5.tx),
42751 - atomic_read(&atm_dev->stats.aal5.tx_err),
42752 - atomic_read(&atm_dev->stats.aal5.rx),
42753 - atomic_read(&atm_dev->stats.aal5.rx_err),
42754 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42755 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42756 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42757 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42758 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42759 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42760
42761 if (!left--) {
42762 if (instance->disconnected)
42763 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42764 index 24e6205..fe5a5d4 100644
42765 --- a/drivers/usb/core/hcd.c
42766 +++ b/drivers/usb/core/hcd.c
42767 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42768
42769 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42770
42771 -struct usb_mon_operations *mon_ops;
42772 +const struct usb_mon_operations *mon_ops;
42773
42774 /*
42775 * The registration is unlocked.
42776 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42777 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42778 */
42779
42780 -int usb_mon_register (struct usb_mon_operations *ops)
42781 +int usb_mon_register (const struct usb_mon_operations *ops)
42782 {
42783
42784 if (mon_ops)
42785 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42786 index bcbe104..9cfd1c6 100644
42787 --- a/drivers/usb/core/hcd.h
42788 +++ b/drivers/usb/core/hcd.h
42789 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42790 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42791
42792 struct usb_mon_operations {
42793 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42794 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42795 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42796 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42797 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42798 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42799 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42800 };
42801
42802 -extern struct usb_mon_operations *mon_ops;
42803 +extern const struct usb_mon_operations *mon_ops;
42804
42805 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42806 {
42807 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42808 (*mon_ops->urb_complete)(bus, urb, status);
42809 }
42810
42811 -int usb_mon_register(struct usb_mon_operations *ops);
42812 +int usb_mon_register(const struct usb_mon_operations *ops);
42813 void usb_mon_deregister(void);
42814
42815 #else
42816 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42817 index 409cc94..a673bad 100644
42818 --- a/drivers/usb/core/message.c
42819 +++ b/drivers/usb/core/message.c
42820 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42821 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42822 if (buf) {
42823 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42824 - if (len > 0) {
42825 - smallbuf = kmalloc(++len, GFP_NOIO);
42826 + if (len++ > 0) {
42827 + smallbuf = kmalloc(len, GFP_NOIO);
42828 if (!smallbuf)
42829 return buf;
42830 memcpy(smallbuf, buf, len);
42831 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42832 index 62ff5e7..530b74e 100644
42833 --- a/drivers/usb/misc/appledisplay.c
42834 +++ b/drivers/usb/misc/appledisplay.c
42835 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42836 return pdata->msgdata[1];
42837 }
42838
42839 -static struct backlight_ops appledisplay_bl_data = {
42840 +static const struct backlight_ops appledisplay_bl_data = {
42841 .get_brightness = appledisplay_bl_get_brightness,
42842 .update_status = appledisplay_bl_update_status,
42843 };
42844 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42845 index e0c2db3..bd8cb66 100644
42846 --- a/drivers/usb/mon/mon_main.c
42847 +++ b/drivers/usb/mon/mon_main.c
42848 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42849 /*
42850 * Ops
42851 */
42852 -static struct usb_mon_operations mon_ops_0 = {
42853 +static const struct usb_mon_operations mon_ops_0 = {
42854 .urb_submit = mon_submit,
42855 .urb_submit_error = mon_submit_error,
42856 .urb_complete = mon_complete,
42857 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42858 index d6bea3e..60b250e 100644
42859 --- a/drivers/usb/wusbcore/wa-hc.h
42860 +++ b/drivers/usb/wusbcore/wa-hc.h
42861 @@ -192,7 +192,7 @@ struct wahc {
42862 struct list_head xfer_delayed_list;
42863 spinlock_t xfer_list_lock;
42864 struct work_struct xfer_work;
42865 - atomic_t xfer_id_count;
42866 + atomic_unchecked_t xfer_id_count;
42867 };
42868
42869
42870 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42871 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42872 spin_lock_init(&wa->xfer_list_lock);
42873 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42874 - atomic_set(&wa->xfer_id_count, 1);
42875 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42876 }
42877
42878 /**
42879 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42880 index 613a5fc..3174865 100644
42881 --- a/drivers/usb/wusbcore/wa-xfer.c
42882 +++ b/drivers/usb/wusbcore/wa-xfer.c
42883 @@ -293,7 +293,7 @@ out:
42884 */
42885 static void wa_xfer_id_init(struct wa_xfer *xfer)
42886 {
42887 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42888 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42889 }
42890
42891 /*
42892 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42893 index aa42fce..f8a828c 100644
42894 --- a/drivers/uwb/wlp/messages.c
42895 +++ b/drivers/uwb/wlp/messages.c
42896 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42897 size_t len = skb->len;
42898 size_t used;
42899 ssize_t result;
42900 - struct wlp_nonce enonce, rnonce;
42901 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42902 enum wlp_assc_error assc_err;
42903 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42904 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42905 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42906 index 0370399..6627c94 100644
42907 --- a/drivers/uwb/wlp/sysfs.c
42908 +++ b/drivers/uwb/wlp/sysfs.c
42909 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42910 return ret;
42911 }
42912
42913 -static
42914 -struct sysfs_ops wss_sysfs_ops = {
42915 +static const struct sysfs_ops wss_sysfs_ops = {
42916 .show = wlp_wss_attr_show,
42917 .store = wlp_wss_attr_store,
42918 };
42919 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42920 index 8c5e432..5ee90ea 100644
42921 --- a/drivers/video/atmel_lcdfb.c
42922 +++ b/drivers/video/atmel_lcdfb.c
42923 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42924 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42925 }
42926
42927 -static struct backlight_ops atmel_lcdc_bl_ops = {
42928 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42929 .update_status = atmel_bl_update_status,
42930 .get_brightness = atmel_bl_get_brightness,
42931 };
42932 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42933 index e4e4d43..66bcbcc 100644
42934 --- a/drivers/video/aty/aty128fb.c
42935 +++ b/drivers/video/aty/aty128fb.c
42936 @@ -149,7 +149,7 @@ enum {
42937 };
42938
42939 /* Must match above enum */
42940 -static const char *r128_family[] __devinitdata = {
42941 +static const char *r128_family[] __devinitconst = {
42942 "AGP",
42943 "PCI",
42944 "PRO AGP",
42945 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42946 return bd->props.brightness;
42947 }
42948
42949 -static struct backlight_ops aty128_bl_data = {
42950 +static const struct backlight_ops aty128_bl_data = {
42951 .get_brightness = aty128_bl_get_brightness,
42952 .update_status = aty128_bl_update_status,
42953 };
42954 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42955 index 913b4a4..9295a38 100644
42956 --- a/drivers/video/aty/atyfb_base.c
42957 +++ b/drivers/video/aty/atyfb_base.c
42958 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42959 return bd->props.brightness;
42960 }
42961
42962 -static struct backlight_ops aty_bl_data = {
42963 +static const struct backlight_ops aty_bl_data = {
42964 .get_brightness = aty_bl_get_brightness,
42965 .update_status = aty_bl_update_status,
42966 };
42967 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42968 index 1a056ad..221bd6a 100644
42969 --- a/drivers/video/aty/radeon_backlight.c
42970 +++ b/drivers/video/aty/radeon_backlight.c
42971 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42972 return bd->props.brightness;
42973 }
42974
42975 -static struct backlight_ops radeon_bl_data = {
42976 +static const struct backlight_ops radeon_bl_data = {
42977 .get_brightness = radeon_bl_get_brightness,
42978 .update_status = radeon_bl_update_status,
42979 };
42980 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42981 index ad05da5..3cb2cb9 100644
42982 --- a/drivers/video/backlight/adp5520_bl.c
42983 +++ b/drivers/video/backlight/adp5520_bl.c
42984 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42985 return error ? data->current_brightness : reg_val;
42986 }
42987
42988 -static struct backlight_ops adp5520_bl_ops = {
42989 +static const struct backlight_ops adp5520_bl_ops = {
42990 .update_status = adp5520_bl_update_status,
42991 .get_brightness = adp5520_bl_get_brightness,
42992 };
42993 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42994 index 2c3bdfc..d769b0b 100644
42995 --- a/drivers/video/backlight/adx_bl.c
42996 +++ b/drivers/video/backlight/adx_bl.c
42997 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42998 return 1;
42999 }
43000
43001 -static struct backlight_ops adx_backlight_ops = {
43002 +static const struct backlight_ops adx_backlight_ops = {
43003 .options = 0,
43004 .update_status = adx_backlight_update_status,
43005 .get_brightness = adx_backlight_get_brightness,
43006 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43007 index 505c082..6b6b3cc 100644
43008 --- a/drivers/video/backlight/atmel-pwm-bl.c
43009 +++ b/drivers/video/backlight/atmel-pwm-bl.c
43010 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43011 return pwm_channel_enable(&pwmbl->pwmc);
43012 }
43013
43014 -static struct backlight_ops atmel_pwm_bl_ops = {
43015 +static const struct backlight_ops atmel_pwm_bl_ops = {
43016 .get_brightness = atmel_pwm_bl_get_intensity,
43017 .update_status = atmel_pwm_bl_set_intensity,
43018 };
43019 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43020 index 5e20e6e..89025e6 100644
43021 --- a/drivers/video/backlight/backlight.c
43022 +++ b/drivers/video/backlight/backlight.c
43023 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43024 * ERR_PTR() or a pointer to the newly allocated device.
43025 */
43026 struct backlight_device *backlight_device_register(const char *name,
43027 - struct device *parent, void *devdata, struct backlight_ops *ops)
43028 + struct device *parent, void *devdata, const struct backlight_ops *ops)
43029 {
43030 struct backlight_device *new_bd;
43031 int rc;
43032 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43033 index 9677494..b4bcf80 100644
43034 --- a/drivers/video/backlight/corgi_lcd.c
43035 +++ b/drivers/video/backlight/corgi_lcd.c
43036 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43037 }
43038 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43039
43040 -static struct backlight_ops corgi_bl_ops = {
43041 +static const struct backlight_ops corgi_bl_ops = {
43042 .get_brightness = corgi_bl_get_intensity,
43043 .update_status = corgi_bl_update_status,
43044 };
43045 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43046 index b9fe62b..2914bf1 100644
43047 --- a/drivers/video/backlight/cr_bllcd.c
43048 +++ b/drivers/video/backlight/cr_bllcd.c
43049 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43050 return intensity;
43051 }
43052
43053 -static struct backlight_ops cr_backlight_ops = {
43054 +static const struct backlight_ops cr_backlight_ops = {
43055 .get_brightness = cr_backlight_get_intensity,
43056 .update_status = cr_backlight_set_intensity,
43057 };
43058 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43059 index 701a108..feacfd5 100644
43060 --- a/drivers/video/backlight/da903x_bl.c
43061 +++ b/drivers/video/backlight/da903x_bl.c
43062 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43063 return data->current_brightness;
43064 }
43065
43066 -static struct backlight_ops da903x_backlight_ops = {
43067 +static const struct backlight_ops da903x_backlight_ops = {
43068 .update_status = da903x_backlight_update_status,
43069 .get_brightness = da903x_backlight_get_brightness,
43070 };
43071 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43072 index 6d27f62..e6d348e 100644
43073 --- a/drivers/video/backlight/generic_bl.c
43074 +++ b/drivers/video/backlight/generic_bl.c
43075 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43076 }
43077 EXPORT_SYMBOL(corgibl_limit_intensity);
43078
43079 -static struct backlight_ops genericbl_ops = {
43080 +static const struct backlight_ops genericbl_ops = {
43081 .options = BL_CORE_SUSPENDRESUME,
43082 .get_brightness = genericbl_get_intensity,
43083 .update_status = genericbl_send_intensity,
43084 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43085 index 7fb4eef..f7cc528 100644
43086 --- a/drivers/video/backlight/hp680_bl.c
43087 +++ b/drivers/video/backlight/hp680_bl.c
43088 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43089 return current_intensity;
43090 }
43091
43092 -static struct backlight_ops hp680bl_ops = {
43093 +static const struct backlight_ops hp680bl_ops = {
43094 .get_brightness = hp680bl_get_intensity,
43095 .update_status = hp680bl_set_intensity,
43096 };
43097 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43098 index 7aed256..db9071f 100644
43099 --- a/drivers/video/backlight/jornada720_bl.c
43100 +++ b/drivers/video/backlight/jornada720_bl.c
43101 @@ -93,7 +93,7 @@ out:
43102 return ret;
43103 }
43104
43105 -static struct backlight_ops jornada_bl_ops = {
43106 +static const struct backlight_ops jornada_bl_ops = {
43107 .get_brightness = jornada_bl_get_brightness,
43108 .update_status = jornada_bl_update_status,
43109 .options = BL_CORE_SUSPENDRESUME,
43110 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43111 index a38fda1..939e7b8 100644
43112 --- a/drivers/video/backlight/kb3886_bl.c
43113 +++ b/drivers/video/backlight/kb3886_bl.c
43114 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43115 return kb3886bl_intensity;
43116 }
43117
43118 -static struct backlight_ops kb3886bl_ops = {
43119 +static const struct backlight_ops kb3886bl_ops = {
43120 .get_brightness = kb3886bl_get_intensity,
43121 .update_status = kb3886bl_send_intensity,
43122 };
43123 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43124 index 6b488b8..00a9591 100644
43125 --- a/drivers/video/backlight/locomolcd.c
43126 +++ b/drivers/video/backlight/locomolcd.c
43127 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43128 return current_intensity;
43129 }
43130
43131 -static struct backlight_ops locomobl_data = {
43132 +static const struct backlight_ops locomobl_data = {
43133 .get_brightness = locomolcd_get_intensity,
43134 .update_status = locomolcd_set_intensity,
43135 };
43136 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43137 index 99bdfa8..3dac448 100644
43138 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43139 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43140 @@ -33,7 +33,7 @@ struct dmi_match_data {
43141 unsigned long iostart;
43142 unsigned long iolen;
43143 /* Backlight operations structure. */
43144 - struct backlight_ops backlight_ops;
43145 + const struct backlight_ops backlight_ops;
43146 };
43147
43148 /* Module parameters. */
43149 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43150 index cbad67e..3cf900e 100644
43151 --- a/drivers/video/backlight/omap1_bl.c
43152 +++ b/drivers/video/backlight/omap1_bl.c
43153 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43154 return bl->current_intensity;
43155 }
43156
43157 -static struct backlight_ops omapbl_ops = {
43158 +static const struct backlight_ops omapbl_ops = {
43159 .get_brightness = omapbl_get_intensity,
43160 .update_status = omapbl_update_status,
43161 };
43162 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43163 index 9edaf24..075786e 100644
43164 --- a/drivers/video/backlight/progear_bl.c
43165 +++ b/drivers/video/backlight/progear_bl.c
43166 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43167 return intensity - HW_LEVEL_MIN;
43168 }
43169
43170 -static struct backlight_ops progearbl_ops = {
43171 +static const struct backlight_ops progearbl_ops = {
43172 .get_brightness = progearbl_get_intensity,
43173 .update_status = progearbl_set_intensity,
43174 };
43175 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43176 index 8871662..df9e0b3 100644
43177 --- a/drivers/video/backlight/pwm_bl.c
43178 +++ b/drivers/video/backlight/pwm_bl.c
43179 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43180 return bl->props.brightness;
43181 }
43182
43183 -static struct backlight_ops pwm_backlight_ops = {
43184 +static const struct backlight_ops pwm_backlight_ops = {
43185 .update_status = pwm_backlight_update_status,
43186 .get_brightness = pwm_backlight_get_brightness,
43187 };
43188 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43189 index 43edbad..e14ce4d 100644
43190 --- a/drivers/video/backlight/tosa_bl.c
43191 +++ b/drivers/video/backlight/tosa_bl.c
43192 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43193 return props->brightness;
43194 }
43195
43196 -static struct backlight_ops bl_ops = {
43197 +static const struct backlight_ops bl_ops = {
43198 .get_brightness = tosa_bl_get_brightness,
43199 .update_status = tosa_bl_update_status,
43200 };
43201 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43202 index 467bdb7..e32add3 100644
43203 --- a/drivers/video/backlight/wm831x_bl.c
43204 +++ b/drivers/video/backlight/wm831x_bl.c
43205 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43206 return data->current_brightness;
43207 }
43208
43209 -static struct backlight_ops wm831x_backlight_ops = {
43210 +static const struct backlight_ops wm831x_backlight_ops = {
43211 .options = BL_CORE_SUSPENDRESUME,
43212 .update_status = wm831x_backlight_update_status,
43213 .get_brightness = wm831x_backlight_get_brightness,
43214 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43215 index e49ae5e..db4e6f7 100644
43216 --- a/drivers/video/bf54x-lq043fb.c
43217 +++ b/drivers/video/bf54x-lq043fb.c
43218 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43219 return 0;
43220 }
43221
43222 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43223 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43224 .get_brightness = bl_get_brightness,
43225 };
43226
43227 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43228 index 2c72a7c..d523e52 100644
43229 --- a/drivers/video/bfin-t350mcqb-fb.c
43230 +++ b/drivers/video/bfin-t350mcqb-fb.c
43231 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43232 return 0;
43233 }
43234
43235 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43236 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43237 .get_brightness = bl_get_brightness,
43238 };
43239
43240 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43241 index f53b9f1..958bf4e 100644
43242 --- a/drivers/video/fbcmap.c
43243 +++ b/drivers/video/fbcmap.c
43244 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43245 rc = -ENODEV;
43246 goto out;
43247 }
43248 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43249 - !info->fbops->fb_setcmap)) {
43250 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43251 rc = -EINVAL;
43252 goto out1;
43253 }
43254 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43255 index 99bbd28..ad3829e 100644
43256 --- a/drivers/video/fbmem.c
43257 +++ b/drivers/video/fbmem.c
43258 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43259 image->dx += image->width + 8;
43260 }
43261 } else if (rotate == FB_ROTATE_UD) {
43262 - for (x = 0; x < num && image->dx >= 0; x++) {
43263 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43264 info->fbops->fb_imageblit(info, image);
43265 image->dx -= image->width + 8;
43266 }
43267 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43268 image->dy += image->height + 8;
43269 }
43270 } else if (rotate == FB_ROTATE_CCW) {
43271 - for (x = 0; x < num && image->dy >= 0; x++) {
43272 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43273 info->fbops->fb_imageblit(info, image);
43274 image->dy -= image->height + 8;
43275 }
43276 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43277 int flags = info->flags;
43278 int ret = 0;
43279
43280 + pax_track_stack();
43281 +
43282 if (var->activate & FB_ACTIVATE_INV_MODE) {
43283 struct fb_videomode mode1, mode2;
43284
43285 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43286 void __user *argp = (void __user *)arg;
43287 long ret = 0;
43288
43289 + pax_track_stack();
43290 +
43291 switch (cmd) {
43292 case FBIOGET_VSCREENINFO:
43293 if (!lock_fb_info(info))
43294 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43295 return -EFAULT;
43296 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43297 return -EINVAL;
43298 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43299 + if (con2fb.framebuffer >= FB_MAX)
43300 return -EINVAL;
43301 if (!registered_fb[con2fb.framebuffer])
43302 request_module("fb%d", con2fb.framebuffer);
43303 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43304 index f20eff8..3e4f622 100644
43305 --- a/drivers/video/geode/gx1fb_core.c
43306 +++ b/drivers/video/geode/gx1fb_core.c
43307 @@ -30,7 +30,7 @@ static int crt_option = 1;
43308 static char panel_option[32] = "";
43309
43310 /* Modes relevant to the GX1 (taken from modedb.c) */
43311 -static const struct fb_videomode __initdata gx1_modedb[] = {
43312 +static const struct fb_videomode __initconst gx1_modedb[] = {
43313 /* 640x480-60 VESA */
43314 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43315 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43316 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43317 index 896e53d..4d87d0b 100644
43318 --- a/drivers/video/gxt4500.c
43319 +++ b/drivers/video/gxt4500.c
43320 @@ -156,7 +156,7 @@ struct gxt4500_par {
43321 static char *mode_option;
43322
43323 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43324 -static const struct fb_videomode defaultmode __devinitdata = {
43325 +static const struct fb_videomode defaultmode __devinitconst = {
43326 .refresh = 60,
43327 .xres = 1280,
43328 .yres = 1024,
43329 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43330 return 0;
43331 }
43332
43333 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43334 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43335 .id = "IBM GXT4500P",
43336 .type = FB_TYPE_PACKED_PIXELS,
43337 .visual = FB_VISUAL_PSEUDOCOLOR,
43338 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43339 index f5bedee..28c6028 100644
43340 --- a/drivers/video/i810/i810_accel.c
43341 +++ b/drivers/video/i810/i810_accel.c
43342 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43343 }
43344 }
43345 printk("ringbuffer lockup!!!\n");
43346 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43347 i810_report_error(mmio);
43348 par->dev_flags |= LOCKUP;
43349 info->pixmap.scan_align = 1;
43350 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43351 index 5743ea2..457f82c 100644
43352 --- a/drivers/video/i810/i810_main.c
43353 +++ b/drivers/video/i810/i810_main.c
43354 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43355 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43356
43357 /* PCI */
43358 -static const char *i810_pci_list[] __devinitdata = {
43359 +static const char *i810_pci_list[] __devinitconst = {
43360 "Intel(R) 810 Framebuffer Device" ,
43361 "Intel(R) 810-DC100 Framebuffer Device" ,
43362 "Intel(R) 810E Framebuffer Device" ,
43363 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43364 index 3c14e43..eafa544 100644
43365 --- a/drivers/video/logo/logo_linux_clut224.ppm
43366 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43367 @@ -1,1604 +1,1123 @@
43368 P3
43369 -# Standard 224-color Linux logo
43370 80 80
43371 255
43372 - 0 0 0 0 0 0 0 0 0 0 0 0
43373 - 0 0 0 0 0 0 0 0 0 0 0 0
43374 - 0 0 0 0 0 0 0 0 0 0 0 0
43375 - 0 0 0 0 0 0 0 0 0 0 0 0
43376 - 0 0 0 0 0 0 0 0 0 0 0 0
43377 - 0 0 0 0 0 0 0 0 0 0 0 0
43378 - 0 0 0 0 0 0 0 0 0 0 0 0
43379 - 0 0 0 0 0 0 0 0 0 0 0 0
43380 - 0 0 0 0 0 0 0 0 0 0 0 0
43381 - 6 6 6 6 6 6 10 10 10 10 10 10
43382 - 10 10 10 6 6 6 6 6 6 6 6 6
43383 - 0 0 0 0 0 0 0 0 0 0 0 0
43384 - 0 0 0 0 0 0 0 0 0 0 0 0
43385 - 0 0 0 0 0 0 0 0 0 0 0 0
43386 - 0 0 0 0 0 0 0 0 0 0 0 0
43387 - 0 0 0 0 0 0 0 0 0 0 0 0
43388 - 0 0 0 0 0 0 0 0 0 0 0 0
43389 - 0 0 0 0 0 0 0 0 0 0 0 0
43390 - 0 0 0 0 0 0 0 0 0 0 0 0
43391 - 0 0 0 0 0 0 0 0 0 0 0 0
43392 - 0 0 0 0 0 0 0 0 0 0 0 0
43393 - 0 0 0 0 0 0 0 0 0 0 0 0
43394 - 0 0 0 0 0 0 0 0 0 0 0 0
43395 - 0 0 0 0 0 0 0 0 0 0 0 0
43396 - 0 0 0 0 0 0 0 0 0 0 0 0
43397 - 0 0 0 0 0 0 0 0 0 0 0 0
43398 - 0 0 0 0 0 0 0 0 0 0 0 0
43399 - 0 0 0 0 0 0 0 0 0 0 0 0
43400 - 0 0 0 6 6 6 10 10 10 14 14 14
43401 - 22 22 22 26 26 26 30 30 30 34 34 34
43402 - 30 30 30 30 30 30 26 26 26 18 18 18
43403 - 14 14 14 10 10 10 6 6 6 0 0 0
43404 - 0 0 0 0 0 0 0 0 0 0 0 0
43405 - 0 0 0 0 0 0 0 0 0 0 0 0
43406 - 0 0 0 0 0 0 0 0 0 0 0 0
43407 - 0 0 0 0 0 0 0 0 0 0 0 0
43408 - 0 0 0 0 0 0 0 0 0 0 0 0
43409 - 0 0 0 0 0 0 0 0 0 0 0 0
43410 - 0 0 0 0 0 0 0 0 0 0 0 0
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 0 0 0 0 0 0 0 0 0 0 0 0
43413 - 0 0 0 0 0 1 0 0 1 0 0 0
43414 - 0 0 0 0 0 0 0 0 0 0 0 0
43415 - 0 0 0 0 0 0 0 0 0 0 0 0
43416 - 0 0 0 0 0 0 0 0 0 0 0 0
43417 - 0 0 0 0 0 0 0 0 0 0 0 0
43418 - 0 0 0 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 6 6 6 14 14 14 26 26 26 42 42 42
43421 - 54 54 54 66 66 66 78 78 78 78 78 78
43422 - 78 78 78 74 74 74 66 66 66 54 54 54
43423 - 42 42 42 26 26 26 18 18 18 10 10 10
43424 - 6 6 6 0 0 0 0 0 0 0 0 0
43425 - 0 0 0 0 0 0 0 0 0 0 0 0
43426 - 0 0 0 0 0 0 0 0 0 0 0 0
43427 - 0 0 0 0 0 0 0 0 0 0 0 0
43428 - 0 0 0 0 0 0 0 0 0 0 0 0
43429 - 0 0 0 0 0 0 0 0 0 0 0 0
43430 - 0 0 0 0 0 0 0 0 0 0 0 0
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 0 0 0 0 0 0 0 0 0 0 0 0
43433 - 0 0 1 0 0 0 0 0 0 0 0 0
43434 - 0 0 0 0 0 0 0 0 0 0 0 0
43435 - 0 0 0 0 0 0 0 0 0 0 0 0
43436 - 0 0 0 0 0 0 0 0 0 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 10 10 10
43440 - 22 22 22 42 42 42 66 66 66 86 86 86
43441 - 66 66 66 38 38 38 38 38 38 22 22 22
43442 - 26 26 26 34 34 34 54 54 54 66 66 66
43443 - 86 86 86 70 70 70 46 46 46 26 26 26
43444 - 14 14 14 6 6 6 0 0 0 0 0 0
43445 - 0 0 0 0 0 0 0 0 0 0 0 0
43446 - 0 0 0 0 0 0 0 0 0 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 0 0 1 0 0 1 0 0 1 0 0 0
43454 - 0 0 0 0 0 0 0 0 0 0 0 0
43455 - 0 0 0 0 0 0 0 0 0 0 0 0
43456 - 0 0 0 0 0 0 0 0 0 0 0 0
43457 - 0 0 0 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 10 10 10 26 26 26
43460 - 50 50 50 82 82 82 58 58 58 6 6 6
43461 - 2 2 6 2 2 6 2 2 6 2 2 6
43462 - 2 2 6 2 2 6 2 2 6 2 2 6
43463 - 6 6 6 54 54 54 86 86 86 66 66 66
43464 - 38 38 38 18 18 18 6 6 6 0 0 0
43465 - 0 0 0 0 0 0 0 0 0 0 0 0
43466 - 0 0 0 0 0 0 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 0 0 0
43473 - 0 0 0 0 0 0 0 0 0 0 0 0
43474 - 0 0 0 0 0 0 0 0 0 0 0 0
43475 - 0 0 0 0 0 0 0 0 0 0 0 0
43476 - 0 0 0 0 0 0 0 0 0 0 0 0
43477 - 0 0 0 0 0 0 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 6 6 6 22 22 22 50 50 50
43480 - 78 78 78 34 34 34 2 2 6 2 2 6
43481 - 2 2 6 2 2 6 2 2 6 2 2 6
43482 - 2 2 6 2 2 6 2 2 6 2 2 6
43483 - 2 2 6 2 2 6 6 6 6 70 70 70
43484 - 78 78 78 46 46 46 22 22 22 6 6 6
43485 - 0 0 0 0 0 0 0 0 0 0 0 0
43486 - 0 0 0 0 0 0 0 0 0 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 0 0 0 0 0 0
43493 - 0 0 1 0 0 1 0 0 1 0 0 0
43494 - 0 0 0 0 0 0 0 0 0 0 0 0
43495 - 0 0 0 0 0 0 0 0 0 0 0 0
43496 - 0 0 0 0 0 0 0 0 0 0 0 0
43497 - 0 0 0 0 0 0 0 0 0 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 6 6 6 18 18 18 42 42 42 82 82 82
43500 - 26 26 26 2 2 6 2 2 6 2 2 6
43501 - 2 2 6 2 2 6 2 2 6 2 2 6
43502 - 2 2 6 2 2 6 2 2 6 14 14 14
43503 - 46 46 46 34 34 34 6 6 6 2 2 6
43504 - 42 42 42 78 78 78 42 42 42 18 18 18
43505 - 6 6 6 0 0 0 0 0 0 0 0 0
43506 - 0 0 0 0 0 0 0 0 0 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 0 0 0 0 0 0 0 0 0
43513 - 0 0 1 0 0 0 0 0 1 0 0 0
43514 - 0 0 0 0 0 0 0 0 0 0 0 0
43515 - 0 0 0 0 0 0 0 0 0 0 0 0
43516 - 0 0 0 0 0 0 0 0 0 0 0 0
43517 - 0 0 0 0 0 0 0 0 0 0 0 0
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 10 10 10 30 30 30 66 66 66 58 58 58
43520 - 2 2 6 2 2 6 2 2 6 2 2 6
43521 - 2 2 6 2 2 6 2 2 6 2 2 6
43522 - 2 2 6 2 2 6 2 2 6 26 26 26
43523 - 86 86 86 101 101 101 46 46 46 10 10 10
43524 - 2 2 6 58 58 58 70 70 70 34 34 34
43525 - 10 10 10 0 0 0 0 0 0 0 0 0
43526 - 0 0 0 0 0 0 0 0 0 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 0 0 0
43532 - 0 0 0 0 0 0 0 0 0 0 0 0
43533 - 0 0 1 0 0 1 0 0 1 0 0 0
43534 - 0 0 0 0 0 0 0 0 0 0 0 0
43535 - 0 0 0 0 0 0 0 0 0 0 0 0
43536 - 0 0 0 0 0 0 0 0 0 0 0 0
43537 - 0 0 0 0 0 0 0 0 0 0 0 0
43538 - 0 0 0 0 0 0 0 0 0 0 0 0
43539 - 14 14 14 42 42 42 86 86 86 10 10 10
43540 - 2 2 6 2 2 6 2 2 6 2 2 6
43541 - 2 2 6 2 2 6 2 2 6 2 2 6
43542 - 2 2 6 2 2 6 2 2 6 30 30 30
43543 - 94 94 94 94 94 94 58 58 58 26 26 26
43544 - 2 2 6 6 6 6 78 78 78 54 54 54
43545 - 22 22 22 6 6 6 0 0 0 0 0 0
43546 - 0 0 0 0 0 0 0 0 0 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 0 0 0
43552 - 0 0 0 0 0 0 0 0 0 0 0 0
43553 - 0 0 0 0 0 0 0 0 0 0 0 0
43554 - 0 0 0 0 0 0 0 0 0 0 0 0
43555 - 0 0 0 0 0 0 0 0 0 0 0 0
43556 - 0 0 0 0 0 0 0 0 0 0 0 0
43557 - 0 0 0 0 0 0 0 0 0 0 0 0
43558 - 0 0 0 0 0 0 0 0 0 6 6 6
43559 - 22 22 22 62 62 62 62 62 62 2 2 6
43560 - 2 2 6 2 2 6 2 2 6 2 2 6
43561 - 2 2 6 2 2 6 2 2 6 2 2 6
43562 - 2 2 6 2 2 6 2 2 6 26 26 26
43563 - 54 54 54 38 38 38 18 18 18 10 10 10
43564 - 2 2 6 2 2 6 34 34 34 82 82 82
43565 - 38 38 38 14 14 14 0 0 0 0 0 0
43566 - 0 0 0 0 0 0 0 0 0 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 0 0 0
43572 - 0 0 0 0 0 0 0 0 0 0 0 0
43573 - 0 0 0 0 0 1 0 0 1 0 0 0
43574 - 0 0 0 0 0 0 0 0 0 0 0 0
43575 - 0 0 0 0 0 0 0 0 0 0 0 0
43576 - 0 0 0 0 0 0 0 0 0 0 0 0
43577 - 0 0 0 0 0 0 0 0 0 0 0 0
43578 - 0 0 0 0 0 0 0 0 0 6 6 6
43579 - 30 30 30 78 78 78 30 30 30 2 2 6
43580 - 2 2 6 2 2 6 2 2 6 2 2 6
43581 - 2 2 6 2 2 6 2 2 6 2 2 6
43582 - 2 2 6 2 2 6 2 2 6 10 10 10
43583 - 10 10 10 2 2 6 2 2 6 2 2 6
43584 - 2 2 6 2 2 6 2 2 6 78 78 78
43585 - 50 50 50 18 18 18 6 6 6 0 0 0
43586 - 0 0 0 0 0 0 0 0 0 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 0 0 0
43592 - 0 0 0 0 0 0 0 0 0 0 0 0
43593 - 0 0 1 0 0 0 0 0 0 0 0 0
43594 - 0 0 0 0 0 0 0 0 0 0 0 0
43595 - 0 0 0 0 0 0 0 0 0 0 0 0
43596 - 0 0 0 0 0 0 0 0 0 0 0 0
43597 - 0 0 0 0 0 0 0 0 0 0 0 0
43598 - 0 0 0 0 0 0 0 0 0 10 10 10
43599 - 38 38 38 86 86 86 14 14 14 2 2 6
43600 - 2 2 6 2 2 6 2 2 6 2 2 6
43601 - 2 2 6 2 2 6 2 2 6 2 2 6
43602 - 2 2 6 2 2 6 2 2 6 2 2 6
43603 - 2 2 6 2 2 6 2 2 6 2 2 6
43604 - 2 2 6 2 2 6 2 2 6 54 54 54
43605 - 66 66 66 26 26 26 6 6 6 0 0 0
43606 - 0 0 0 0 0 0 0 0 0 0 0 0
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 0 0 0
43612 - 0 0 0 0 0 0 0 0 0 0 0 0
43613 - 0 0 0 0 0 1 0 0 1 0 0 0
43614 - 0 0 0 0 0 0 0 0 0 0 0 0
43615 - 0 0 0 0 0 0 0 0 0 0 0 0
43616 - 0 0 0 0 0 0 0 0 0 0 0 0
43617 - 0 0 0 0 0 0 0 0 0 0 0 0
43618 - 0 0 0 0 0 0 0 0 0 14 14 14
43619 - 42 42 42 82 82 82 2 2 6 2 2 6
43620 - 2 2 6 6 6 6 10 10 10 2 2 6
43621 - 2 2 6 2 2 6 2 2 6 2 2 6
43622 - 2 2 6 2 2 6 2 2 6 6 6 6
43623 - 14 14 14 10 10 10 2 2 6 2 2 6
43624 - 2 2 6 2 2 6 2 2 6 18 18 18
43625 - 82 82 82 34 34 34 10 10 10 0 0 0
43626 - 0 0 0 0 0 0 0 0 0 0 0 0
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 0 0 0
43632 - 0 0 0 0 0 0 0 0 0 0 0 0
43633 - 0 0 1 0 0 0 0 0 0 0 0 0
43634 - 0 0 0 0 0 0 0 0 0 0 0 0
43635 - 0 0 0 0 0 0 0 0 0 0 0 0
43636 - 0 0 0 0 0 0 0 0 0 0 0 0
43637 - 0 0 0 0 0 0 0 0 0 0 0 0
43638 - 0 0 0 0 0 0 0 0 0 14 14 14
43639 - 46 46 46 86 86 86 2 2 6 2 2 6
43640 - 6 6 6 6 6 6 22 22 22 34 34 34
43641 - 6 6 6 2 2 6 2 2 6 2 2 6
43642 - 2 2 6 2 2 6 18 18 18 34 34 34
43643 - 10 10 10 50 50 50 22 22 22 2 2 6
43644 - 2 2 6 2 2 6 2 2 6 10 10 10
43645 - 86 86 86 42 42 42 14 14 14 0 0 0
43646 - 0 0 0 0 0 0 0 0 0 0 0 0
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 0 0 0
43652 - 0 0 0 0 0 0 0 0 0 0 0 0
43653 - 0 0 1 0 0 1 0 0 1 0 0 0
43654 - 0 0 0 0 0 0 0 0 0 0 0 0
43655 - 0 0 0 0 0 0 0 0 0 0 0 0
43656 - 0 0 0 0 0 0 0 0 0 0 0 0
43657 - 0 0 0 0 0 0 0 0 0 0 0 0
43658 - 0 0 0 0 0 0 0 0 0 14 14 14
43659 - 46 46 46 86 86 86 2 2 6 2 2 6
43660 - 38 38 38 116 116 116 94 94 94 22 22 22
43661 - 22 22 22 2 2 6 2 2 6 2 2 6
43662 - 14 14 14 86 86 86 138 138 138 162 162 162
43663 -154 154 154 38 38 38 26 26 26 6 6 6
43664 - 2 2 6 2 2 6 2 2 6 2 2 6
43665 - 86 86 86 46 46 46 14 14 14 0 0 0
43666 - 0 0 0 0 0 0 0 0 0 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 0 0 0
43672 - 0 0 0 0 0 0 0 0 0 0 0 0
43673 - 0 0 0 0 0 0 0 0 0 0 0 0
43674 - 0 0 0 0 0 0 0 0 0 0 0 0
43675 - 0 0 0 0 0 0 0 0 0 0 0 0
43676 - 0 0 0 0 0 0 0 0 0 0 0 0
43677 - 0 0 0 0 0 0 0 0 0 0 0 0
43678 - 0 0 0 0 0 0 0 0 0 14 14 14
43679 - 46 46 46 86 86 86 2 2 6 14 14 14
43680 -134 134 134 198 198 198 195 195 195 116 116 116
43681 - 10 10 10 2 2 6 2 2 6 6 6 6
43682 -101 98 89 187 187 187 210 210 210 218 218 218
43683 -214 214 214 134 134 134 14 14 14 6 6 6
43684 - 2 2 6 2 2 6 2 2 6 2 2 6
43685 - 86 86 86 50 50 50 18 18 18 6 6 6
43686 - 0 0 0 0 0 0 0 0 0 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 0 0 0
43692 - 0 0 0 0 0 0 0 0 1 0 0 0
43693 - 0 0 1 0 0 1 0 0 1 0 0 0
43694 - 0 0 0 0 0 0 0 0 0 0 0 0
43695 - 0 0 0 0 0 0 0 0 0 0 0 0
43696 - 0 0 0 0 0 0 0 0 0 0 0 0
43697 - 0 0 0 0 0 0 0 0 0 0 0 0
43698 - 0 0 0 0 0 0 0 0 0 14 14 14
43699 - 46 46 46 86 86 86 2 2 6 54 54 54
43700 -218 218 218 195 195 195 226 226 226 246 246 246
43701 - 58 58 58 2 2 6 2 2 6 30 30 30
43702 -210 210 210 253 253 253 174 174 174 123 123 123
43703 -221 221 221 234 234 234 74 74 74 2 2 6
43704 - 2 2 6 2 2 6 2 2 6 2 2 6
43705 - 70 70 70 58 58 58 22 22 22 6 6 6
43706 - 0 0 0 0 0 0 0 0 0 0 0 0
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 0 0 0
43712 - 0 0 0 0 0 0 0 0 0 0 0 0
43713 - 0 0 0 0 0 0 0 0 0 0 0 0
43714 - 0 0 0 0 0 0 0 0 0 0 0 0
43715 - 0 0 0 0 0 0 0 0 0 0 0 0
43716 - 0 0 0 0 0 0 0 0 0 0 0 0
43717 - 0 0 0 0 0 0 0 0 0 0 0 0
43718 - 0 0 0 0 0 0 0 0 0 14 14 14
43719 - 46 46 46 82 82 82 2 2 6 106 106 106
43720 -170 170 170 26 26 26 86 86 86 226 226 226
43721 -123 123 123 10 10 10 14 14 14 46 46 46
43722 -231 231 231 190 190 190 6 6 6 70 70 70
43723 - 90 90 90 238 238 238 158 158 158 2 2 6
43724 - 2 2 6 2 2 6 2 2 6 2 2 6
43725 - 70 70 70 58 58 58 22 22 22 6 6 6
43726 - 0 0 0 0 0 0 0 0 0 0 0 0
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 0 0 0
43732 - 0 0 0 0 0 0 0 0 1 0 0 0
43733 - 0 0 1 0 0 1 0 0 1 0 0 0
43734 - 0 0 0 0 0 0 0 0 0 0 0 0
43735 - 0 0 0 0 0 0 0 0 0 0 0 0
43736 - 0 0 0 0 0 0 0 0 0 0 0 0
43737 - 0 0 0 0 0 0 0 0 0 0 0 0
43738 - 0 0 0 0 0 0 0 0 0 14 14 14
43739 - 42 42 42 86 86 86 6 6 6 116 116 116
43740 -106 106 106 6 6 6 70 70 70 149 149 149
43741 -128 128 128 18 18 18 38 38 38 54 54 54
43742 -221 221 221 106 106 106 2 2 6 14 14 14
43743 - 46 46 46 190 190 190 198 198 198 2 2 6
43744 - 2 2 6 2 2 6 2 2 6 2 2 6
43745 - 74 74 74 62 62 62 22 22 22 6 6 6
43746 - 0 0 0 0 0 0 0 0 0 0 0 0
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 0 0 0
43752 - 0 0 0 0 0 0 0 0 1 0 0 0
43753 - 0 0 1 0 0 0 0 0 1 0 0 0
43754 - 0 0 0 0 0 0 0 0 0 0 0 0
43755 - 0 0 0 0 0 0 0 0 0 0 0 0
43756 - 0 0 0 0 0 0 0 0 0 0 0 0
43757 - 0 0 0 0 0 0 0 0 0 0 0 0
43758 - 0 0 0 0 0 0 0 0 0 14 14 14
43759 - 42 42 42 94 94 94 14 14 14 101 101 101
43760 -128 128 128 2 2 6 18 18 18 116 116 116
43761 -118 98 46 121 92 8 121 92 8 98 78 10
43762 -162 162 162 106 106 106 2 2 6 2 2 6
43763 - 2 2 6 195 195 195 195 195 195 6 6 6
43764 - 2 2 6 2 2 6 2 2 6 2 2 6
43765 - 74 74 74 62 62 62 22 22 22 6 6 6
43766 - 0 0 0 0 0 0 0 0 0 0 0 0
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 0 0 0
43772 - 0 0 0 0 0 0 0 0 1 0 0 1
43773 - 0 0 1 0 0 0 0 0 1 0 0 0
43774 - 0 0 0 0 0 0 0 0 0 0 0 0
43775 - 0 0 0 0 0 0 0 0 0 0 0 0
43776 - 0 0 0 0 0 0 0 0 0 0 0 0
43777 - 0 0 0 0 0 0 0 0 0 0 0 0
43778 - 0 0 0 0 0 0 0 0 0 10 10 10
43779 - 38 38 38 90 90 90 14 14 14 58 58 58
43780 -210 210 210 26 26 26 54 38 6 154 114 10
43781 -226 170 11 236 186 11 225 175 15 184 144 12
43782 -215 174 15 175 146 61 37 26 9 2 2 6
43783 - 70 70 70 246 246 246 138 138 138 2 2 6
43784 - 2 2 6 2 2 6 2 2 6 2 2 6
43785 - 70 70 70 66 66 66 26 26 26 6 6 6
43786 - 0 0 0 0 0 0 0 0 0 0 0 0
43787 - 0 0 0 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 0 0 0
43792 - 0 0 0 0 0 0 0 0 0 0 0 0
43793 - 0 0 0 0 0 0 0 0 0 0 0 0
43794 - 0 0 0 0 0 0 0 0 0 0 0 0
43795 - 0 0 0 0 0 0 0 0 0 0 0 0
43796 - 0 0 0 0 0 0 0 0 0 0 0 0
43797 - 0 0 0 0 0 0 0 0 0 0 0 0
43798 - 0 0 0 0 0 0 0 0 0 10 10 10
43799 - 38 38 38 86 86 86 14 14 14 10 10 10
43800 -195 195 195 188 164 115 192 133 9 225 175 15
43801 -239 182 13 234 190 10 232 195 16 232 200 30
43802 -245 207 45 241 208 19 232 195 16 184 144 12
43803 -218 194 134 211 206 186 42 42 42 2 2 6
43804 - 2 2 6 2 2 6 2 2 6 2 2 6
43805 - 50 50 50 74 74 74 30 30 30 6 6 6
43806 - 0 0 0 0 0 0 0 0 0 0 0 0
43807 - 0 0 0 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 0 0 0
43812 - 0 0 0 0 0 0 0 0 0 0 0 0
43813 - 0 0 0 0 0 0 0 0 0 0 0 0
43814 - 0 0 0 0 0 0 0 0 0 0 0 0
43815 - 0 0 0 0 0 0 0 0 0 0 0 0
43816 - 0 0 0 0 0 0 0 0 0 0 0 0
43817 - 0 0 0 0 0 0 0 0 0 0 0 0
43818 - 0 0 0 0 0 0 0 0 0 10 10 10
43819 - 34 34 34 86 86 86 14 14 14 2 2 6
43820 -121 87 25 192 133 9 219 162 10 239 182 13
43821 -236 186 11 232 195 16 241 208 19 244 214 54
43822 -246 218 60 246 218 38 246 215 20 241 208 19
43823 -241 208 19 226 184 13 121 87 25 2 2 6
43824 - 2 2 6 2 2 6 2 2 6 2 2 6
43825 - 50 50 50 82 82 82 34 34 34 10 10 10
43826 - 0 0 0 0 0 0 0 0 0 0 0 0
43827 - 0 0 0 0 0 0 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 0 0 0
43832 - 0 0 0 0 0 0 0 0 0 0 0 0
43833 - 0 0 0 0 0 0 0 0 0 0 0 0
43834 - 0 0 0 0 0 0 0 0 0 0 0 0
43835 - 0 0 0 0 0 0 0 0 0 0 0 0
43836 - 0 0 0 0 0 0 0 0 0 0 0 0
43837 - 0 0 0 0 0 0 0 0 0 0 0 0
43838 - 0 0 0 0 0 0 0 0 0 10 10 10
43839 - 34 34 34 82 82 82 30 30 30 61 42 6
43840 -180 123 7 206 145 10 230 174 11 239 182 13
43841 -234 190 10 238 202 15 241 208 19 246 218 74
43842 -246 218 38 246 215 20 246 215 20 246 215 20
43843 -226 184 13 215 174 15 184 144 12 6 6 6
43844 - 2 2 6 2 2 6 2 2 6 2 2 6
43845 - 26 26 26 94 94 94 42 42 42 14 14 14
43846 - 0 0 0 0 0 0 0 0 0 0 0 0
43847 - 0 0 0 0 0 0 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 0 0 0 0 0 0
43852 - 0 0 0 0 0 0 0 0 0 0 0 0
43853 - 0 0 0 0 0 0 0 0 0 0 0 0
43854 - 0 0 0 0 0 0 0 0 0 0 0 0
43855 - 0 0 0 0 0 0 0 0 0 0 0 0
43856 - 0 0 0 0 0 0 0 0 0 0 0 0
43857 - 0 0 0 0 0 0 0 0 0 0 0 0
43858 - 0 0 0 0 0 0 0 0 0 10 10 10
43859 - 30 30 30 78 78 78 50 50 50 104 69 6
43860 -192 133 9 216 158 10 236 178 12 236 186 11
43861 -232 195 16 241 208 19 244 214 54 245 215 43
43862 -246 215 20 246 215 20 241 208 19 198 155 10
43863 -200 144 11 216 158 10 156 118 10 2 2 6
43864 - 2 2 6 2 2 6 2 2 6 2 2 6
43865 - 6 6 6 90 90 90 54 54 54 18 18 18
43866 - 6 6 6 0 0 0 0 0 0 0 0 0
43867 - 0 0 0 0 0 0 0 0 0 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 0 0 0 0 0 0
43872 - 0 0 0 0 0 0 0 0 0 0 0 0
43873 - 0 0 0 0 0 0 0 0 0 0 0 0
43874 - 0 0 0 0 0 0 0 0 0 0 0 0
43875 - 0 0 0 0 0 0 0 0 0 0 0 0
43876 - 0 0 0 0 0 0 0 0 0 0 0 0
43877 - 0 0 0 0 0 0 0 0 0 0 0 0
43878 - 0 0 0 0 0 0 0 0 0 10 10 10
43879 - 30 30 30 78 78 78 46 46 46 22 22 22
43880 -137 92 6 210 162 10 239 182 13 238 190 10
43881 -238 202 15 241 208 19 246 215 20 246 215 20
43882 -241 208 19 203 166 17 185 133 11 210 150 10
43883 -216 158 10 210 150 10 102 78 10 2 2 6
43884 - 6 6 6 54 54 54 14 14 14 2 2 6
43885 - 2 2 6 62 62 62 74 74 74 30 30 30
43886 - 10 10 10 0 0 0 0 0 0 0 0 0
43887 - 0 0 0 0 0 0 0 0 0 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 0 0 0 0 0 0 0 0 0
43892 - 0 0 0 0 0 0 0 0 0 0 0 0
43893 - 0 0 0 0 0 0 0 0 0 0 0 0
43894 - 0 0 0 0 0 0 0 0 0 0 0 0
43895 - 0 0 0 0 0 0 0 0 0 0 0 0
43896 - 0 0 0 0 0 0 0 0 0 0 0 0
43897 - 0 0 0 0 0 0 0 0 0 0 0 0
43898 - 0 0 0 0 0 0 0 0 0 10 10 10
43899 - 34 34 34 78 78 78 50 50 50 6 6 6
43900 - 94 70 30 139 102 15 190 146 13 226 184 13
43901 -232 200 30 232 195 16 215 174 15 190 146 13
43902 -168 122 10 192 133 9 210 150 10 213 154 11
43903 -202 150 34 182 157 106 101 98 89 2 2 6
43904 - 2 2 6 78 78 78 116 116 116 58 58 58
43905 - 2 2 6 22 22 22 90 90 90 46 46 46
43906 - 18 18 18 6 6 6 0 0 0 0 0 0
43907 - 0 0 0 0 0 0 0 0 0 0 0 0
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 0 0 0 0 0 0 0 0 0 0 0 0
43912 - 0 0 0 0 0 0 0 0 0 0 0 0
43913 - 0 0 0 0 0 0 0 0 0 0 0 0
43914 - 0 0 0 0 0 0 0 0 0 0 0 0
43915 - 0 0 0 0 0 0 0 0 0 0 0 0
43916 - 0 0 0 0 0 0 0 0 0 0 0 0
43917 - 0 0 0 0 0 0 0 0 0 0 0 0
43918 - 0 0 0 0 0 0 0 0 0 10 10 10
43919 - 38 38 38 86 86 86 50 50 50 6 6 6
43920 -128 128 128 174 154 114 156 107 11 168 122 10
43921 -198 155 10 184 144 12 197 138 11 200 144 11
43922 -206 145 10 206 145 10 197 138 11 188 164 115
43923 -195 195 195 198 198 198 174 174 174 14 14 14
43924 - 2 2 6 22 22 22 116 116 116 116 116 116
43925 - 22 22 22 2 2 6 74 74 74 70 70 70
43926 - 30 30 30 10 10 10 0 0 0 0 0 0
43927 - 0 0 0 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 0 0 0 0 0 0 0 0 0 0 0 0
43932 - 0 0 0 0 0 0 0 0 0 0 0 0
43933 - 0 0 0 0 0 0 0 0 0 0 0 0
43934 - 0 0 0 0 0 0 0 0 0 0 0 0
43935 - 0 0 0 0 0 0 0 0 0 0 0 0
43936 - 0 0 0 0 0 0 0 0 0 0 0 0
43937 - 0 0 0 0 0 0 0 0 0 0 0 0
43938 - 0 0 0 0 0 0 6 6 6 18 18 18
43939 - 50 50 50 101 101 101 26 26 26 10 10 10
43940 -138 138 138 190 190 190 174 154 114 156 107 11
43941 -197 138 11 200 144 11 197 138 11 192 133 9
43942 -180 123 7 190 142 34 190 178 144 187 187 187
43943 -202 202 202 221 221 221 214 214 214 66 66 66
43944 - 2 2 6 2 2 6 50 50 50 62 62 62
43945 - 6 6 6 2 2 6 10 10 10 90 90 90
43946 - 50 50 50 18 18 18 6 6 6 0 0 0
43947 - 0 0 0 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 0 0 0
43951 - 0 0 0 0 0 0 0 0 0 0 0 0
43952 - 0 0 0 0 0 0 0 0 0 0 0 0
43953 - 0 0 0 0 0 0 0 0 0 0 0 0
43954 - 0 0 0 0 0 0 0 0 0 0 0 0
43955 - 0 0 0 0 0 0 0 0 0 0 0 0
43956 - 0 0 0 0 0 0 0 0 0 0 0 0
43957 - 0 0 0 0 0 0 0 0 0 0 0 0
43958 - 0 0 0 0 0 0 10 10 10 34 34 34
43959 - 74 74 74 74 74 74 2 2 6 6 6 6
43960 -144 144 144 198 198 198 190 190 190 178 166 146
43961 -154 121 60 156 107 11 156 107 11 168 124 44
43962 -174 154 114 187 187 187 190 190 190 210 210 210
43963 -246 246 246 253 253 253 253 253 253 182 182 182
43964 - 6 6 6 2 2 6 2 2 6 2 2 6
43965 - 2 2 6 2 2 6 2 2 6 62 62 62
43966 - 74 74 74 34 34 34 14 14 14 0 0 0
43967 - 0 0 0 0 0 0 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 0 0 0 0 0 0
43971 - 0 0 0 0 0 0 0 0 0 0 0 0
43972 - 0 0 0 0 0 0 0 0 0 0 0 0
43973 - 0 0 0 0 0 0 0 0 0 0 0 0
43974 - 0 0 0 0 0 0 0 0 0 0 0 0
43975 - 0 0 0 0 0 0 0 0 0 0 0 0
43976 - 0 0 0 0 0 0 0 0 0 0 0 0
43977 - 0 0 0 0 0 0 0 0 0 0 0 0
43978 - 0 0 0 10 10 10 22 22 22 54 54 54
43979 - 94 94 94 18 18 18 2 2 6 46 46 46
43980 -234 234 234 221 221 221 190 190 190 190 190 190
43981 -190 190 190 187 187 187 187 187 187 190 190 190
43982 -190 190 190 195 195 195 214 214 214 242 242 242
43983 -253 253 253 253 253 253 253 253 253 253 253 253
43984 - 82 82 82 2 2 6 2 2 6 2 2 6
43985 - 2 2 6 2 2 6 2 2 6 14 14 14
43986 - 86 86 86 54 54 54 22 22 22 6 6 6
43987 - 0 0 0 0 0 0 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 0 0 0 0 0 0 0 0 0
43991 - 0 0 0 0 0 0 0 0 0 0 0 0
43992 - 0 0 0 0 0 0 0 0 0 0 0 0
43993 - 0 0 0 0 0 0 0 0 0 0 0 0
43994 - 0 0 0 0 0 0 0 0 0 0 0 0
43995 - 0 0 0 0 0 0 0 0 0 0 0 0
43996 - 0 0 0 0 0 0 0 0 0 0 0 0
43997 - 0 0 0 0 0 0 0 0 0 0 0 0
43998 - 6 6 6 18 18 18 46 46 46 90 90 90
43999 - 46 46 46 18 18 18 6 6 6 182 182 182
44000 -253 253 253 246 246 246 206 206 206 190 190 190
44001 -190 190 190 190 190 190 190 190 190 190 190 190
44002 -206 206 206 231 231 231 250 250 250 253 253 253
44003 -253 253 253 253 253 253 253 253 253 253 253 253
44004 -202 202 202 14 14 14 2 2 6 2 2 6
44005 - 2 2 6 2 2 6 2 2 6 2 2 6
44006 - 42 42 42 86 86 86 42 42 42 18 18 18
44007 - 6 6 6 0 0 0 0 0 0 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 0 0 0 0 0 0 0 0 0 0 0 0
44011 - 0 0 0 0 0 0 0 0 0 0 0 0
44012 - 0 0 0 0 0 0 0 0 0 0 0 0
44013 - 0 0 0 0 0 0 0 0 0 0 0 0
44014 - 0 0 0 0 0 0 0 0 0 0 0 0
44015 - 0 0 0 0 0 0 0 0 0 0 0 0
44016 - 0 0 0 0 0 0 0 0 0 0 0 0
44017 - 0 0 0 0 0 0 0 0 0 6 6 6
44018 - 14 14 14 38 38 38 74 74 74 66 66 66
44019 - 2 2 6 6 6 6 90 90 90 250 250 250
44020 -253 253 253 253 253 253 238 238 238 198 198 198
44021 -190 190 190 190 190 190 195 195 195 221 221 221
44022 -246 246 246 253 253 253 253 253 253 253 253 253
44023 -253 253 253 253 253 253 253 253 253 253 253 253
44024 -253 253 253 82 82 82 2 2 6 2 2 6
44025 - 2 2 6 2 2 6 2 2 6 2 2 6
44026 - 2 2 6 78 78 78 70 70 70 34 34 34
44027 - 14 14 14 6 6 6 0 0 0 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 0 0 0 0 0 0 0 0 0
44030 - 0 0 0 0 0 0 0 0 0 0 0 0
44031 - 0 0 0 0 0 0 0 0 0 0 0 0
44032 - 0 0 0 0 0 0 0 0 0 0 0 0
44033 - 0 0 0 0 0 0 0 0 0 0 0 0
44034 - 0 0 0 0 0 0 0 0 0 0 0 0
44035 - 0 0 0 0 0 0 0 0 0 0 0 0
44036 - 0 0 0 0 0 0 0 0 0 0 0 0
44037 - 0 0 0 0 0 0 0 0 0 14 14 14
44038 - 34 34 34 66 66 66 78 78 78 6 6 6
44039 - 2 2 6 18 18 18 218 218 218 253 253 253
44040 -253 253 253 253 253 253 253 253 253 246 246 246
44041 -226 226 226 231 231 231 246 246 246 253 253 253
44042 -253 253 253 253 253 253 253 253 253 253 253 253
44043 -253 253 253 253 253 253 253 253 253 253 253 253
44044 -253 253 253 178 178 178 2 2 6 2 2 6
44045 - 2 2 6 2 2 6 2 2 6 2 2 6
44046 - 2 2 6 18 18 18 90 90 90 62 62 62
44047 - 30 30 30 10 10 10 0 0 0 0 0 0
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 0 0 0 0 0 0 0 0 0 0 0 0
44050 - 0 0 0 0 0 0 0 0 0 0 0 0
44051 - 0 0 0 0 0 0 0 0 0 0 0 0
44052 - 0 0 0 0 0 0 0 0 0 0 0 0
44053 - 0 0 0 0 0 0 0 0 0 0 0 0
44054 - 0 0 0 0 0 0 0 0 0 0 0 0
44055 - 0 0 0 0 0 0 0 0 0 0 0 0
44056 - 0 0 0 0 0 0 0 0 0 0 0 0
44057 - 0 0 0 0 0 0 10 10 10 26 26 26
44058 - 58 58 58 90 90 90 18 18 18 2 2 6
44059 - 2 2 6 110 110 110 253 253 253 253 253 253
44060 -253 253 253 253 253 253 253 253 253 253 253 253
44061 -250 250 250 253 253 253 253 253 253 253 253 253
44062 -253 253 253 253 253 253 253 253 253 253 253 253
44063 -253 253 253 253 253 253 253 253 253 253 253 253
44064 -253 253 253 231 231 231 18 18 18 2 2 6
44065 - 2 2 6 2 2 6 2 2 6 2 2 6
44066 - 2 2 6 2 2 6 18 18 18 94 94 94
44067 - 54 54 54 26 26 26 10 10 10 0 0 0
44068 - 0 0 0 0 0 0 0 0 0 0 0 0
44069 - 0 0 0 0 0 0 0 0 0 0 0 0
44070 - 0 0 0 0 0 0 0 0 0 0 0 0
44071 - 0 0 0 0 0 0 0 0 0 0 0 0
44072 - 0 0 0 0 0 0 0 0 0 0 0 0
44073 - 0 0 0 0 0 0 0 0 0 0 0 0
44074 - 0 0 0 0 0 0 0 0 0 0 0 0
44075 - 0 0 0 0 0 0 0 0 0 0 0 0
44076 - 0 0 0 0 0 0 0 0 0 0 0 0
44077 - 0 0 0 6 6 6 22 22 22 50 50 50
44078 - 90 90 90 26 26 26 2 2 6 2 2 6
44079 - 14 14 14 195 195 195 250 250 250 253 253 253
44080 -253 253 253 253 253 253 253 253 253 253 253 253
44081 -253 253 253 253 253 253 253 253 253 253 253 253
44082 -253 253 253 253 253 253 253 253 253 253 253 253
44083 -253 253 253 253 253 253 253 253 253 253 253 253
44084 -250 250 250 242 242 242 54 54 54 2 2 6
44085 - 2 2 6 2 2 6 2 2 6 2 2 6
44086 - 2 2 6 2 2 6 2 2 6 38 38 38
44087 - 86 86 86 50 50 50 22 22 22 6 6 6
44088 - 0 0 0 0 0 0 0 0 0 0 0 0
44089 - 0 0 0 0 0 0 0 0 0 0 0 0
44090 - 0 0 0 0 0 0 0 0 0 0 0 0
44091 - 0 0 0 0 0 0 0 0 0 0 0 0
44092 - 0 0 0 0 0 0 0 0 0 0 0 0
44093 - 0 0 0 0 0 0 0 0 0 0 0 0
44094 - 0 0 0 0 0 0 0 0 0 0 0 0
44095 - 0 0 0 0 0 0 0 0 0 0 0 0
44096 - 0 0 0 0 0 0 0 0 0 0 0 0
44097 - 6 6 6 14 14 14 38 38 38 82 82 82
44098 - 34 34 34 2 2 6 2 2 6 2 2 6
44099 - 42 42 42 195 195 195 246 246 246 253 253 253
44100 -253 253 253 253 253 253 253 253 253 250 250 250
44101 -242 242 242 242 242 242 250 250 250 253 253 253
44102 -253 253 253 253 253 253 253 253 253 253 253 253
44103 -253 253 253 250 250 250 246 246 246 238 238 238
44104 -226 226 226 231 231 231 101 101 101 6 6 6
44105 - 2 2 6 2 2 6 2 2 6 2 2 6
44106 - 2 2 6 2 2 6 2 2 6 2 2 6
44107 - 38 38 38 82 82 82 42 42 42 14 14 14
44108 - 6 6 6 0 0 0 0 0 0 0 0 0
44109 - 0 0 0 0 0 0 0 0 0 0 0 0
44110 - 0 0 0 0 0 0 0 0 0 0 0 0
44111 - 0 0 0 0 0 0 0 0 0 0 0 0
44112 - 0 0 0 0 0 0 0 0 0 0 0 0
44113 - 0 0 0 0 0 0 0 0 0 0 0 0
44114 - 0 0 0 0 0 0 0 0 0 0 0 0
44115 - 0 0 0 0 0 0 0 0 0 0 0 0
44116 - 0 0 0 0 0 0 0 0 0 0 0 0
44117 - 10 10 10 26 26 26 62 62 62 66 66 66
44118 - 2 2 6 2 2 6 2 2 6 6 6 6
44119 - 70 70 70 170 170 170 206 206 206 234 234 234
44120 -246 246 246 250 250 250 250 250 250 238 238 238
44121 -226 226 226 231 231 231 238 238 238 250 250 250
44122 -250 250 250 250 250 250 246 246 246 231 231 231
44123 -214 214 214 206 206 206 202 202 202 202 202 202
44124 -198 198 198 202 202 202 182 182 182 18 18 18
44125 - 2 2 6 2 2 6 2 2 6 2 2 6
44126 - 2 2 6 2 2 6 2 2 6 2 2 6
44127 - 2 2 6 62 62 62 66 66 66 30 30 30
44128 - 10 10 10 0 0 0 0 0 0 0 0 0
44129 - 0 0 0 0 0 0 0 0 0 0 0 0
44130 - 0 0 0 0 0 0 0 0 0 0 0 0
44131 - 0 0 0 0 0 0 0 0 0 0 0 0
44132 - 0 0 0 0 0 0 0 0 0 0 0 0
44133 - 0 0 0 0 0 0 0 0 0 0 0 0
44134 - 0 0 0 0 0 0 0 0 0 0 0 0
44135 - 0 0 0 0 0 0 0 0 0 0 0 0
44136 - 0 0 0 0 0 0 0 0 0 0 0 0
44137 - 14 14 14 42 42 42 82 82 82 18 18 18
44138 - 2 2 6 2 2 6 2 2 6 10 10 10
44139 - 94 94 94 182 182 182 218 218 218 242 242 242
44140 -250 250 250 253 253 253 253 253 253 250 250 250
44141 -234 234 234 253 253 253 253 253 253 253 253 253
44142 -253 253 253 253 253 253 253 253 253 246 246 246
44143 -238 238 238 226 226 226 210 210 210 202 202 202
44144 -195 195 195 195 195 195 210 210 210 158 158 158
44145 - 6 6 6 14 14 14 50 50 50 14 14 14
44146 - 2 2 6 2 2 6 2 2 6 2 2 6
44147 - 2 2 6 6 6 6 86 86 86 46 46 46
44148 - 18 18 18 6 6 6 0 0 0 0 0 0
44149 - 0 0 0 0 0 0 0 0 0 0 0 0
44150 - 0 0 0 0 0 0 0 0 0 0 0 0
44151 - 0 0 0 0 0 0 0 0 0 0 0 0
44152 - 0 0 0 0 0 0 0 0 0 0 0 0
44153 - 0 0 0 0 0 0 0 0 0 0 0 0
44154 - 0 0 0 0 0 0 0 0 0 0 0 0
44155 - 0 0 0 0 0 0 0 0 0 0 0 0
44156 - 0 0 0 0 0 0 0 0 0 6 6 6
44157 - 22 22 22 54 54 54 70 70 70 2 2 6
44158 - 2 2 6 10 10 10 2 2 6 22 22 22
44159 -166 166 166 231 231 231 250 250 250 253 253 253
44160 -253 253 253 253 253 253 253 253 253 250 250 250
44161 -242 242 242 253 253 253 253 253 253 253 253 253
44162 -253 253 253 253 253 253 253 253 253 253 253 253
44163 -253 253 253 253 253 253 253 253 253 246 246 246
44164 -231 231 231 206 206 206 198 198 198 226 226 226
44165 - 94 94 94 2 2 6 6 6 6 38 38 38
44166 - 30 30 30 2 2 6 2 2 6 2 2 6
44167 - 2 2 6 2 2 6 62 62 62 66 66 66
44168 - 26 26 26 10 10 10 0 0 0 0 0 0
44169 - 0 0 0 0 0 0 0 0 0 0 0 0
44170 - 0 0 0 0 0 0 0 0 0 0 0 0
44171 - 0 0 0 0 0 0 0 0 0 0 0 0
44172 - 0 0 0 0 0 0 0 0 0 0 0 0
44173 - 0 0 0 0 0 0 0 0 0 0 0 0
44174 - 0 0 0 0 0 0 0 0 0 0 0 0
44175 - 0 0 0 0 0 0 0 0 0 0 0 0
44176 - 0 0 0 0 0 0 0 0 0 10 10 10
44177 - 30 30 30 74 74 74 50 50 50 2 2 6
44178 - 26 26 26 26 26 26 2 2 6 106 106 106
44179 -238 238 238 253 253 253 253 253 253 253 253 253
44180 -253 253 253 253 253 253 253 253 253 253 253 253
44181 -253 253 253 253 253 253 253 253 253 253 253 253
44182 -253 253 253 253 253 253 253 253 253 253 253 253
44183 -253 253 253 253 253 253 253 253 253 253 253 253
44184 -253 253 253 246 246 246 218 218 218 202 202 202
44185 -210 210 210 14 14 14 2 2 6 2 2 6
44186 - 30 30 30 22 22 22 2 2 6 2 2 6
44187 - 2 2 6 2 2 6 18 18 18 86 86 86
44188 - 42 42 42 14 14 14 0 0 0 0 0 0
44189 - 0 0 0 0 0 0 0 0 0 0 0 0
44190 - 0 0 0 0 0 0 0 0 0 0 0 0
44191 - 0 0 0 0 0 0 0 0 0 0 0 0
44192 - 0 0 0 0 0 0 0 0 0 0 0 0
44193 - 0 0 0 0 0 0 0 0 0 0 0 0
44194 - 0 0 0 0 0 0 0 0 0 0 0 0
44195 - 0 0 0 0 0 0 0 0 0 0 0 0
44196 - 0 0 0 0 0 0 0 0 0 14 14 14
44197 - 42 42 42 90 90 90 22 22 22 2 2 6
44198 - 42 42 42 2 2 6 18 18 18 218 218 218
44199 -253 253 253 253 253 253 253 253 253 253 253 253
44200 -253 253 253 253 253 253 253 253 253 253 253 253
44201 -253 253 253 253 253 253 253 253 253 253 253 253
44202 -253 253 253 253 253 253 253 253 253 253 253 253
44203 -253 253 253 253 253 253 253 253 253 253 253 253
44204 -253 253 253 253 253 253 250 250 250 221 221 221
44205 -218 218 218 101 101 101 2 2 6 14 14 14
44206 - 18 18 18 38 38 38 10 10 10 2 2 6
44207 - 2 2 6 2 2 6 2 2 6 78 78 78
44208 - 58 58 58 22 22 22 6 6 6 0 0 0
44209 - 0 0 0 0 0 0 0 0 0 0 0 0
44210 - 0 0 0 0 0 0 0 0 0 0 0 0
44211 - 0 0 0 0 0 0 0 0 0 0 0 0
44212 - 0 0 0 0 0 0 0 0 0 0 0 0
44213 - 0 0 0 0 0 0 0 0 0 0 0 0
44214 - 0 0 0 0 0 0 0 0 0 0 0 0
44215 - 0 0 0 0 0 0 0 0 0 0 0 0
44216 - 0 0 0 0 0 0 6 6 6 18 18 18
44217 - 54 54 54 82 82 82 2 2 6 26 26 26
44218 - 22 22 22 2 2 6 123 123 123 253 253 253
44219 -253 253 253 253 253 253 253 253 253 253 253 253
44220 -253 253 253 253 253 253 253 253 253 253 253 253
44221 -253 253 253 253 253 253 253 253 253 253 253 253
44222 -253 253 253 253 253 253 253 253 253 253 253 253
44223 -253 253 253 253 253 253 253 253 253 253 253 253
44224 -253 253 253 253 253 253 253 253 253 250 250 250
44225 -238 238 238 198 198 198 6 6 6 38 38 38
44226 - 58 58 58 26 26 26 38 38 38 2 2 6
44227 - 2 2 6 2 2 6 2 2 6 46 46 46
44228 - 78 78 78 30 30 30 10 10 10 0 0 0
44229 - 0 0 0 0 0 0 0 0 0 0 0 0
44230 - 0 0 0 0 0 0 0 0 0 0 0 0
44231 - 0 0 0 0 0 0 0 0 0 0 0 0
44232 - 0 0 0 0 0 0 0 0 0 0 0 0
44233 - 0 0 0 0 0 0 0 0 0 0 0 0
44234 - 0 0 0 0 0 0 0 0 0 0 0 0
44235 - 0 0 0 0 0 0 0 0 0 0 0 0
44236 - 0 0 0 0 0 0 10 10 10 30 30 30
44237 - 74 74 74 58 58 58 2 2 6 42 42 42
44238 - 2 2 6 22 22 22 231 231 231 253 253 253
44239 -253 253 253 253 253 253 253 253 253 253 253 253
44240 -253 253 253 253 253 253 253 253 253 250 250 250
44241 -253 253 253 253 253 253 253 253 253 253 253 253
44242 -253 253 253 253 253 253 253 253 253 253 253 253
44243 -253 253 253 253 253 253 253 253 253 253 253 253
44244 -253 253 253 253 253 253 253 253 253 253 253 253
44245 -253 253 253 246 246 246 46 46 46 38 38 38
44246 - 42 42 42 14 14 14 38 38 38 14 14 14
44247 - 2 2 6 2 2 6 2 2 6 6 6 6
44248 - 86 86 86 46 46 46 14 14 14 0 0 0
44249 - 0 0 0 0 0 0 0 0 0 0 0 0
44250 - 0 0 0 0 0 0 0 0 0 0 0 0
44251 - 0 0 0 0 0 0 0 0 0 0 0 0
44252 - 0 0 0 0 0 0 0 0 0 0 0 0
44253 - 0 0 0 0 0 0 0 0 0 0 0 0
44254 - 0 0 0 0 0 0 0 0 0 0 0 0
44255 - 0 0 0 0 0 0 0 0 0 0 0 0
44256 - 0 0 0 6 6 6 14 14 14 42 42 42
44257 - 90 90 90 18 18 18 18 18 18 26 26 26
44258 - 2 2 6 116 116 116 253 253 253 253 253 253
44259 -253 253 253 253 253 253 253 253 253 253 253 253
44260 -253 253 253 253 253 253 250 250 250 238 238 238
44261 -253 253 253 253 253 253 253 253 253 253 253 253
44262 -253 253 253 253 253 253 253 253 253 253 253 253
44263 -253 253 253 253 253 253 253 253 253 253 253 253
44264 -253 253 253 253 253 253 253 253 253 253 253 253
44265 -253 253 253 253 253 253 94 94 94 6 6 6
44266 - 2 2 6 2 2 6 10 10 10 34 34 34
44267 - 2 2 6 2 2 6 2 2 6 2 2 6
44268 - 74 74 74 58 58 58 22 22 22 6 6 6
44269 - 0 0 0 0 0 0 0 0 0 0 0 0
44270 - 0 0 0 0 0 0 0 0 0 0 0 0
44271 - 0 0 0 0 0 0 0 0 0 0 0 0
44272 - 0 0 0 0 0 0 0 0 0 0 0 0
44273 - 0 0 0 0 0 0 0 0 0 0 0 0
44274 - 0 0 0 0 0 0 0 0 0 0 0 0
44275 - 0 0 0 0 0 0 0 0 0 0 0 0
44276 - 0 0 0 10 10 10 26 26 26 66 66 66
44277 - 82 82 82 2 2 6 38 38 38 6 6 6
44278 - 14 14 14 210 210 210 253 253 253 253 253 253
44279 -253 253 253 253 253 253 253 253 253 253 253 253
44280 -253 253 253 253 253 253 246 246 246 242 242 242
44281 -253 253 253 253 253 253 253 253 253 253 253 253
44282 -253 253 253 253 253 253 253 253 253 253 253 253
44283 -253 253 253 253 253 253 253 253 253 253 253 253
44284 -253 253 253 253 253 253 253 253 253 253 253 253
44285 -253 253 253 253 253 253 144 144 144 2 2 6
44286 - 2 2 6 2 2 6 2 2 6 46 46 46
44287 - 2 2 6 2 2 6 2 2 6 2 2 6
44288 - 42 42 42 74 74 74 30 30 30 10 10 10
44289 - 0 0 0 0 0 0 0 0 0 0 0 0
44290 - 0 0 0 0 0 0 0 0 0 0 0 0
44291 - 0 0 0 0 0 0 0 0 0 0 0 0
44292 - 0 0 0 0 0 0 0 0 0 0 0 0
44293 - 0 0 0 0 0 0 0 0 0 0 0 0
44294 - 0 0 0 0 0 0 0 0 0 0 0 0
44295 - 0 0 0 0 0 0 0 0 0 0 0 0
44296 - 6 6 6 14 14 14 42 42 42 90 90 90
44297 - 26 26 26 6 6 6 42 42 42 2 2 6
44298 - 74 74 74 250 250 250 253 253 253 253 253 253
44299 -253 253 253 253 253 253 253 253 253 253 253 253
44300 -253 253 253 253 253 253 242 242 242 242 242 242
44301 -253 253 253 253 253 253 253 253 253 253 253 253
44302 -253 253 253 253 253 253 253 253 253 253 253 253
44303 -253 253 253 253 253 253 253 253 253 253 253 253
44304 -253 253 253 253 253 253 253 253 253 253 253 253
44305 -253 253 253 253 253 253 182 182 182 2 2 6
44306 - 2 2 6 2 2 6 2 2 6 46 46 46
44307 - 2 2 6 2 2 6 2 2 6 2 2 6
44308 - 10 10 10 86 86 86 38 38 38 10 10 10
44309 - 0 0 0 0 0 0 0 0 0 0 0 0
44310 - 0 0 0 0 0 0 0 0 0 0 0 0
44311 - 0 0 0 0 0 0 0 0 0 0 0 0
44312 - 0 0 0 0 0 0 0 0 0 0 0 0
44313 - 0 0 0 0 0 0 0 0 0 0 0 0
44314 - 0 0 0 0 0 0 0 0 0 0 0 0
44315 - 0 0 0 0 0 0 0 0 0 0 0 0
44316 - 10 10 10 26 26 26 66 66 66 82 82 82
44317 - 2 2 6 22 22 22 18 18 18 2 2 6
44318 -149 149 149 253 253 253 253 253 253 253 253 253
44319 -253 253 253 253 253 253 253 253 253 253 253 253
44320 -253 253 253 253 253 253 234 234 234 242 242 242
44321 -253 253 253 253 253 253 253 253 253 253 253 253
44322 -253 253 253 253 253 253 253 253 253 253 253 253
44323 -253 253 253 253 253 253 253 253 253 253 253 253
44324 -253 253 253 253 253 253 253 253 253 253 253 253
44325 -253 253 253 253 253 253 206 206 206 2 2 6
44326 - 2 2 6 2 2 6 2 2 6 38 38 38
44327 - 2 2 6 2 2 6 2 2 6 2 2 6
44328 - 6 6 6 86 86 86 46 46 46 14 14 14
44329 - 0 0 0 0 0 0 0 0 0 0 0 0
44330 - 0 0 0 0 0 0 0 0 0 0 0 0
44331 - 0 0 0 0 0 0 0 0 0 0 0 0
44332 - 0 0 0 0 0 0 0 0 0 0 0 0
44333 - 0 0 0 0 0 0 0 0 0 0 0 0
44334 - 0 0 0 0 0 0 0 0 0 0 0 0
44335 - 0 0 0 0 0 0 0 0 0 6 6 6
44336 - 18 18 18 46 46 46 86 86 86 18 18 18
44337 - 2 2 6 34 34 34 10 10 10 6 6 6
44338 -210 210 210 253 253 253 253 253 253 253 253 253
44339 -253 253 253 253 253 253 253 253 253 253 253 253
44340 -253 253 253 253 253 253 234 234 234 242 242 242
44341 -253 253 253 253 253 253 253 253 253 253 253 253
44342 -253 253 253 253 253 253 253 253 253 253 253 253
44343 -253 253 253 253 253 253 253 253 253 253 253 253
44344 -253 253 253 253 253 253 253 253 253 253 253 253
44345 -253 253 253 253 253 253 221 221 221 6 6 6
44346 - 2 2 6 2 2 6 6 6 6 30 30 30
44347 - 2 2 6 2 2 6 2 2 6 2 2 6
44348 - 2 2 6 82 82 82 54 54 54 18 18 18
44349 - 6 6 6 0 0 0 0 0 0 0 0 0
44350 - 0 0 0 0 0 0 0 0 0 0 0 0
44351 - 0 0 0 0 0 0 0 0 0 0 0 0
44352 - 0 0 0 0 0 0 0 0 0 0 0 0
44353 - 0 0 0 0 0 0 0 0 0 0 0 0
44354 - 0 0 0 0 0 0 0 0 0 0 0 0
44355 - 0 0 0 0 0 0 0 0 0 10 10 10
44356 - 26 26 26 66 66 66 62 62 62 2 2 6
44357 - 2 2 6 38 38 38 10 10 10 26 26 26
44358 -238 238 238 253 253 253 253 253 253 253 253 253
44359 -253 253 253 253 253 253 253 253 253 253 253 253
44360 -253 253 253 253 253 253 231 231 231 238 238 238
44361 -253 253 253 253 253 253 253 253 253 253 253 253
44362 -253 253 253 253 253 253 253 253 253 253 253 253
44363 -253 253 253 253 253 253 253 253 253 253 253 253
44364 -253 253 253 253 253 253 253 253 253 253 253 253
44365 -253 253 253 253 253 253 231 231 231 6 6 6
44366 - 2 2 6 2 2 6 10 10 10 30 30 30
44367 - 2 2 6 2 2 6 2 2 6 2 2 6
44368 - 2 2 6 66 66 66 58 58 58 22 22 22
44369 - 6 6 6 0 0 0 0 0 0 0 0 0
44370 - 0 0 0 0 0 0 0 0 0 0 0 0
44371 - 0 0 0 0 0 0 0 0 0 0 0 0
44372 - 0 0 0 0 0 0 0 0 0 0 0 0
44373 - 0 0 0 0 0 0 0 0 0 0 0 0
44374 - 0 0 0 0 0 0 0 0 0 0 0 0
44375 - 0 0 0 0 0 0 0 0 0 10 10 10
44376 - 38 38 38 78 78 78 6 6 6 2 2 6
44377 - 2 2 6 46 46 46 14 14 14 42 42 42
44378 -246 246 246 253 253 253 253 253 253 253 253 253
44379 -253 253 253 253 253 253 253 253 253 253 253 253
44380 -253 253 253 253 253 253 231 231 231 242 242 242
44381 -253 253 253 253 253 253 253 253 253 253 253 253
44382 -253 253 253 253 253 253 253 253 253 253 253 253
44383 -253 253 253 253 253 253 253 253 253 253 253 253
44384 -253 253 253 253 253 253 253 253 253 253 253 253
44385 -253 253 253 253 253 253 234 234 234 10 10 10
44386 - 2 2 6 2 2 6 22 22 22 14 14 14
44387 - 2 2 6 2 2 6 2 2 6 2 2 6
44388 - 2 2 6 66 66 66 62 62 62 22 22 22
44389 - 6 6 6 0 0 0 0 0 0 0 0 0
44390 - 0 0 0 0 0 0 0 0 0 0 0 0
44391 - 0 0 0 0 0 0 0 0 0 0 0 0
44392 - 0 0 0 0 0 0 0 0 0 0 0 0
44393 - 0 0 0 0 0 0 0 0 0 0 0 0
44394 - 0 0 0 0 0 0 0 0 0 0 0 0
44395 - 0 0 0 0 0 0 6 6 6 18 18 18
44396 - 50 50 50 74 74 74 2 2 6 2 2 6
44397 - 14 14 14 70 70 70 34 34 34 62 62 62
44398 -250 250 250 253 253 253 253 253 253 253 253 253
44399 -253 253 253 253 253 253 253 253 253 253 253 253
44400 -253 253 253 253 253 253 231 231 231 246 246 246
44401 -253 253 253 253 253 253 253 253 253 253 253 253
44402 -253 253 253 253 253 253 253 253 253 253 253 253
44403 -253 253 253 253 253 253 253 253 253 253 253 253
44404 -253 253 253 253 253 253 253 253 253 253 253 253
44405 -253 253 253 253 253 253 234 234 234 14 14 14
44406 - 2 2 6 2 2 6 30 30 30 2 2 6
44407 - 2 2 6 2 2 6 2 2 6 2 2 6
44408 - 2 2 6 66 66 66 62 62 62 22 22 22
44409 - 6 6 6 0 0 0 0 0 0 0 0 0
44410 - 0 0 0 0 0 0 0 0 0 0 0 0
44411 - 0 0 0 0 0 0 0 0 0 0 0 0
44412 - 0 0 0 0 0 0 0 0 0 0 0 0
44413 - 0 0 0 0 0 0 0 0 0 0 0 0
44414 - 0 0 0 0 0 0 0 0 0 0 0 0
44415 - 0 0 0 0 0 0 6 6 6 18 18 18
44416 - 54 54 54 62 62 62 2 2 6 2 2 6
44417 - 2 2 6 30 30 30 46 46 46 70 70 70
44418 -250 250 250 253 253 253 253 253 253 253 253 253
44419 -253 253 253 253 253 253 253 253 253 253 253 253
44420 -253 253 253 253 253 253 231 231 231 246 246 246
44421 -253 253 253 253 253 253 253 253 253 253 253 253
44422 -253 253 253 253 253 253 253 253 253 253 253 253
44423 -253 253 253 253 253 253 253 253 253 253 253 253
44424 -253 253 253 253 253 253 253 253 253 253 253 253
44425 -253 253 253 253 253 253 226 226 226 10 10 10
44426 - 2 2 6 6 6 6 30 30 30 2 2 6
44427 - 2 2 6 2 2 6 2 2 6 2 2 6
44428 - 2 2 6 66 66 66 58 58 58 22 22 22
44429 - 6 6 6 0 0 0 0 0 0 0 0 0
44430 - 0 0 0 0 0 0 0 0 0 0 0 0
44431 - 0 0 0 0 0 0 0 0 0 0 0 0
44432 - 0 0 0 0 0 0 0 0 0 0 0 0
44433 - 0 0 0 0 0 0 0 0 0 0 0 0
44434 - 0 0 0 0 0 0 0 0 0 0 0 0
44435 - 0 0 0 0 0 0 6 6 6 22 22 22
44436 - 58 58 58 62 62 62 2 2 6 2 2 6
44437 - 2 2 6 2 2 6 30 30 30 78 78 78
44438 -250 250 250 253 253 253 253 253 253 253 253 253
44439 -253 253 253 253 253 253 253 253 253 253 253 253
44440 -253 253 253 253 253 253 231 231 231 246 246 246
44441 -253 253 253 253 253 253 253 253 253 253 253 253
44442 -253 253 253 253 253 253 253 253 253 253 253 253
44443 -253 253 253 253 253 253 253 253 253 253 253 253
44444 -253 253 253 253 253 253 253 253 253 253 253 253
44445 -253 253 253 253 253 253 206 206 206 2 2 6
44446 - 22 22 22 34 34 34 18 14 6 22 22 22
44447 - 26 26 26 18 18 18 6 6 6 2 2 6
44448 - 2 2 6 82 82 82 54 54 54 18 18 18
44449 - 6 6 6 0 0 0 0 0 0 0 0 0
44450 - 0 0 0 0 0 0 0 0 0 0 0 0
44451 - 0 0 0 0 0 0 0 0 0 0 0 0
44452 - 0 0 0 0 0 0 0 0 0 0 0 0
44453 - 0 0 0 0 0 0 0 0 0 0 0 0
44454 - 0 0 0 0 0 0 0 0 0 0 0 0
44455 - 0 0 0 0 0 0 6 6 6 26 26 26
44456 - 62 62 62 106 106 106 74 54 14 185 133 11
44457 -210 162 10 121 92 8 6 6 6 62 62 62
44458 -238 238 238 253 253 253 253 253 253 253 253 253
44459 -253 253 253 253 253 253 253 253 253 253 253 253
44460 -253 253 253 253 253 253 231 231 231 246 246 246
44461 -253 253 253 253 253 253 253 253 253 253 253 253
44462 -253 253 253 253 253 253 253 253 253 253 253 253
44463 -253 253 253 253 253 253 253 253 253 253 253 253
44464 -253 253 253 253 253 253 253 253 253 253 253 253
44465 -253 253 253 253 253 253 158 158 158 18 18 18
44466 - 14 14 14 2 2 6 2 2 6 2 2 6
44467 - 6 6 6 18 18 18 66 66 66 38 38 38
44468 - 6 6 6 94 94 94 50 50 50 18 18 18
44469 - 6 6 6 0 0 0 0 0 0 0 0 0
44470 - 0 0 0 0 0 0 0 0 0 0 0 0
44471 - 0 0 0 0 0 0 0 0 0 0 0 0
44472 - 0 0 0 0 0 0 0 0 0 0 0 0
44473 - 0 0 0 0 0 0 0 0 0 0 0 0
44474 - 0 0 0 0 0 0 0 0 0 6 6 6
44475 - 10 10 10 10 10 10 18 18 18 38 38 38
44476 - 78 78 78 142 134 106 216 158 10 242 186 14
44477 -246 190 14 246 190 14 156 118 10 10 10 10
44478 - 90 90 90 238 238 238 253 253 253 253 253 253
44479 -253 253 253 253 253 253 253 253 253 253 253 253
44480 -253 253 253 253 253 253 231 231 231 250 250 250
44481 -253 253 253 253 253 253 253 253 253 253 253 253
44482 -253 253 253 253 253 253 253 253 253 253 253 253
44483 -253 253 253 253 253 253 253 253 253 253 253 253
44484 -253 253 253 253 253 253 253 253 253 246 230 190
44485 -238 204 91 238 204 91 181 142 44 37 26 9
44486 - 2 2 6 2 2 6 2 2 6 2 2 6
44487 - 2 2 6 2 2 6 38 38 38 46 46 46
44488 - 26 26 26 106 106 106 54 54 54 18 18 18
44489 - 6 6 6 0 0 0 0 0 0 0 0 0
44490 - 0 0 0 0 0 0 0 0 0 0 0 0
44491 - 0 0 0 0 0 0 0 0 0 0 0 0
44492 - 0 0 0 0 0 0 0 0 0 0 0 0
44493 - 0 0 0 0 0 0 0 0 0 0 0 0
44494 - 0 0 0 6 6 6 14 14 14 22 22 22
44495 - 30 30 30 38 38 38 50 50 50 70 70 70
44496 -106 106 106 190 142 34 226 170 11 242 186 14
44497 -246 190 14 246 190 14 246 190 14 154 114 10
44498 - 6 6 6 74 74 74 226 226 226 253 253 253
44499 -253 253 253 253 253 253 253 253 253 253 253 253
44500 -253 253 253 253 253 253 231 231 231 250 250 250
44501 -253 253 253 253 253 253 253 253 253 253 253 253
44502 -253 253 253 253 253 253 253 253 253 253 253 253
44503 -253 253 253 253 253 253 253 253 253 253 253 253
44504 -253 253 253 253 253 253 253 253 253 228 184 62
44505 -241 196 14 241 208 19 232 195 16 38 30 10
44506 - 2 2 6 2 2 6 2 2 6 2 2 6
44507 - 2 2 6 6 6 6 30 30 30 26 26 26
44508 -203 166 17 154 142 90 66 66 66 26 26 26
44509 - 6 6 6 0 0 0 0 0 0 0 0 0
44510 - 0 0 0 0 0 0 0 0 0 0 0 0
44511 - 0 0 0 0 0 0 0 0 0 0 0 0
44512 - 0 0 0 0 0 0 0 0 0 0 0 0
44513 - 0 0 0 0 0 0 0 0 0 0 0 0
44514 - 6 6 6 18 18 18 38 38 38 58 58 58
44515 - 78 78 78 86 86 86 101 101 101 123 123 123
44516 -175 146 61 210 150 10 234 174 13 246 186 14
44517 -246 190 14 246 190 14 246 190 14 238 190 10
44518 -102 78 10 2 2 6 46 46 46 198 198 198
44519 -253 253 253 253 253 253 253 253 253 253 253 253
44520 -253 253 253 253 253 253 234 234 234 242 242 242
44521 -253 253 253 253 253 253 253 253 253 253 253 253
44522 -253 253 253 253 253 253 253 253 253 253 253 253
44523 -253 253 253 253 253 253 253 253 253 253 253 253
44524 -253 253 253 253 253 253 253 253 253 224 178 62
44525 -242 186 14 241 196 14 210 166 10 22 18 6
44526 - 2 2 6 2 2 6 2 2 6 2 2 6
44527 - 2 2 6 2 2 6 6 6 6 121 92 8
44528 -238 202 15 232 195 16 82 82 82 34 34 34
44529 - 10 10 10 0 0 0 0 0 0 0 0 0
44530 - 0 0 0 0 0 0 0 0 0 0 0 0
44531 - 0 0 0 0 0 0 0 0 0 0 0 0
44532 - 0 0 0 0 0 0 0 0 0 0 0 0
44533 - 0 0 0 0 0 0 0 0 0 0 0 0
44534 - 14 14 14 38 38 38 70 70 70 154 122 46
44535 -190 142 34 200 144 11 197 138 11 197 138 11
44536 -213 154 11 226 170 11 242 186 14 246 190 14
44537 -246 190 14 246 190 14 246 190 14 246 190 14
44538 -225 175 15 46 32 6 2 2 6 22 22 22
44539 -158 158 158 250 250 250 253 253 253 253 253 253
44540 -253 253 253 253 253 253 253 253 253 253 253 253
44541 -253 253 253 253 253 253 253 253 253 253 253 253
44542 -253 253 253 253 253 253 253 253 253 253 253 253
44543 -253 253 253 253 253 253 253 253 253 253 253 253
44544 -253 253 253 250 250 250 242 242 242 224 178 62
44545 -239 182 13 236 186 11 213 154 11 46 32 6
44546 - 2 2 6 2 2 6 2 2 6 2 2 6
44547 - 2 2 6 2 2 6 61 42 6 225 175 15
44548 -238 190 10 236 186 11 112 100 78 42 42 42
44549 - 14 14 14 0 0 0 0 0 0 0 0 0
44550 - 0 0 0 0 0 0 0 0 0 0 0 0
44551 - 0 0 0 0 0 0 0 0 0 0 0 0
44552 - 0 0 0 0 0 0 0 0 0 0 0 0
44553 - 0 0 0 0 0 0 0 0 0 6 6 6
44554 - 22 22 22 54 54 54 154 122 46 213 154 11
44555 -226 170 11 230 174 11 226 170 11 226 170 11
44556 -236 178 12 242 186 14 246 190 14 246 190 14
44557 -246 190 14 246 190 14 246 190 14 246 190 14
44558 -241 196 14 184 144 12 10 10 10 2 2 6
44559 - 6 6 6 116 116 116 242 242 242 253 253 253
44560 -253 253 253 253 253 253 253 253 253 253 253 253
44561 -253 253 253 253 253 253 253 253 253 253 253 253
44562 -253 253 253 253 253 253 253 253 253 253 253 253
44563 -253 253 253 253 253 253 253 253 253 253 253 253
44564 -253 253 253 231 231 231 198 198 198 214 170 54
44565 -236 178 12 236 178 12 210 150 10 137 92 6
44566 - 18 14 6 2 2 6 2 2 6 2 2 6
44567 - 6 6 6 70 47 6 200 144 11 236 178 12
44568 -239 182 13 239 182 13 124 112 88 58 58 58
44569 - 22 22 22 6 6 6 0 0 0 0 0 0
44570 - 0 0 0 0 0 0 0 0 0 0 0 0
44571 - 0 0 0 0 0 0 0 0 0 0 0 0
44572 - 0 0 0 0 0 0 0 0 0 0 0 0
44573 - 0 0 0 0 0 0 0 0 0 10 10 10
44574 - 30 30 30 70 70 70 180 133 36 226 170 11
44575 -239 182 13 242 186 14 242 186 14 246 186 14
44576 -246 190 14 246 190 14 246 190 14 246 190 14
44577 -246 190 14 246 190 14 246 190 14 246 190 14
44578 -246 190 14 232 195 16 98 70 6 2 2 6
44579 - 2 2 6 2 2 6 66 66 66 221 221 221
44580 -253 253 253 253 253 253 253 253 253 253 253 253
44581 -253 253 253 253 253 253 253 253 253 253 253 253
44582 -253 253 253 253 253 253 253 253 253 253 253 253
44583 -253 253 253 253 253 253 253 253 253 253 253 253
44584 -253 253 253 206 206 206 198 198 198 214 166 58
44585 -230 174 11 230 174 11 216 158 10 192 133 9
44586 -163 110 8 116 81 8 102 78 10 116 81 8
44587 -167 114 7 197 138 11 226 170 11 239 182 13
44588 -242 186 14 242 186 14 162 146 94 78 78 78
44589 - 34 34 34 14 14 14 6 6 6 0 0 0
44590 - 0 0 0 0 0 0 0 0 0 0 0 0
44591 - 0 0 0 0 0 0 0 0 0 0 0 0
44592 - 0 0 0 0 0 0 0 0 0 0 0 0
44593 - 0 0 0 0 0 0 0 0 0 6 6 6
44594 - 30 30 30 78 78 78 190 142 34 226 170 11
44595 -239 182 13 246 190 14 246 190 14 246 190 14
44596 -246 190 14 246 190 14 246 190 14 246 190 14
44597 -246 190 14 246 190 14 246 190 14 246 190 14
44598 -246 190 14 241 196 14 203 166 17 22 18 6
44599 - 2 2 6 2 2 6 2 2 6 38 38 38
44600 -218 218 218 253 253 253 253 253 253 253 253 253
44601 -253 253 253 253 253 253 253 253 253 253 253 253
44602 -253 253 253 253 253 253 253 253 253 253 253 253
44603 -253 253 253 253 253 253 253 253 253 253 253 253
44604 -250 250 250 206 206 206 198 198 198 202 162 69
44605 -226 170 11 236 178 12 224 166 10 210 150 10
44606 -200 144 11 197 138 11 192 133 9 197 138 11
44607 -210 150 10 226 170 11 242 186 14 246 190 14
44608 -246 190 14 246 186 14 225 175 15 124 112 88
44609 - 62 62 62 30 30 30 14 14 14 6 6 6
44610 - 0 0 0 0 0 0 0 0 0 0 0 0
44611 - 0 0 0 0 0 0 0 0 0 0 0 0
44612 - 0 0 0 0 0 0 0 0 0 0 0 0
44613 - 0 0 0 0 0 0 0 0 0 10 10 10
44614 - 30 30 30 78 78 78 174 135 50 224 166 10
44615 -239 182 13 246 190 14 246 190 14 246 190 14
44616 -246 190 14 246 190 14 246 190 14 246 190 14
44617 -246 190 14 246 190 14 246 190 14 246 190 14
44618 -246 190 14 246 190 14 241 196 14 139 102 15
44619 - 2 2 6 2 2 6 2 2 6 2 2 6
44620 - 78 78 78 250 250 250 253 253 253 253 253 253
44621 -253 253 253 253 253 253 253 253 253 253 253 253
44622 -253 253 253 253 253 253 253 253 253 253 253 253
44623 -253 253 253 253 253 253 253 253 253 253 253 253
44624 -250 250 250 214 214 214 198 198 198 190 150 46
44625 -219 162 10 236 178 12 234 174 13 224 166 10
44626 -216 158 10 213 154 11 213 154 11 216 158 10
44627 -226 170 11 239 182 13 246 190 14 246 190 14
44628 -246 190 14 246 190 14 242 186 14 206 162 42
44629 -101 101 101 58 58 58 30 30 30 14 14 14
44630 - 6 6 6 0 0 0 0 0 0 0 0 0
44631 - 0 0 0 0 0 0 0 0 0 0 0 0
44632 - 0 0 0 0 0 0 0 0 0 0 0 0
44633 - 0 0 0 0 0 0 0 0 0 10 10 10
44634 - 30 30 30 74 74 74 174 135 50 216 158 10
44635 -236 178 12 246 190 14 246 190 14 246 190 14
44636 -246 190 14 246 190 14 246 190 14 246 190 14
44637 -246 190 14 246 190 14 246 190 14 246 190 14
44638 -246 190 14 246 190 14 241 196 14 226 184 13
44639 - 61 42 6 2 2 6 2 2 6 2 2 6
44640 - 22 22 22 238 238 238 253 253 253 253 253 253
44641 -253 253 253 253 253 253 253 253 253 253 253 253
44642 -253 253 253 253 253 253 253 253 253 253 253 253
44643 -253 253 253 253 253 253 253 253 253 253 253 253
44644 -253 253 253 226 226 226 187 187 187 180 133 36
44645 -216 158 10 236 178 12 239 182 13 236 178 12
44646 -230 174 11 226 170 11 226 170 11 230 174 11
44647 -236 178 12 242 186 14 246 190 14 246 190 14
44648 -246 190 14 246 190 14 246 186 14 239 182 13
44649 -206 162 42 106 106 106 66 66 66 34 34 34
44650 - 14 14 14 6 6 6 0 0 0 0 0 0
44651 - 0 0 0 0 0 0 0 0 0 0 0 0
44652 - 0 0 0 0 0 0 0 0 0 0 0 0
44653 - 0 0 0 0 0 0 0 0 0 6 6 6
44654 - 26 26 26 70 70 70 163 133 67 213 154 11
44655 -236 178 12 246 190 14 246 190 14 246 190 14
44656 -246 190 14 246 190 14 246 190 14 246 190 14
44657 -246 190 14 246 190 14 246 190 14 246 190 14
44658 -246 190 14 246 190 14 246 190 14 241 196 14
44659 -190 146 13 18 14 6 2 2 6 2 2 6
44660 - 46 46 46 246 246 246 253 253 253 253 253 253
44661 -253 253 253 253 253 253 253 253 253 253 253 253
44662 -253 253 253 253 253 253 253 253 253 253 253 253
44663 -253 253 253 253 253 253 253 253 253 253 253 253
44664 -253 253 253 221 221 221 86 86 86 156 107 11
44665 -216 158 10 236 178 12 242 186 14 246 186 14
44666 -242 186 14 239 182 13 239 182 13 242 186 14
44667 -242 186 14 246 186 14 246 190 14 246 190 14
44668 -246 190 14 246 190 14 246 190 14 246 190 14
44669 -242 186 14 225 175 15 142 122 72 66 66 66
44670 - 30 30 30 10 10 10 0 0 0 0 0 0
44671 - 0 0 0 0 0 0 0 0 0 0 0 0
44672 - 0 0 0 0 0 0 0 0 0 0 0 0
44673 - 0 0 0 0 0 0 0 0 0 6 6 6
44674 - 26 26 26 70 70 70 163 133 67 210 150 10
44675 -236 178 12 246 190 14 246 190 14 246 190 14
44676 -246 190 14 246 190 14 246 190 14 246 190 14
44677 -246 190 14 246 190 14 246 190 14 246 190 14
44678 -246 190 14 246 190 14 246 190 14 246 190 14
44679 -232 195 16 121 92 8 34 34 34 106 106 106
44680 -221 221 221 253 253 253 253 253 253 253 253 253
44681 -253 253 253 253 253 253 253 253 253 253 253 253
44682 -253 253 253 253 253 253 253 253 253 253 253 253
44683 -253 253 253 253 253 253 253 253 253 253 253 253
44684 -242 242 242 82 82 82 18 14 6 163 110 8
44685 -216 158 10 236 178 12 242 186 14 246 190 14
44686 -246 190 14 246 190 14 246 190 14 246 190 14
44687 -246 190 14 246 190 14 246 190 14 246 190 14
44688 -246 190 14 246 190 14 246 190 14 246 190 14
44689 -246 190 14 246 190 14 242 186 14 163 133 67
44690 - 46 46 46 18 18 18 6 6 6 0 0 0
44691 - 0 0 0 0 0 0 0 0 0 0 0 0
44692 - 0 0 0 0 0 0 0 0 0 0 0 0
44693 - 0 0 0 0 0 0 0 0 0 10 10 10
44694 - 30 30 30 78 78 78 163 133 67 210 150 10
44695 -236 178 12 246 186 14 246 190 14 246 190 14
44696 -246 190 14 246 190 14 246 190 14 246 190 14
44697 -246 190 14 246 190 14 246 190 14 246 190 14
44698 -246 190 14 246 190 14 246 190 14 246 190 14
44699 -241 196 14 215 174 15 190 178 144 253 253 253
44700 -253 253 253 253 253 253 253 253 253 253 253 253
44701 -253 253 253 253 253 253 253 253 253 253 253 253
44702 -253 253 253 253 253 253 253 253 253 253 253 253
44703 -253 253 253 253 253 253 253 253 253 218 218 218
44704 - 58 58 58 2 2 6 22 18 6 167 114 7
44705 -216 158 10 236 178 12 246 186 14 246 190 14
44706 -246 190 14 246 190 14 246 190 14 246 190 14
44707 -246 190 14 246 190 14 246 190 14 246 190 14
44708 -246 190 14 246 190 14 246 190 14 246 190 14
44709 -246 190 14 246 186 14 242 186 14 190 150 46
44710 - 54 54 54 22 22 22 6 6 6 0 0 0
44711 - 0 0 0 0 0 0 0 0 0 0 0 0
44712 - 0 0 0 0 0 0 0 0 0 0 0 0
44713 - 0 0 0 0 0 0 0 0 0 14 14 14
44714 - 38 38 38 86 86 86 180 133 36 213 154 11
44715 -236 178 12 246 186 14 246 190 14 246 190 14
44716 -246 190 14 246 190 14 246 190 14 246 190 14
44717 -246 190 14 246 190 14 246 190 14 246 190 14
44718 -246 190 14 246 190 14 246 190 14 246 190 14
44719 -246 190 14 232 195 16 190 146 13 214 214 214
44720 -253 253 253 253 253 253 253 253 253 253 253 253
44721 -253 253 253 253 253 253 253 253 253 253 253 253
44722 -253 253 253 253 253 253 253 253 253 253 253 253
44723 -253 253 253 250 250 250 170 170 170 26 26 26
44724 - 2 2 6 2 2 6 37 26 9 163 110 8
44725 -219 162 10 239 182 13 246 186 14 246 190 14
44726 -246 190 14 246 190 14 246 190 14 246 190 14
44727 -246 190 14 246 190 14 246 190 14 246 190 14
44728 -246 190 14 246 190 14 246 190 14 246 190 14
44729 -246 186 14 236 178 12 224 166 10 142 122 72
44730 - 46 46 46 18 18 18 6 6 6 0 0 0
44731 - 0 0 0 0 0 0 0 0 0 0 0 0
44732 - 0 0 0 0 0 0 0 0 0 0 0 0
44733 - 0 0 0 0 0 0 6 6 6 18 18 18
44734 - 50 50 50 109 106 95 192 133 9 224 166 10
44735 -242 186 14 246 190 14 246 190 14 246 190 14
44736 -246 190 14 246 190 14 246 190 14 246 190 14
44737 -246 190 14 246 190 14 246 190 14 246 190 14
44738 -246 190 14 246 190 14 246 190 14 246 190 14
44739 -242 186 14 226 184 13 210 162 10 142 110 46
44740 -226 226 226 253 253 253 253 253 253 253 253 253
44741 -253 253 253 253 253 253 253 253 253 253 253 253
44742 -253 253 253 253 253 253 253 253 253 253 253 253
44743 -198 198 198 66 66 66 2 2 6 2 2 6
44744 - 2 2 6 2 2 6 50 34 6 156 107 11
44745 -219 162 10 239 182 13 246 186 14 246 190 14
44746 -246 190 14 246 190 14 246 190 14 246 190 14
44747 -246 190 14 246 190 14 246 190 14 246 190 14
44748 -246 190 14 246 190 14 246 190 14 242 186 14
44749 -234 174 13 213 154 11 154 122 46 66 66 66
44750 - 30 30 30 10 10 10 0 0 0 0 0 0
44751 - 0 0 0 0 0 0 0 0 0 0 0 0
44752 - 0 0 0 0 0 0 0 0 0 0 0 0
44753 - 0 0 0 0 0 0 6 6 6 22 22 22
44754 - 58 58 58 154 121 60 206 145 10 234 174 13
44755 -242 186 14 246 186 14 246 190 14 246 190 14
44756 -246 190 14 246 190 14 246 190 14 246 190 14
44757 -246 190 14 246 190 14 246 190 14 246 190 14
44758 -246 190 14 246 190 14 246 190 14 246 190 14
44759 -246 186 14 236 178 12 210 162 10 163 110 8
44760 - 61 42 6 138 138 138 218 218 218 250 250 250
44761 -253 253 253 253 253 253 253 253 253 250 250 250
44762 -242 242 242 210 210 210 144 144 144 66 66 66
44763 - 6 6 6 2 2 6 2 2 6 2 2 6
44764 - 2 2 6 2 2 6 61 42 6 163 110 8
44765 -216 158 10 236 178 12 246 190 14 246 190 14
44766 -246 190 14 246 190 14 246 190 14 246 190 14
44767 -246 190 14 246 190 14 246 190 14 246 190 14
44768 -246 190 14 239 182 13 230 174 11 216 158 10
44769 -190 142 34 124 112 88 70 70 70 38 38 38
44770 - 18 18 18 6 6 6 0 0 0 0 0 0
44771 - 0 0 0 0 0 0 0 0 0 0 0 0
44772 - 0 0 0 0 0 0 0 0 0 0 0 0
44773 - 0 0 0 0 0 0 6 6 6 22 22 22
44774 - 62 62 62 168 124 44 206 145 10 224 166 10
44775 -236 178 12 239 182 13 242 186 14 242 186 14
44776 -246 186 14 246 190 14 246 190 14 246 190 14
44777 -246 190 14 246 190 14 246 190 14 246 190 14
44778 -246 190 14 246 190 14 246 190 14 246 190 14
44779 -246 190 14 236 178 12 216 158 10 175 118 6
44780 - 80 54 7 2 2 6 6 6 6 30 30 30
44781 - 54 54 54 62 62 62 50 50 50 38 38 38
44782 - 14 14 14 2 2 6 2 2 6 2 2 6
44783 - 2 2 6 2 2 6 2 2 6 2 2 6
44784 - 2 2 6 6 6 6 80 54 7 167 114 7
44785 -213 154 11 236 178 12 246 190 14 246 190 14
44786 -246 190 14 246 190 14 246 190 14 246 190 14
44787 -246 190 14 242 186 14 239 182 13 239 182 13
44788 -230 174 11 210 150 10 174 135 50 124 112 88
44789 - 82 82 82 54 54 54 34 34 34 18 18 18
44790 - 6 6 6 0 0 0 0 0 0 0 0 0
44791 - 0 0 0 0 0 0 0 0 0 0 0 0
44792 - 0 0 0 0 0 0 0 0 0 0 0 0
44793 - 0 0 0 0 0 0 6 6 6 18 18 18
44794 - 50 50 50 158 118 36 192 133 9 200 144 11
44795 -216 158 10 219 162 10 224 166 10 226 170 11
44796 -230 174 11 236 178 12 239 182 13 239 182 13
44797 -242 186 14 246 186 14 246 190 14 246 190 14
44798 -246 190 14 246 190 14 246 190 14 246 190 14
44799 -246 186 14 230 174 11 210 150 10 163 110 8
44800 -104 69 6 10 10 10 2 2 6 2 2 6
44801 - 2 2 6 2 2 6 2 2 6 2 2 6
44802 - 2 2 6 2 2 6 2 2 6 2 2 6
44803 - 2 2 6 2 2 6 2 2 6 2 2 6
44804 - 2 2 6 6 6 6 91 60 6 167 114 7
44805 -206 145 10 230 174 11 242 186 14 246 190 14
44806 -246 190 14 246 190 14 246 186 14 242 186 14
44807 -239 182 13 230 174 11 224 166 10 213 154 11
44808 -180 133 36 124 112 88 86 86 86 58 58 58
44809 - 38 38 38 22 22 22 10 10 10 6 6 6
44810 - 0 0 0 0 0 0 0 0 0 0 0 0
44811 - 0 0 0 0 0 0 0 0 0 0 0 0
44812 - 0 0 0 0 0 0 0 0 0 0 0 0
44813 - 0 0 0 0 0 0 0 0 0 14 14 14
44814 - 34 34 34 70 70 70 138 110 50 158 118 36
44815 -167 114 7 180 123 7 192 133 9 197 138 11
44816 -200 144 11 206 145 10 213 154 11 219 162 10
44817 -224 166 10 230 174 11 239 182 13 242 186 14
44818 -246 186 14 246 186 14 246 186 14 246 186 14
44819 -239 182 13 216 158 10 185 133 11 152 99 6
44820 -104 69 6 18 14 6 2 2 6 2 2 6
44821 - 2 2 6 2 2 6 2 2 6 2 2 6
44822 - 2 2 6 2 2 6 2 2 6 2 2 6
44823 - 2 2 6 2 2 6 2 2 6 2 2 6
44824 - 2 2 6 6 6 6 80 54 7 152 99 6
44825 -192 133 9 219 162 10 236 178 12 239 182 13
44826 -246 186 14 242 186 14 239 182 13 236 178 12
44827 -224 166 10 206 145 10 192 133 9 154 121 60
44828 - 94 94 94 62 62 62 42 42 42 22 22 22
44829 - 14 14 14 6 6 6 0 0 0 0 0 0
44830 - 0 0 0 0 0 0 0 0 0 0 0 0
44831 - 0 0 0 0 0 0 0 0 0 0 0 0
44832 - 0 0 0 0 0 0 0 0 0 0 0 0
44833 - 0 0 0 0 0 0 0 0 0 6 6 6
44834 - 18 18 18 34 34 34 58 58 58 78 78 78
44835 -101 98 89 124 112 88 142 110 46 156 107 11
44836 -163 110 8 167 114 7 175 118 6 180 123 7
44837 -185 133 11 197 138 11 210 150 10 219 162 10
44838 -226 170 11 236 178 12 236 178 12 234 174 13
44839 -219 162 10 197 138 11 163 110 8 130 83 6
44840 - 91 60 6 10 10 10 2 2 6 2 2 6
44841 - 18 18 18 38 38 38 38 38 38 38 38 38
44842 - 38 38 38 38 38 38 38 38 38 38 38 38
44843 - 38 38 38 38 38 38 26 26 26 2 2 6
44844 - 2 2 6 6 6 6 70 47 6 137 92 6
44845 -175 118 6 200 144 11 219 162 10 230 174 11
44846 -234 174 13 230 174 11 219 162 10 210 150 10
44847 -192 133 9 163 110 8 124 112 88 82 82 82
44848 - 50 50 50 30 30 30 14 14 14 6 6 6
44849 - 0 0 0 0 0 0 0 0 0 0 0 0
44850 - 0 0 0 0 0 0 0 0 0 0 0 0
44851 - 0 0 0 0 0 0 0 0 0 0 0 0
44852 - 0 0 0 0 0 0 0 0 0 0 0 0
44853 - 0 0 0 0 0 0 0 0 0 0 0 0
44854 - 6 6 6 14 14 14 22 22 22 34 34 34
44855 - 42 42 42 58 58 58 74 74 74 86 86 86
44856 -101 98 89 122 102 70 130 98 46 121 87 25
44857 -137 92 6 152 99 6 163 110 8 180 123 7
44858 -185 133 11 197 138 11 206 145 10 200 144 11
44859 -180 123 7 156 107 11 130 83 6 104 69 6
44860 - 50 34 6 54 54 54 110 110 110 101 98 89
44861 - 86 86 86 82 82 82 78 78 78 78 78 78
44862 - 78 78 78 78 78 78 78 78 78 78 78 78
44863 - 78 78 78 82 82 82 86 86 86 94 94 94
44864 -106 106 106 101 101 101 86 66 34 124 80 6
44865 -156 107 11 180 123 7 192 133 9 200 144 11
44866 -206 145 10 200 144 11 192 133 9 175 118 6
44867 -139 102 15 109 106 95 70 70 70 42 42 42
44868 - 22 22 22 10 10 10 0 0 0 0 0 0
44869 - 0 0 0 0 0 0 0 0 0 0 0 0
44870 - 0 0 0 0 0 0 0 0 0 0 0 0
44871 - 0 0 0 0 0 0 0 0 0 0 0 0
44872 - 0 0 0 0 0 0 0 0 0 0 0 0
44873 - 0 0 0 0 0 0 0 0 0 0 0 0
44874 - 0 0 0 0 0 0 6 6 6 10 10 10
44875 - 14 14 14 22 22 22 30 30 30 38 38 38
44876 - 50 50 50 62 62 62 74 74 74 90 90 90
44877 -101 98 89 112 100 78 121 87 25 124 80 6
44878 -137 92 6 152 99 6 152 99 6 152 99 6
44879 -138 86 6 124 80 6 98 70 6 86 66 30
44880 -101 98 89 82 82 82 58 58 58 46 46 46
44881 - 38 38 38 34 34 34 34 34 34 34 34 34
44882 - 34 34 34 34 34 34 34 34 34 34 34 34
44883 - 34 34 34 34 34 34 38 38 38 42 42 42
44884 - 54 54 54 82 82 82 94 86 76 91 60 6
44885 -134 86 6 156 107 11 167 114 7 175 118 6
44886 -175 118 6 167 114 7 152 99 6 121 87 25
44887 -101 98 89 62 62 62 34 34 34 18 18 18
44888 - 6 6 6 0 0 0 0 0 0 0 0 0
44889 - 0 0 0 0 0 0 0 0 0 0 0 0
44890 - 0 0 0 0 0 0 0 0 0 0 0 0
44891 - 0 0 0 0 0 0 0 0 0 0 0 0
44892 - 0 0 0 0 0 0 0 0 0 0 0 0
44893 - 0 0 0 0 0 0 0 0 0 0 0 0
44894 - 0 0 0 0 0 0 0 0 0 0 0 0
44895 - 0 0 0 6 6 6 6 6 6 10 10 10
44896 - 18 18 18 22 22 22 30 30 30 42 42 42
44897 - 50 50 50 66 66 66 86 86 86 101 98 89
44898 -106 86 58 98 70 6 104 69 6 104 69 6
44899 -104 69 6 91 60 6 82 62 34 90 90 90
44900 - 62 62 62 38 38 38 22 22 22 14 14 14
44901 - 10 10 10 10 10 10 10 10 10 10 10 10
44902 - 10 10 10 10 10 10 6 6 6 10 10 10
44903 - 10 10 10 10 10 10 10 10 10 14 14 14
44904 - 22 22 22 42 42 42 70 70 70 89 81 66
44905 - 80 54 7 104 69 6 124 80 6 137 92 6
44906 -134 86 6 116 81 8 100 82 52 86 86 86
44907 - 58 58 58 30 30 30 14 14 14 6 6 6
44908 - 0 0 0 0 0 0 0 0 0 0 0 0
44909 - 0 0 0 0 0 0 0 0 0 0 0 0
44910 - 0 0 0 0 0 0 0 0 0 0 0 0
44911 - 0 0 0 0 0 0 0 0 0 0 0 0
44912 - 0 0 0 0 0 0 0 0 0 0 0 0
44913 - 0 0 0 0 0 0 0 0 0 0 0 0
44914 - 0 0 0 0 0 0 0 0 0 0 0 0
44915 - 0 0 0 0 0 0 0 0 0 0 0 0
44916 - 0 0 0 6 6 6 10 10 10 14 14 14
44917 - 18 18 18 26 26 26 38 38 38 54 54 54
44918 - 70 70 70 86 86 86 94 86 76 89 81 66
44919 - 89 81 66 86 86 86 74 74 74 50 50 50
44920 - 30 30 30 14 14 14 6 6 6 0 0 0
44921 - 0 0 0 0 0 0 0 0 0 0 0 0
44922 - 0 0 0 0 0 0 0 0 0 0 0 0
44923 - 0 0 0 0 0 0 0 0 0 0 0 0
44924 - 6 6 6 18 18 18 34 34 34 58 58 58
44925 - 82 82 82 89 81 66 89 81 66 89 81 66
44926 - 94 86 66 94 86 76 74 74 74 50 50 50
44927 - 26 26 26 14 14 14 6 6 6 0 0 0
44928 - 0 0 0 0 0 0 0 0 0 0 0 0
44929 - 0 0 0 0 0 0 0 0 0 0 0 0
44930 - 0 0 0 0 0 0 0 0 0 0 0 0
44931 - 0 0 0 0 0 0 0 0 0 0 0 0
44932 - 0 0 0 0 0 0 0 0 0 0 0 0
44933 - 0 0 0 0 0 0 0 0 0 0 0 0
44934 - 0 0 0 0 0 0 0 0 0 0 0 0
44935 - 0 0 0 0 0 0 0 0 0 0 0 0
44936 - 0 0 0 0 0 0 0 0 0 0 0 0
44937 - 6 6 6 6 6 6 14 14 14 18 18 18
44938 - 30 30 30 38 38 38 46 46 46 54 54 54
44939 - 50 50 50 42 42 42 30 30 30 18 18 18
44940 - 10 10 10 0 0 0 0 0 0 0 0 0
44941 - 0 0 0 0 0 0 0 0 0 0 0 0
44942 - 0 0 0 0 0 0 0 0 0 0 0 0
44943 - 0 0 0 0 0 0 0 0 0 0 0 0
44944 - 0 0 0 6 6 6 14 14 14 26 26 26
44945 - 38 38 38 50 50 50 58 58 58 58 58 58
44946 - 54 54 54 42 42 42 30 30 30 18 18 18
44947 - 10 10 10 0 0 0 0 0 0 0 0 0
44948 - 0 0 0 0 0 0 0 0 0 0 0 0
44949 - 0 0 0 0 0 0 0 0 0 0 0 0
44950 - 0 0 0 0 0 0 0 0 0 0 0 0
44951 - 0 0 0 0 0 0 0 0 0 0 0 0
44952 - 0 0 0 0 0 0 0 0 0 0 0 0
44953 - 0 0 0 0 0 0 0 0 0 0 0 0
44954 - 0 0 0 0 0 0 0 0 0 0 0 0
44955 - 0 0 0 0 0 0 0 0 0 0 0 0
44956 - 0 0 0 0 0 0 0 0 0 0 0 0
44957 - 0 0 0 0 0 0 0 0 0 6 6 6
44958 - 6 6 6 10 10 10 14 14 14 18 18 18
44959 - 18 18 18 14 14 14 10 10 10 6 6 6
44960 - 0 0 0 0 0 0 0 0 0 0 0 0
44961 - 0 0 0 0 0 0 0 0 0 0 0 0
44962 - 0 0 0 0 0 0 0 0 0 0 0 0
44963 - 0 0 0 0 0 0 0 0 0 0 0 0
44964 - 0 0 0 0 0 0 0 0 0 6 6 6
44965 - 14 14 14 18 18 18 22 22 22 22 22 22
44966 - 18 18 18 14 14 14 10 10 10 6 6 6
44967 - 0 0 0 0 0 0 0 0 0 0 0 0
44968 - 0 0 0 0 0 0 0 0 0 0 0 0
44969 - 0 0 0 0 0 0 0 0 0 0 0 0
44970 - 0 0 0 0 0 0 0 0 0 0 0 0
44971 - 0 0 0 0 0 0 0 0 0 0 0 0
44972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44985 +4 4 4 4 4 4
44986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44999 +4 4 4 4 4 4
45000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013 +4 4 4 4 4 4
45014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027 +4 4 4 4 4 4
45028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4
45042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055 +4 4 4 4 4 4
45056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45061 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45066 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45067 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069 +4 4 4 4 4 4
45070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45075 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45076 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45080 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45081 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45082 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083 +4 4 4 4 4 4
45084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45088 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45089 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45090 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45094 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45095 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45096 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45097 +4 4 4 4 4 4
45098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45101 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45102 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45103 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45104 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45107 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45108 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45109 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45110 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45111 +4 4 4 4 4 4
45112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45115 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45116 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45117 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45118 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45119 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45120 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45121 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45122 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45123 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45124 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45125 +4 4 4 4 4 4
45126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45129 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45130 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45131 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45132 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45133 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45134 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45135 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45136 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45137 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45138 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45139 +4 4 4 4 4 4
45140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45142 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45143 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45144 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45145 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45146 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45147 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45148 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45149 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45150 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45151 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45152 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45153 +4 4 4 4 4 4
45154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45156 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45157 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45158 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45159 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45160 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45161 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45162 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45163 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45164 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45165 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45166 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45167 +4 4 4 4 4 4
45168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45170 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45171 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45172 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45173 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45174 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45175 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45176 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45177 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45178 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45179 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45180 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45181 +4 4 4 4 4 4
45182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45184 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45185 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45186 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45187 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45188 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45189 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45190 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45191 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45192 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45193 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45194 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45195 +4 4 4 4 4 4
45196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45197 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45198 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45199 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45200 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45201 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45202 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45203 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45204 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45205 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45206 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45207 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45208 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45209 +4 4 4 4 4 4
45210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45211 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45212 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45213 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45214 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45215 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45216 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45217 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45218 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45219 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45220 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45221 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45222 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45223 +0 0 0 4 4 4
45224 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45225 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45226 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45227 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45228 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45229 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45230 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45231 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45232 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45233 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45234 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45235 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45236 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45237 +2 0 0 0 0 0
45238 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45239 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45240 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45241 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45242 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45243 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45244 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45245 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45246 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45247 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45248 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45249 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45250 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45251 +37 38 37 0 0 0
45252 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45253 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45254 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45255 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45256 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45257 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45258 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45259 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45260 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45261 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45262 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45263 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45264 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45265 +85 115 134 4 0 0
45266 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45267 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45268 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45269 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45270 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45271 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45272 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45273 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45274 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45275 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45276 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45277 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45278 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45279 +60 73 81 4 0 0
45280 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45281 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45282 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45283 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45284 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45285 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45286 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45287 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45288 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45289 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45290 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45291 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45292 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45293 +16 19 21 4 0 0
45294 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45295 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45296 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45297 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45298 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45299 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45300 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45301 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45302 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45303 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45304 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45305 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45306 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45307 +4 0 0 4 3 3
45308 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45309 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45310 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45312 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45313 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45314 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45315 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45316 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45317 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45318 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45319 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45320 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45321 +3 2 2 4 4 4
45322 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45323 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45324 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45325 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45326 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45327 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45328 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45329 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45330 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45331 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45332 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45333 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45334 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45335 +4 4 4 4 4 4
45336 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45337 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45338 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45339 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45340 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45341 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45342 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45343 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45344 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45345 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45346 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45347 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45348 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45349 +4 4 4 4 4 4
45350 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45351 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45352 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45353 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45354 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45355 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45356 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45357 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45358 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45359 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45360 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45361 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45362 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45363 +5 5 5 5 5 5
45364 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45365 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45366 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45367 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45368 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45369 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45370 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45371 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45372 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45373 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45374 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45375 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45376 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45377 +5 5 5 4 4 4
45378 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45379 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45380 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45381 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45382 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45383 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45384 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45385 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45386 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45387 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45388 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45389 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45391 +4 4 4 4 4 4
45392 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45393 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45394 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45395 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45396 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45397 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45398 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45399 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45400 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45401 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45402 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45403 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45405 +4 4 4 4 4 4
45406 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45407 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45408 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45409 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45410 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45411 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45412 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45413 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45414 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45415 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45416 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45419 +4 4 4 4 4 4
45420 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45421 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45422 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45423 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45424 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45425 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45426 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45427 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45428 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45429 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45430 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45433 +4 4 4 4 4 4
45434 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45435 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45436 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45437 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45438 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45439 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45440 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45441 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45442 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45443 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45444 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45447 +4 4 4 4 4 4
45448 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45449 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45450 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45451 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45452 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45453 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45454 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45455 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45456 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45457 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45458 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45461 +4 4 4 4 4 4
45462 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45463 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45464 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45465 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45466 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45467 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45468 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45469 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45470 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45471 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45472 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45475 +4 4 4 4 4 4
45476 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45477 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45478 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45479 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45480 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45481 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45482 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45483 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45484 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45485 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45486 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45489 +4 4 4 4 4 4
45490 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45491 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45492 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45493 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45494 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45495 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45496 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45497 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45498 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45499 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45500 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45503 +4 4 4 4 4 4
45504 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45505 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45506 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45507 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45508 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45509 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45510 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45511 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45512 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45513 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45514 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45517 +4 4 4 4 4 4
45518 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45519 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45520 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45521 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45522 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45523 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45524 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45525 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45526 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45527 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45528 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531 +4 4 4 4 4 4
45532 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45533 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45534 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45535 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45536 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45537 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45538 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45539 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45540 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45541 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45542 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545 +4 4 4 4 4 4
45546 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45547 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45548 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45549 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45550 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45551 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45552 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45553 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45554 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45555 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45556 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559 +4 4 4 4 4 4
45560 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45561 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45562 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45563 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45564 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45565 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45566 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45567 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45568 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45569 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45570 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573 +4 4 4 4 4 4
45574 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45575 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45576 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45577 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45578 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45579 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45580 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45581 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45582 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45583 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45584 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587 +4 4 4 4 4 4
45588 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45589 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45590 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45591 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45592 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45593 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45594 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45595 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45596 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45597 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45598 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601 +4 4 4 4 4 4
45602 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45603 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45604 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45605 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45606 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45607 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45608 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45609 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45610 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45611 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45612 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615 +4 4 4 4 4 4
45616 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45617 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45618 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45619 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45620 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45621 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45622 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45623 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45624 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45625 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45626 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629 +4 4 4 4 4 4
45630 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45631 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45632 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45633 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45634 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45635 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45636 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45637 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45638 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45639 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45640 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643 +4 4 4 4 4 4
45644 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45645 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45646 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45647 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45648 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45649 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45650 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45651 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45652 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45653 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45654 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45657 +4 4 4 4 4 4
45658 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45659 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45660 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45661 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45662 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45663 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45664 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45665 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45666 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45667 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45668 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671 +4 4 4 4 4 4
45672 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45673 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45674 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45675 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45676 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45677 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45678 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45679 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45680 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45681 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45682 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685 +4 4 4 4 4 4
45686 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45687 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45688 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45689 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45690 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45691 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45692 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45693 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45694 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45695 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45696 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699 +4 4 4 4 4 4
45700 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45701 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45702 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45703 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45704 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45705 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45706 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45707 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45708 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45709 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45710 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713 +4 4 4 4 4 4
45714 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45715 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45716 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45718 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45719 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45720 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45721 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45722 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45723 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45724 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45727 +4 4 4 4 4 4
45728 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45729 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45730 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45731 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45732 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45733 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45734 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45735 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45736 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45737 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45738 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45741 +4 4 4 4 4 4
45742 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45743 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45744 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45745 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45746 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45747 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45748 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45749 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45750 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45751 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45752 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45755 +4 4 4 4 4 4
45756 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45757 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45758 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45759 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45760 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45761 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45762 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45763 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45764 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45765 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45766 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45769 +4 4 4 4 4 4
45770 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45771 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45772 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45773 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45774 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45775 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45776 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45777 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45778 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45779 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45780 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45783 +4 4 4 4 4 4
45784 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45785 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45786 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45787 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45788 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45789 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45790 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45791 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45792 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45793 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45797 +4 4 4 4 4 4
45798 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45799 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45800 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45801 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45802 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45803 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45804 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45805 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45806 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45807 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45811 +4 4 4 4 4 4
45812 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45813 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45814 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45815 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45816 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45817 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45818 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45819 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45820 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45821 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45825 +4 4 4 4 4 4
45826 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45827 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45828 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45829 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45830 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45831 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45832 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45833 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45834 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45835 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45839 +4 4 4 4 4 4
45840 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45841 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45842 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45843 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45844 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45845 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45846 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45847 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45848 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45853 +4 4 4 4 4 4
45854 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45855 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45856 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45857 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45858 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45859 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45860 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45861 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45862 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45867 +4 4 4 4 4 4
45868 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45869 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45870 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45871 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45872 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45873 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45874 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45875 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45876 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45881 +4 4 4 4 4 4
45882 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45883 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45884 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45885 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45886 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45887 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45888 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45889 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45895 +4 4 4 4 4 4
45896 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45897 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45898 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45899 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45900 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45901 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45902 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45903 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45909 +4 4 4 4 4 4
45910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45911 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45912 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45913 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45914 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45915 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45916 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45917 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45923 +4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45926 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45927 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45928 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45929 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45930 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45931 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45937 +4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45940 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45941 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45942 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45943 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45944 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45945 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45951 +4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45955 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45956 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45957 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45958 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45959 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45965 +4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45969 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45970 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45971 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45972 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45984 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45985 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45986 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45993 +4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45998 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45999 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46000 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007 +4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46012 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46013 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46014 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46021 +4 4 4 4 4 4
46022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46026 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46027 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46028 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035 +4 4 4 4 4 4
46036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46041 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46042 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049 +4 4 4 4 4 4
46050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46055 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46056 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063 +4 4 4 4 4 4
46064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46069 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077 +4 4 4 4 4 4
46078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46083 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46091 +4 4 4 4 4 4
46092 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46093 index 443e3c8..c443d6a 100644
46094 --- a/drivers/video/nvidia/nv_backlight.c
46095 +++ b/drivers/video/nvidia/nv_backlight.c
46096 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46097 return bd->props.brightness;
46098 }
46099
46100 -static struct backlight_ops nvidia_bl_ops = {
46101 +static const struct backlight_ops nvidia_bl_ops = {
46102 .get_brightness = nvidia_bl_get_brightness,
46103 .update_status = nvidia_bl_update_status,
46104 };
46105 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46106 index d94c57f..912984c 100644
46107 --- a/drivers/video/riva/fbdev.c
46108 +++ b/drivers/video/riva/fbdev.c
46109 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46110 return bd->props.brightness;
46111 }
46112
46113 -static struct backlight_ops riva_bl_ops = {
46114 +static const struct backlight_ops riva_bl_ops = {
46115 .get_brightness = riva_bl_get_brightness,
46116 .update_status = riva_bl_update_status,
46117 };
46118 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46119 index 54fbb29..2c108fc 100644
46120 --- a/drivers/video/uvesafb.c
46121 +++ b/drivers/video/uvesafb.c
46122 @@ -18,6 +18,7 @@
46123 #include <linux/fb.h>
46124 #include <linux/io.h>
46125 #include <linux/mutex.h>
46126 +#include <linux/moduleloader.h>
46127 #include <video/edid.h>
46128 #include <video/uvesafb.h>
46129 #ifdef CONFIG_X86
46130 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46131 NULL,
46132 };
46133
46134 - return call_usermodehelper(v86d_path, argv, envp, 1);
46135 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46136 }
46137
46138 /*
46139 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46140 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46141 par->pmi_setpal = par->ypan = 0;
46142 } else {
46143 +
46144 +#ifdef CONFIG_PAX_KERNEXEC
46145 +#ifdef CONFIG_MODULES
46146 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46147 +#endif
46148 + if (!par->pmi_code) {
46149 + par->pmi_setpal = par->ypan = 0;
46150 + return 0;
46151 + }
46152 +#endif
46153 +
46154 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46155 + task->t.regs.edi);
46156 +
46157 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46158 + pax_open_kernel();
46159 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46160 + pax_close_kernel();
46161 +
46162 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46163 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46164 +#else
46165 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46166 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46167 +#endif
46168 +
46169 printk(KERN_INFO "uvesafb: protected mode interface info at "
46170 "%04x:%04x\n",
46171 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46172 @@ -1799,6 +1822,11 @@ out:
46173 if (par->vbe_modes)
46174 kfree(par->vbe_modes);
46175
46176 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46177 + if (par->pmi_code)
46178 + module_free_exec(NULL, par->pmi_code);
46179 +#endif
46180 +
46181 framebuffer_release(info);
46182 return err;
46183 }
46184 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46185 kfree(par->vbe_state_orig);
46186 if (par->vbe_state_saved)
46187 kfree(par->vbe_state_saved);
46188 +
46189 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46190 + if (par->pmi_code)
46191 + module_free_exec(NULL, par->pmi_code);
46192 +#endif
46193 +
46194 }
46195
46196 framebuffer_release(info);
46197 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46198 index bd37ee1..cb827e8 100644
46199 --- a/drivers/video/vesafb.c
46200 +++ b/drivers/video/vesafb.c
46201 @@ -9,6 +9,7 @@
46202 */
46203
46204 #include <linux/module.h>
46205 +#include <linux/moduleloader.h>
46206 #include <linux/kernel.h>
46207 #include <linux/errno.h>
46208 #include <linux/string.h>
46209 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46210 static int vram_total __initdata; /* Set total amount of memory */
46211 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46212 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46213 -static void (*pmi_start)(void) __read_mostly;
46214 -static void (*pmi_pal) (void) __read_mostly;
46215 +static void (*pmi_start)(void) __read_only;
46216 +static void (*pmi_pal) (void) __read_only;
46217 static int depth __read_mostly;
46218 static int vga_compat __read_mostly;
46219 /* --------------------------------------------------------------------- */
46220 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46221 unsigned int size_vmode;
46222 unsigned int size_remap;
46223 unsigned int size_total;
46224 + void *pmi_code = NULL;
46225
46226 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46227 return -ENODEV;
46228 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46229 size_remap = size_total;
46230 vesafb_fix.smem_len = size_remap;
46231
46232 -#ifndef __i386__
46233 - screen_info.vesapm_seg = 0;
46234 -#endif
46235 -
46236 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46237 printk(KERN_WARNING
46238 "vesafb: cannot reserve video memory at 0x%lx\n",
46239 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46240 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46241 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46242
46243 +#ifdef __i386__
46244 +
46245 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46246 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46247 + if (!pmi_code)
46248 +#elif !defined(CONFIG_PAX_KERNEXEC)
46249 + if (0)
46250 +#endif
46251 +
46252 +#endif
46253 + screen_info.vesapm_seg = 0;
46254 +
46255 if (screen_info.vesapm_seg) {
46256 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46257 - screen_info.vesapm_seg,screen_info.vesapm_off);
46258 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46259 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46260 }
46261
46262 if (screen_info.vesapm_seg < 0xc000)
46263 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46264
46265 if (ypan || pmi_setpal) {
46266 unsigned short *pmi_base;
46267 +
46268 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46269 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46270 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46271 +
46272 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46273 + pax_open_kernel();
46274 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46275 +#else
46276 + pmi_code = pmi_base;
46277 +#endif
46278 +
46279 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46280 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46281 +
46282 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46283 + pmi_start = ktva_ktla(pmi_start);
46284 + pmi_pal = ktva_ktla(pmi_pal);
46285 + pax_close_kernel();
46286 +#endif
46287 +
46288 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46289 if (pmi_base[3]) {
46290 printk(KERN_INFO "vesafb: pmi: ports = ");
46291 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46292 info->node, info->fix.id);
46293 return 0;
46294 err:
46295 +
46296 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46297 + module_free_exec(NULL, pmi_code);
46298 +#endif
46299 +
46300 if (info->screen_base)
46301 iounmap(info->screen_base);
46302 framebuffer_release(info);
46303 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46304 index 88a60e0..6783cc2 100644
46305 --- a/drivers/xen/sys-hypervisor.c
46306 +++ b/drivers/xen/sys-hypervisor.c
46307 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46308 return 0;
46309 }
46310
46311 -static struct sysfs_ops hyp_sysfs_ops = {
46312 +static const struct sysfs_ops hyp_sysfs_ops = {
46313 .show = hyp_sysfs_show,
46314 .store = hyp_sysfs_store,
46315 };
46316 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46317 index 18f74ec..3227009 100644
46318 --- a/fs/9p/vfs_inode.c
46319 +++ b/fs/9p/vfs_inode.c
46320 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46321 static void
46322 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46323 {
46324 - char *s = nd_get_link(nd);
46325 + const char *s = nd_get_link(nd);
46326
46327 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46328 IS_ERR(s) ? "<error>" : s);
46329 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46330 index bb4cc5b..df5eaa0 100644
46331 --- a/fs/Kconfig.binfmt
46332 +++ b/fs/Kconfig.binfmt
46333 @@ -86,7 +86,7 @@ config HAVE_AOUT
46334
46335 config BINFMT_AOUT
46336 tristate "Kernel support for a.out and ECOFF binaries"
46337 - depends on HAVE_AOUT
46338 + depends on HAVE_AOUT && BROKEN
46339 ---help---
46340 A.out (Assembler.OUTput) is a set of formats for libraries and
46341 executables used in the earliest versions of UNIX. Linux used
46342 diff --git a/fs/aio.c b/fs/aio.c
46343 index 22a19ad..d484e5b 100644
46344 --- a/fs/aio.c
46345 +++ b/fs/aio.c
46346 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46347 size += sizeof(struct io_event) * nr_events;
46348 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46349
46350 - if (nr_pages < 0)
46351 + if (nr_pages <= 0)
46352 return -EINVAL;
46353
46354 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46355 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46356 struct aio_timeout to;
46357 int retry = 0;
46358
46359 + pax_track_stack();
46360 +
46361 /* needed to zero any padding within an entry (there shouldn't be
46362 * any, but C is fun!
46363 */
46364 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46365 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46366 {
46367 ssize_t ret;
46368 + struct iovec iovstack;
46369
46370 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46371 kiocb->ki_nbytes, 1,
46372 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46373 + &iovstack, &kiocb->ki_iovec);
46374 if (ret < 0)
46375 goto out;
46376
46377 + if (kiocb->ki_iovec == &iovstack) {
46378 + kiocb->ki_inline_vec = iovstack;
46379 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46380 + }
46381 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46382 kiocb->ki_cur_seg = 0;
46383 /* ki_nbytes/left now reflect bytes instead of segs */
46384 diff --git a/fs/attr.c b/fs/attr.c
46385 index 96d394b..33cf5b4 100644
46386 --- a/fs/attr.c
46387 +++ b/fs/attr.c
46388 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46389 unsigned long limit;
46390
46391 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46392 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46393 if (limit != RLIM_INFINITY && offset > limit)
46394 goto out_sig;
46395 if (offset > inode->i_sb->s_maxbytes)
46396 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46397 index 4a1401c..05eb5ca 100644
46398 --- a/fs/autofs/root.c
46399 +++ b/fs/autofs/root.c
46400 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46401 set_bit(n,sbi->symlink_bitmap);
46402 sl = &sbi->symlink[n];
46403 sl->len = strlen(symname);
46404 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46405 + slsize = sl->len+1;
46406 + sl->data = kmalloc(slsize, GFP_KERNEL);
46407 if (!sl->data) {
46408 clear_bit(n,sbi->symlink_bitmap);
46409 unlock_kernel();
46410 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46411 index b4ea829..e63ef18 100644
46412 --- a/fs/autofs4/symlink.c
46413 +++ b/fs/autofs4/symlink.c
46414 @@ -15,7 +15,7 @@
46415 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46416 {
46417 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46418 - nd_set_link(nd, (char *)ino->u.symlink);
46419 + nd_set_link(nd, ino->u.symlink);
46420 return NULL;
46421 }
46422
46423 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46424 index 2341375..df9d1c2 100644
46425 --- a/fs/autofs4/waitq.c
46426 +++ b/fs/autofs4/waitq.c
46427 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46428 {
46429 unsigned long sigpipe, flags;
46430 mm_segment_t fs;
46431 - const char *data = (const char *)addr;
46432 + const char __user *data = (const char __force_user *)addr;
46433 ssize_t wr = 0;
46434
46435 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46436 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46437 index 9158c07..3f06659 100644
46438 --- a/fs/befs/linuxvfs.c
46439 +++ b/fs/befs/linuxvfs.c
46440 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46441 {
46442 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46443 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46444 - char *link = nd_get_link(nd);
46445 + const char *link = nd_get_link(nd);
46446 if (!IS_ERR(link))
46447 kfree(link);
46448 }
46449 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46450 index 0133b5a..b3baa9f 100644
46451 --- a/fs/binfmt_aout.c
46452 +++ b/fs/binfmt_aout.c
46453 @@ -16,6 +16,7 @@
46454 #include <linux/string.h>
46455 #include <linux/fs.h>
46456 #include <linux/file.h>
46457 +#include <linux/security.h>
46458 #include <linux/stat.h>
46459 #include <linux/fcntl.h>
46460 #include <linux/ptrace.h>
46461 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46462 #endif
46463 # define START_STACK(u) (u.start_stack)
46464
46465 + memset(&dump, 0, sizeof(dump));
46466 +
46467 fs = get_fs();
46468 set_fs(KERNEL_DS);
46469 has_dumped = 1;
46470 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46471
46472 /* If the size of the dump file exceeds the rlimit, then see what would happen
46473 if we wrote the stack, but not the data area. */
46474 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46475 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46476 dump.u_dsize = 0;
46477
46478 /* Make sure we have enough room to write the stack and data areas. */
46479 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46480 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46481 dump.u_ssize = 0;
46482
46483 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46484 dump_size = dump.u_ssize << PAGE_SHIFT;
46485 DUMP_WRITE(dump_start,dump_size);
46486 }
46487 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46488 - set_fs(KERNEL_DS);
46489 - DUMP_WRITE(current,sizeof(*current));
46490 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46491 end_coredump:
46492 set_fs(fs);
46493 return has_dumped;
46494 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46495 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46496 if (rlim >= RLIM_INFINITY)
46497 rlim = ~0;
46498 +
46499 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46500 if (ex.a_data + ex.a_bss > rlim)
46501 return -ENOMEM;
46502
46503 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46504 install_exec_creds(bprm);
46505 current->flags &= ~PF_FORKNOEXEC;
46506
46507 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46508 + current->mm->pax_flags = 0UL;
46509 +#endif
46510 +
46511 +#ifdef CONFIG_PAX_PAGEEXEC
46512 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46513 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46514 +
46515 +#ifdef CONFIG_PAX_EMUTRAMP
46516 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46517 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46518 +#endif
46519 +
46520 +#ifdef CONFIG_PAX_MPROTECT
46521 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46522 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46523 +#endif
46524 +
46525 + }
46526 +#endif
46527 +
46528 if (N_MAGIC(ex) == OMAGIC) {
46529 unsigned long text_addr, map_size;
46530 loff_t pos;
46531 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46532
46533 down_write(&current->mm->mmap_sem);
46534 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46535 - PROT_READ | PROT_WRITE | PROT_EXEC,
46536 + PROT_READ | PROT_WRITE,
46537 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46538 fd_offset + ex.a_text);
46539 up_write(&current->mm->mmap_sem);
46540 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46541 index 1ed37ba..de82ab7 100644
46542 --- a/fs/binfmt_elf.c
46543 +++ b/fs/binfmt_elf.c
46544 @@ -31,6 +31,7 @@
46545 #include <linux/random.h>
46546 #include <linux/elf.h>
46547 #include <linux/utsname.h>
46548 +#include <linux/xattr.h>
46549 #include <asm/uaccess.h>
46550 #include <asm/param.h>
46551 #include <asm/page.h>
46552 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46553 #define elf_core_dump NULL
46554 #endif
46555
46556 +#ifdef CONFIG_PAX_MPROTECT
46557 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46558 +#endif
46559 +
46560 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46561 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46562 #else
46563 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46564 .load_binary = load_elf_binary,
46565 .load_shlib = load_elf_library,
46566 .core_dump = elf_core_dump,
46567 +
46568 +#ifdef CONFIG_PAX_MPROTECT
46569 + .handle_mprotect= elf_handle_mprotect,
46570 +#endif
46571 +
46572 .min_coredump = ELF_EXEC_PAGESIZE,
46573 .hasvdso = 1
46574 };
46575 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46576
46577 static int set_brk(unsigned long start, unsigned long end)
46578 {
46579 + unsigned long e = end;
46580 +
46581 start = ELF_PAGEALIGN(start);
46582 end = ELF_PAGEALIGN(end);
46583 if (end > start) {
46584 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46585 if (BAD_ADDR(addr))
46586 return addr;
46587 }
46588 - current->mm->start_brk = current->mm->brk = end;
46589 + current->mm->start_brk = current->mm->brk = e;
46590 return 0;
46591 }
46592
46593 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46594 elf_addr_t __user *u_rand_bytes;
46595 const char *k_platform = ELF_PLATFORM;
46596 const char *k_base_platform = ELF_BASE_PLATFORM;
46597 - unsigned char k_rand_bytes[16];
46598 + u32 k_rand_bytes[4];
46599 int items;
46600 elf_addr_t *elf_info;
46601 int ei_index = 0;
46602 const struct cred *cred = current_cred();
46603 struct vm_area_struct *vma;
46604 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46605 +
46606 + pax_track_stack();
46607
46608 /*
46609 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46610 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46611 * Generate 16 random bytes for userspace PRNG seeding.
46612 */
46613 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46614 - u_rand_bytes = (elf_addr_t __user *)
46615 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46616 + srandom32(k_rand_bytes[0] ^ random32());
46617 + srandom32(k_rand_bytes[1] ^ random32());
46618 + srandom32(k_rand_bytes[2] ^ random32());
46619 + srandom32(k_rand_bytes[3] ^ random32());
46620 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46621 + u_rand_bytes = (elf_addr_t __user *) p;
46622 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46623 return -EFAULT;
46624
46625 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46626 return -EFAULT;
46627 current->mm->env_end = p;
46628
46629 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46630 +
46631 /* Put the elf_info on the stack in the right place. */
46632 sp = (elf_addr_t __user *)envp + 1;
46633 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46634 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46635 return -EFAULT;
46636 return 0;
46637 }
46638 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46639 {
46640 struct elf_phdr *elf_phdata;
46641 struct elf_phdr *eppnt;
46642 - unsigned long load_addr = 0;
46643 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46644 int load_addr_set = 0;
46645 unsigned long last_bss = 0, elf_bss = 0;
46646 - unsigned long error = ~0UL;
46647 + unsigned long error = -EINVAL;
46648 unsigned long total_size;
46649 int retval, i, size;
46650
46651 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46652 goto out_close;
46653 }
46654
46655 +#ifdef CONFIG_PAX_SEGMEXEC
46656 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46657 + pax_task_size = SEGMEXEC_TASK_SIZE;
46658 +#endif
46659 +
46660 eppnt = elf_phdata;
46661 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46662 if (eppnt->p_type == PT_LOAD) {
46663 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46664 k = load_addr + eppnt->p_vaddr;
46665 if (BAD_ADDR(k) ||
46666 eppnt->p_filesz > eppnt->p_memsz ||
46667 - eppnt->p_memsz > TASK_SIZE ||
46668 - TASK_SIZE - eppnt->p_memsz < k) {
46669 + eppnt->p_memsz > pax_task_size ||
46670 + pax_task_size - eppnt->p_memsz < k) {
46671 error = -ENOMEM;
46672 goto out_close;
46673 }
46674 @@ -532,6 +558,351 @@ out:
46675 return error;
46676 }
46677
46678 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46679 +{
46680 + unsigned long pax_flags = 0UL;
46681 +
46682 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46683 +
46684 +#ifdef CONFIG_PAX_PAGEEXEC
46685 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46686 + pax_flags |= MF_PAX_PAGEEXEC;
46687 +#endif
46688 +
46689 +#ifdef CONFIG_PAX_SEGMEXEC
46690 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46691 + pax_flags |= MF_PAX_SEGMEXEC;
46692 +#endif
46693 +
46694 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46695 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46696 + if (nx_enabled)
46697 + pax_flags &= ~MF_PAX_SEGMEXEC;
46698 + else
46699 + pax_flags &= ~MF_PAX_PAGEEXEC;
46700 + }
46701 +#endif
46702 +
46703 +#ifdef CONFIG_PAX_EMUTRAMP
46704 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46705 + pax_flags |= MF_PAX_EMUTRAMP;
46706 +#endif
46707 +
46708 +#ifdef CONFIG_PAX_MPROTECT
46709 + if (elf_phdata->p_flags & PF_MPROTECT)
46710 + pax_flags |= MF_PAX_MPROTECT;
46711 +#endif
46712 +
46713 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46714 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46715 + pax_flags |= MF_PAX_RANDMMAP;
46716 +#endif
46717 +
46718 +#endif
46719 +
46720 + return pax_flags;
46721 +}
46722 +
46723 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46724 +{
46725 + unsigned long pax_flags = 0UL;
46726 +
46727 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46728 +
46729 +#ifdef CONFIG_PAX_PAGEEXEC
46730 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46731 + pax_flags |= MF_PAX_PAGEEXEC;
46732 +#endif
46733 +
46734 +#ifdef CONFIG_PAX_SEGMEXEC
46735 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46736 + pax_flags |= MF_PAX_SEGMEXEC;
46737 +#endif
46738 +
46739 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46740 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46741 + if (nx_enabled)
46742 + pax_flags &= ~MF_PAX_SEGMEXEC;
46743 + else
46744 + pax_flags &= ~MF_PAX_PAGEEXEC;
46745 + }
46746 +#endif
46747 +
46748 +#ifdef CONFIG_PAX_EMUTRAMP
46749 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46750 + pax_flags |= MF_PAX_EMUTRAMP;
46751 +#endif
46752 +
46753 +#ifdef CONFIG_PAX_MPROTECT
46754 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46755 + pax_flags |= MF_PAX_MPROTECT;
46756 +#endif
46757 +
46758 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46759 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46760 + pax_flags |= MF_PAX_RANDMMAP;
46761 +#endif
46762 +
46763 +#endif
46764 +
46765 + return pax_flags;
46766 +}
46767 +
46768 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46769 +{
46770 + unsigned long pax_flags = 0UL;
46771 +
46772 +#ifdef CONFIG_PAX_EI_PAX
46773 +
46774 +#ifdef CONFIG_PAX_PAGEEXEC
46775 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46776 + pax_flags |= MF_PAX_PAGEEXEC;
46777 +#endif
46778 +
46779 +#ifdef CONFIG_PAX_SEGMEXEC
46780 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46781 + pax_flags |= MF_PAX_SEGMEXEC;
46782 +#endif
46783 +
46784 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46785 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46786 + if (nx_enabled)
46787 + pax_flags &= ~MF_PAX_SEGMEXEC;
46788 + else
46789 + pax_flags &= ~MF_PAX_PAGEEXEC;
46790 + }
46791 +#endif
46792 +
46793 +#ifdef CONFIG_PAX_EMUTRAMP
46794 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46795 + pax_flags |= MF_PAX_EMUTRAMP;
46796 +#endif
46797 +
46798 +#ifdef CONFIG_PAX_MPROTECT
46799 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46800 + pax_flags |= MF_PAX_MPROTECT;
46801 +#endif
46802 +
46803 +#ifdef CONFIG_PAX_ASLR
46804 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46805 + pax_flags |= MF_PAX_RANDMMAP;
46806 +#endif
46807 +
46808 +#else
46809 +
46810 +#ifdef CONFIG_PAX_PAGEEXEC
46811 + pax_flags |= MF_PAX_PAGEEXEC;
46812 +#endif
46813 +
46814 +#ifdef CONFIG_PAX_MPROTECT
46815 + pax_flags |= MF_PAX_MPROTECT;
46816 +#endif
46817 +
46818 +#ifdef CONFIG_PAX_RANDMMAP
46819 + pax_flags |= MF_PAX_RANDMMAP;
46820 +#endif
46821 +
46822 +#ifdef CONFIG_PAX_SEGMEXEC
46823 + if (!(__supported_pte_mask & _PAGE_NX)) {
46824 + pax_flags &= ~MF_PAX_PAGEEXEC;
46825 + pax_flags |= MF_PAX_SEGMEXEC;
46826 + }
46827 +#endif
46828 +
46829 +#endif
46830 +
46831 + return pax_flags;
46832 +}
46833 +
46834 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46835 +{
46836 +
46837 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46838 + unsigned long i;
46839 +
46840 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46841 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46842 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46843 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46844 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46845 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46846 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46847 + return ~0UL;
46848 +
46849 +#ifdef CONFIG_PAX_SOFTMODE
46850 + if (pax_softmode)
46851 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46852 + else
46853 +#endif
46854 +
46855 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46856 + break;
46857 + }
46858 +#endif
46859 +
46860 + return ~0UL;
46861 +}
46862 +
46863 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46864 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46865 +{
46866 + unsigned long pax_flags = 0UL;
46867 +
46868 +#ifdef CONFIG_PAX_PAGEEXEC
46869 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46870 + pax_flags |= MF_PAX_PAGEEXEC;
46871 +#endif
46872 +
46873 +#ifdef CONFIG_PAX_SEGMEXEC
46874 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46875 + pax_flags |= MF_PAX_SEGMEXEC;
46876 +#endif
46877 +
46878 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46879 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46880 + if ((__supported_pte_mask & _PAGE_NX))
46881 + pax_flags &= ~MF_PAX_SEGMEXEC;
46882 + else
46883 + pax_flags &= ~MF_PAX_PAGEEXEC;
46884 + }
46885 +#endif
46886 +
46887 +#ifdef CONFIG_PAX_EMUTRAMP
46888 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46889 + pax_flags |= MF_PAX_EMUTRAMP;
46890 +#endif
46891 +
46892 +#ifdef CONFIG_PAX_MPROTECT
46893 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46894 + pax_flags |= MF_PAX_MPROTECT;
46895 +#endif
46896 +
46897 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46898 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46899 + pax_flags |= MF_PAX_RANDMMAP;
46900 +#endif
46901 +
46902 + return pax_flags;
46903 +}
46904 +
46905 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46906 +{
46907 + unsigned long pax_flags = 0UL;
46908 +
46909 +#ifdef CONFIG_PAX_PAGEEXEC
46910 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46911 + pax_flags |= MF_PAX_PAGEEXEC;
46912 +#endif
46913 +
46914 +#ifdef CONFIG_PAX_SEGMEXEC
46915 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46916 + pax_flags |= MF_PAX_SEGMEXEC;
46917 +#endif
46918 +
46919 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46920 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46921 + if ((__supported_pte_mask & _PAGE_NX))
46922 + pax_flags &= ~MF_PAX_SEGMEXEC;
46923 + else
46924 + pax_flags &= ~MF_PAX_PAGEEXEC;
46925 + }
46926 +#endif
46927 +
46928 +#ifdef CONFIG_PAX_EMUTRAMP
46929 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46930 + pax_flags |= MF_PAX_EMUTRAMP;
46931 +#endif
46932 +
46933 +#ifdef CONFIG_PAX_MPROTECT
46934 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46935 + pax_flags |= MF_PAX_MPROTECT;
46936 +#endif
46937 +
46938 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46939 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46940 + pax_flags |= MF_PAX_RANDMMAP;
46941 +#endif
46942 +
46943 + return pax_flags;
46944 +}
46945 +#endif
46946 +
46947 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46948 +{
46949 +
46950 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46951 + ssize_t xattr_size, i;
46952 + unsigned char xattr_value[5];
46953 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46954 +
46955 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46956 + if (xattr_size <= 0)
46957 + return ~0UL;
46958 +
46959 + for (i = 0; i < xattr_size; i++)
46960 + switch (xattr_value[i]) {
46961 + default:
46962 + return ~0UL;
46963 +
46964 +#define parse_flag(option1, option2, flag) \
46965 + case option1: \
46966 + pax_flags_hardmode |= MF_PAX_##flag; \
46967 + break; \
46968 + case option2: \
46969 + pax_flags_softmode |= MF_PAX_##flag; \
46970 + break;
46971 +
46972 + parse_flag('p', 'P', PAGEEXEC);
46973 + parse_flag('e', 'E', EMUTRAMP);
46974 + parse_flag('m', 'M', MPROTECT);
46975 + parse_flag('r', 'R', RANDMMAP);
46976 + parse_flag('s', 'S', SEGMEXEC);
46977 +
46978 +#undef parse_flag
46979 + }
46980 +
46981 + if (pax_flags_hardmode & pax_flags_softmode)
46982 + return ~0UL;
46983 +
46984 +#ifdef CONFIG_PAX_SOFTMODE
46985 + if (pax_softmode)
46986 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46987 + else
46988 +#endif
46989 +
46990 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46991 +#else
46992 + return ~0UL;
46993 +#endif
46994 +
46995 +}
46996 +
46997 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46998 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46999 +{
47000 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47001 +
47002 + pax_flags = pax_parse_ei_pax(elf_ex);
47003 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47004 + xattr_pax_flags = pax_parse_xattr_pax(file);
47005 +
47006 + if (pt_pax_flags == ~0UL)
47007 + pt_pax_flags = xattr_pax_flags;
47008 + else if (xattr_pax_flags == ~0UL)
47009 + xattr_pax_flags = pt_pax_flags;
47010 + if (pt_pax_flags != xattr_pax_flags)
47011 + return -EINVAL;
47012 + if (pt_pax_flags != ~0UL)
47013 + pax_flags = pt_pax_flags;
47014 +
47015 + if (0 > pax_check_flags(&pax_flags))
47016 + return -EINVAL;
47017 +
47018 + current->mm->pax_flags = pax_flags;
47019 + return 0;
47020 +}
47021 +#endif
47022 +
47023 /*
47024 * These are the functions used to load ELF style executables and shared
47025 * libraries. There is no binary dependent code anywhere else.
47026 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47027 {
47028 unsigned int random_variable = 0;
47029
47030 +#ifdef CONFIG_PAX_RANDUSTACK
47031 + if (randomize_va_space)
47032 + return stack_top - current->mm->delta_stack;
47033 +#endif
47034 +
47035 if ((current->flags & PF_RANDOMIZE) &&
47036 !(current->personality & ADDR_NO_RANDOMIZE)) {
47037 random_variable = get_random_int() & STACK_RND_MASK;
47038 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47039 unsigned long load_addr = 0, load_bias = 0;
47040 int load_addr_set = 0;
47041 char * elf_interpreter = NULL;
47042 - unsigned long error;
47043 + unsigned long error = 0;
47044 struct elf_phdr *elf_ppnt, *elf_phdata;
47045 unsigned long elf_bss, elf_brk;
47046 int retval, i;
47047 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47048 unsigned long start_code, end_code, start_data, end_data;
47049 unsigned long reloc_func_desc = 0;
47050 int executable_stack = EXSTACK_DEFAULT;
47051 - unsigned long def_flags = 0;
47052 struct {
47053 struct elfhdr elf_ex;
47054 struct elfhdr interp_elf_ex;
47055 } *loc;
47056 + unsigned long pax_task_size = TASK_SIZE;
47057
47058 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47059 if (!loc) {
47060 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47061
47062 /* OK, This is the point of no return */
47063 current->flags &= ~PF_FORKNOEXEC;
47064 - current->mm->def_flags = def_flags;
47065 +
47066 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47067 + current->mm->pax_flags = 0UL;
47068 +#endif
47069 +
47070 +#ifdef CONFIG_PAX_DLRESOLVE
47071 + current->mm->call_dl_resolve = 0UL;
47072 +#endif
47073 +
47074 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47075 + current->mm->call_syscall = 0UL;
47076 +#endif
47077 +
47078 +#ifdef CONFIG_PAX_ASLR
47079 + current->mm->delta_mmap = 0UL;
47080 + current->mm->delta_stack = 0UL;
47081 +#endif
47082 +
47083 + current->mm->def_flags = 0;
47084 +
47085 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47086 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47087 + send_sig(SIGKILL, current, 0);
47088 + goto out_free_dentry;
47089 + }
47090 +#endif
47091 +
47092 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47093 + pax_set_initial_flags(bprm);
47094 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47095 + if (pax_set_initial_flags_func)
47096 + (pax_set_initial_flags_func)(bprm);
47097 +#endif
47098 +
47099 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47100 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47101 + current->mm->context.user_cs_limit = PAGE_SIZE;
47102 + current->mm->def_flags |= VM_PAGEEXEC;
47103 + }
47104 +#endif
47105 +
47106 +#ifdef CONFIG_PAX_SEGMEXEC
47107 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47108 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47109 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47110 + pax_task_size = SEGMEXEC_TASK_SIZE;
47111 + }
47112 +#endif
47113 +
47114 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47115 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47116 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47117 + put_cpu();
47118 + }
47119 +#endif
47120
47121 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47122 may depend on the personality. */
47123 SET_PERSONALITY(loc->elf_ex);
47124 +
47125 +#ifdef CONFIG_PAX_ASLR
47126 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47127 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47128 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47129 + }
47130 +#endif
47131 +
47132 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47133 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47134 + executable_stack = EXSTACK_DISABLE_X;
47135 + current->personality &= ~READ_IMPLIES_EXEC;
47136 + } else
47137 +#endif
47138 +
47139 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47140 current->personality |= READ_IMPLIES_EXEC;
47141
47142 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47143 * might try to exec. This is because the brk will
47144 * follow the loader, and is not movable. */
47145 #ifdef CONFIG_X86
47146 - load_bias = 0;
47147 + if (current->flags & PF_RANDOMIZE)
47148 + load_bias = 0;
47149 + else
47150 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47151 #else
47152 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47153 #endif
47154 +
47155 +#ifdef CONFIG_PAX_RANDMMAP
47156 + /* PaX: randomize base address at the default exe base if requested */
47157 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47158 +#ifdef CONFIG_SPARC64
47159 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47160 +#else
47161 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47162 +#endif
47163 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47164 + elf_flags |= MAP_FIXED;
47165 + }
47166 +#endif
47167 +
47168 }
47169
47170 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47171 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47172 * allowed task size. Note that p_filesz must always be
47173 * <= p_memsz so it is only necessary to check p_memsz.
47174 */
47175 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47176 - elf_ppnt->p_memsz > TASK_SIZE ||
47177 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47178 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47179 + elf_ppnt->p_memsz > pax_task_size ||
47180 + pax_task_size - elf_ppnt->p_memsz < k) {
47181 /* set_brk can never work. Avoid overflows. */
47182 send_sig(SIGKILL, current, 0);
47183 retval = -EINVAL;
47184 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47185 start_data += load_bias;
47186 end_data += load_bias;
47187
47188 +#ifdef CONFIG_PAX_RANDMMAP
47189 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47190 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47191 +#endif
47192 +
47193 /* Calling set_brk effectively mmaps the pages that we need
47194 * for the bss and break sections. We must do this before
47195 * mapping in the interpreter, to make sure it doesn't wind
47196 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47197 goto out_free_dentry;
47198 }
47199 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47200 - send_sig(SIGSEGV, current, 0);
47201 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47202 - goto out_free_dentry;
47203 + /*
47204 + * This bss-zeroing can fail if the ELF
47205 + * file specifies odd protections. So
47206 + * we don't check the return value
47207 + */
47208 }
47209
47210 if (elf_interpreter) {
47211 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47212 unsigned long n = off;
47213 if (n > PAGE_SIZE)
47214 n = PAGE_SIZE;
47215 - if (!dump_write(file, buf, n))
47216 + if (!dump_write(file, buf, n)) {
47217 + free_page((unsigned long)buf);
47218 return 0;
47219 + }
47220 off -= n;
47221 }
47222 free_page((unsigned long)buf);
47223 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47224 * Decide what to dump of a segment, part, all or none.
47225 */
47226 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47227 - unsigned long mm_flags)
47228 + unsigned long mm_flags, long signr)
47229 {
47230 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47231
47232 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47233 if (vma->vm_file == NULL)
47234 return 0;
47235
47236 - if (FILTER(MAPPED_PRIVATE))
47237 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47238 goto whole;
47239
47240 /*
47241 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47242 #undef DUMP_WRITE
47243
47244 #define DUMP_WRITE(addr, nr) \
47245 + do { \
47246 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47247 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47248 - goto end_coredump;
47249 + goto end_coredump; \
47250 + } while (0);
47251
47252 static void fill_elf_header(struct elfhdr *elf, int segs,
47253 u16 machine, u32 flags, u8 osabi)
47254 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47255 {
47256 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47257 int i = 0;
47258 - do
47259 + do {
47260 i += 2;
47261 - while (auxv[i - 2] != AT_NULL);
47262 + } while (auxv[i - 2] != AT_NULL);
47263 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47264 }
47265
47266 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47267 phdr.p_offset = offset;
47268 phdr.p_vaddr = vma->vm_start;
47269 phdr.p_paddr = 0;
47270 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47271 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47272 phdr.p_memsz = vma->vm_end - vma->vm_start;
47273 offset += phdr.p_filesz;
47274 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47275 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47276 unsigned long addr;
47277 unsigned long end;
47278
47279 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47280 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47281
47282 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47283 struct page *page;
47284 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47285 page = get_dump_page(addr);
47286 if (page) {
47287 void *kaddr = kmap(page);
47288 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47289 stop = ((size += PAGE_SIZE) > limit) ||
47290 !dump_write(file, kaddr, PAGE_SIZE);
47291 kunmap(page);
47292 @@ -2042,6 +2517,97 @@ out:
47293
47294 #endif /* USE_ELF_CORE_DUMP */
47295
47296 +#ifdef CONFIG_PAX_MPROTECT
47297 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47298 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47299 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47300 + *
47301 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47302 + * basis because we want to allow the common case and not the special ones.
47303 + */
47304 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47305 +{
47306 + struct elfhdr elf_h;
47307 + struct elf_phdr elf_p;
47308 + unsigned long i;
47309 + unsigned long oldflags;
47310 + bool is_textrel_rw, is_textrel_rx, is_relro;
47311 +
47312 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47313 + return;
47314 +
47315 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47316 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47317 +
47318 +#ifdef CONFIG_PAX_ELFRELOCS
47319 + /* possible TEXTREL */
47320 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47321 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47322 +#else
47323 + is_textrel_rw = false;
47324 + is_textrel_rx = false;
47325 +#endif
47326 +
47327 + /* possible RELRO */
47328 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47329 +
47330 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47331 + return;
47332 +
47333 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47334 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47335 +
47336 +#ifdef CONFIG_PAX_ETEXECRELOCS
47337 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47338 +#else
47339 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47340 +#endif
47341 +
47342 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47343 + !elf_check_arch(&elf_h) ||
47344 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47345 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47346 + return;
47347 +
47348 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47349 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47350 + return;
47351 + switch (elf_p.p_type) {
47352 + case PT_DYNAMIC:
47353 + if (!is_textrel_rw && !is_textrel_rx)
47354 + continue;
47355 + i = 0UL;
47356 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47357 + elf_dyn dyn;
47358 +
47359 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47360 + return;
47361 + if (dyn.d_tag == DT_NULL)
47362 + return;
47363 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47364 + gr_log_textrel(vma);
47365 + if (is_textrel_rw)
47366 + vma->vm_flags |= VM_MAYWRITE;
47367 + else
47368 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47369 + vma->vm_flags &= ~VM_MAYWRITE;
47370 + return;
47371 + }
47372 + i++;
47373 + }
47374 + return;
47375 +
47376 + case PT_GNU_RELRO:
47377 + if (!is_relro)
47378 + continue;
47379 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47380 + vma->vm_flags &= ~VM_MAYWRITE;
47381 + return;
47382 + }
47383 + }
47384 +}
47385 +#endif
47386 +
47387 static int __init init_elf_binfmt(void)
47388 {
47389 return register_binfmt(&elf_format);
47390 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47391 index ca88c46..f155a60 100644
47392 --- a/fs/binfmt_flat.c
47393 +++ b/fs/binfmt_flat.c
47394 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47395 realdatastart = (unsigned long) -ENOMEM;
47396 printk("Unable to allocate RAM for process data, errno %d\n",
47397 (int)-realdatastart);
47398 + down_write(&current->mm->mmap_sem);
47399 do_munmap(current->mm, textpos, text_len);
47400 + up_write(&current->mm->mmap_sem);
47401 ret = realdatastart;
47402 goto err;
47403 }
47404 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47405 }
47406 if (IS_ERR_VALUE(result)) {
47407 printk("Unable to read data+bss, errno %d\n", (int)-result);
47408 + down_write(&current->mm->mmap_sem);
47409 do_munmap(current->mm, textpos, text_len);
47410 do_munmap(current->mm, realdatastart, data_len + extra);
47411 + up_write(&current->mm->mmap_sem);
47412 ret = result;
47413 goto err;
47414 }
47415 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47416 }
47417 if (IS_ERR_VALUE(result)) {
47418 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47419 + down_write(&current->mm->mmap_sem);
47420 do_munmap(current->mm, textpos, text_len + data_len + extra +
47421 MAX_SHARED_LIBS * sizeof(unsigned long));
47422 + up_write(&current->mm->mmap_sem);
47423 ret = result;
47424 goto err;
47425 }
47426 diff --git a/fs/bio.c b/fs/bio.c
47427 index e696713..83de133 100644
47428 --- a/fs/bio.c
47429 +++ b/fs/bio.c
47430 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47431
47432 i = 0;
47433 while (i < bio_slab_nr) {
47434 - struct bio_slab *bslab = &bio_slabs[i];
47435 + bslab = &bio_slabs[i];
47436
47437 if (!bslab->slab && entry == -1)
47438 entry = i;
47439 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47440 const int read = bio_data_dir(bio) == READ;
47441 struct bio_map_data *bmd = bio->bi_private;
47442 int i;
47443 - char *p = bmd->sgvecs[0].iov_base;
47444 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47445
47446 __bio_for_each_segment(bvec, bio, i, 0) {
47447 char *addr = page_address(bvec->bv_page);
47448 diff --git a/fs/block_dev.c b/fs/block_dev.c
47449 index e65efa2..04fae57 100644
47450 --- a/fs/block_dev.c
47451 +++ b/fs/block_dev.c
47452 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47453 else if (bdev->bd_contains == bdev)
47454 res = 0; /* is a whole device which isn't held */
47455
47456 - else if (bdev->bd_contains->bd_holder == bd_claim)
47457 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47458 res = 0; /* is a partition of a device that is being partitioned */
47459 else if (bdev->bd_contains->bd_holder != NULL)
47460 res = -EBUSY; /* is a partition of a held device */
47461 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47462 index c4bc570..42acd8d 100644
47463 --- a/fs/btrfs/ctree.c
47464 +++ b/fs/btrfs/ctree.c
47465 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47466 free_extent_buffer(buf);
47467 add_root_to_dirty_list(root);
47468 } else {
47469 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47470 - parent_start = parent->start;
47471 - else
47472 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47473 + if (parent)
47474 + parent_start = parent->start;
47475 + else
47476 + parent_start = 0;
47477 + } else
47478 parent_start = 0;
47479
47480 WARN_ON(trans->transid != btrfs_header_generation(parent));
47481 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47482
47483 ret = 0;
47484 if (slot == 0) {
47485 - struct btrfs_disk_key disk_key;
47486 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47487 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47488 }
47489 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47490 index f447188..59c17c5 100644
47491 --- a/fs/btrfs/disk-io.c
47492 +++ b/fs/btrfs/disk-io.c
47493 @@ -39,7 +39,7 @@
47494 #include "tree-log.h"
47495 #include "free-space-cache.h"
47496
47497 -static struct extent_io_ops btree_extent_io_ops;
47498 +static const struct extent_io_ops btree_extent_io_ops;
47499 static void end_workqueue_fn(struct btrfs_work *work);
47500 static void free_fs_root(struct btrfs_root *root);
47501
47502 @@ -2607,7 +2607,7 @@ out:
47503 return 0;
47504 }
47505
47506 -static struct extent_io_ops btree_extent_io_ops = {
47507 +static const struct extent_io_ops btree_extent_io_ops = {
47508 .write_cache_pages_lock_hook = btree_lock_page_hook,
47509 .readpage_end_io_hook = btree_readpage_end_io_hook,
47510 .submit_bio_hook = btree_submit_bio_hook,
47511 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47512 index 559f724..a026171 100644
47513 --- a/fs/btrfs/extent-tree.c
47514 +++ b/fs/btrfs/extent-tree.c
47515 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47516 u64 group_start = group->key.objectid;
47517 new_extents = kmalloc(sizeof(*new_extents),
47518 GFP_NOFS);
47519 + if (!new_extents) {
47520 + ret = -ENOMEM;
47521 + goto out;
47522 + }
47523 nr_extents = 1;
47524 ret = get_new_locations(reloc_inode,
47525 extent_key,
47526 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47527 index 36de250..7ec75c7 100644
47528 --- a/fs/btrfs/extent_io.h
47529 +++ b/fs/btrfs/extent_io.h
47530 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47531 struct bio *bio, int mirror_num,
47532 unsigned long bio_flags);
47533 struct extent_io_ops {
47534 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47535 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47536 u64 start, u64 end, int *page_started,
47537 unsigned long *nr_written);
47538 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47539 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47540 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47541 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47542 extent_submit_bio_hook_t *submit_bio_hook;
47543 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47544 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47545 size_t size, struct bio *bio,
47546 unsigned long bio_flags);
47547 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47548 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47549 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47550 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47551 u64 start, u64 end,
47552 struct extent_state *state);
47553 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47554 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47555 u64 start, u64 end,
47556 struct extent_state *state);
47557 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47558 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47559 struct extent_state *state);
47560 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47561 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47562 struct extent_state *state, int uptodate);
47563 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47564 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47565 unsigned long old, unsigned long bits);
47566 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47567 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47568 unsigned long bits);
47569 - int (*merge_extent_hook)(struct inode *inode,
47570 + int (* const merge_extent_hook)(struct inode *inode,
47571 struct extent_state *new,
47572 struct extent_state *other);
47573 - int (*split_extent_hook)(struct inode *inode,
47574 + int (* const split_extent_hook)(struct inode *inode,
47575 struct extent_state *orig, u64 split);
47576 - int (*write_cache_pages_lock_hook)(struct page *page);
47577 + int (* const write_cache_pages_lock_hook)(struct page *page);
47578 };
47579
47580 struct extent_io_tree {
47581 @@ -88,7 +88,7 @@ struct extent_io_tree {
47582 u64 dirty_bytes;
47583 spinlock_t lock;
47584 spinlock_t buffer_lock;
47585 - struct extent_io_ops *ops;
47586 + const struct extent_io_ops *ops;
47587 };
47588
47589 struct extent_state {
47590 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47591 index cb2849f..3718fb4 100644
47592 --- a/fs/btrfs/free-space-cache.c
47593 +++ b/fs/btrfs/free-space-cache.c
47594 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47595
47596 while(1) {
47597 if (entry->bytes < bytes || entry->offset < min_start) {
47598 - struct rb_node *node;
47599 -
47600 node = rb_next(&entry->offset_index);
47601 if (!node)
47602 break;
47603 @@ -1226,7 +1224,7 @@ again:
47604 */
47605 while (entry->bitmap || found_bitmap ||
47606 (!entry->bitmap && entry->bytes < min_bytes)) {
47607 - struct rb_node *node = rb_next(&entry->offset_index);
47608 + node = rb_next(&entry->offset_index);
47609
47610 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47611 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47612 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47613 index e03a836..323837e 100644
47614 --- a/fs/btrfs/inode.c
47615 +++ b/fs/btrfs/inode.c
47616 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47617 static const struct address_space_operations btrfs_aops;
47618 static const struct address_space_operations btrfs_symlink_aops;
47619 static const struct file_operations btrfs_dir_file_operations;
47620 -static struct extent_io_ops btrfs_extent_io_ops;
47621 +static const struct extent_io_ops btrfs_extent_io_ops;
47622
47623 static struct kmem_cache *btrfs_inode_cachep;
47624 struct kmem_cache *btrfs_trans_handle_cachep;
47625 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47626 1, 0, NULL, GFP_NOFS);
47627 while (start < end) {
47628 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47629 + BUG_ON(!async_cow);
47630 async_cow->inode = inode;
47631 async_cow->root = root;
47632 async_cow->locked_page = locked_page;
47633 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47634 inline_size = btrfs_file_extent_inline_item_len(leaf,
47635 btrfs_item_nr(leaf, path->slots[0]));
47636 tmp = kmalloc(inline_size, GFP_NOFS);
47637 + if (!tmp)
47638 + return -ENOMEM;
47639 ptr = btrfs_file_extent_inline_start(item);
47640
47641 read_extent_buffer(leaf, tmp, ptr, inline_size);
47642 @@ -5410,7 +5413,7 @@ fail:
47643 return -ENOMEM;
47644 }
47645
47646 -static int btrfs_getattr(struct vfsmount *mnt,
47647 +int btrfs_getattr(struct vfsmount *mnt,
47648 struct dentry *dentry, struct kstat *stat)
47649 {
47650 struct inode *inode = dentry->d_inode;
47651 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47652 return 0;
47653 }
47654
47655 +EXPORT_SYMBOL(btrfs_getattr);
47656 +
47657 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47658 +{
47659 + return BTRFS_I(inode)->root->anon_super.s_dev;
47660 +}
47661 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47662 +
47663 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47664 struct inode *new_dir, struct dentry *new_dentry)
47665 {
47666 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47667 .fsync = btrfs_sync_file,
47668 };
47669
47670 -static struct extent_io_ops btrfs_extent_io_ops = {
47671 +static const struct extent_io_ops btrfs_extent_io_ops = {
47672 .fill_delalloc = run_delalloc_range,
47673 .submit_bio_hook = btrfs_submit_bio_hook,
47674 .merge_bio_hook = btrfs_merge_bio_hook,
47675 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47676 index ab7ab53..94e0781 100644
47677 --- a/fs/btrfs/relocation.c
47678 +++ b/fs/btrfs/relocation.c
47679 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47680 }
47681 spin_unlock(&rc->reloc_root_tree.lock);
47682
47683 - BUG_ON((struct btrfs_root *)node->data != root);
47684 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47685
47686 if (!del) {
47687 spin_lock(&rc->reloc_root_tree.lock);
47688 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47689 index a240b6f..4ce16ef 100644
47690 --- a/fs/btrfs/sysfs.c
47691 +++ b/fs/btrfs/sysfs.c
47692 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47693 complete(&root->kobj_unregister);
47694 }
47695
47696 -static struct sysfs_ops btrfs_super_attr_ops = {
47697 +static const struct sysfs_ops btrfs_super_attr_ops = {
47698 .show = btrfs_super_attr_show,
47699 .store = btrfs_super_attr_store,
47700 };
47701
47702 -static struct sysfs_ops btrfs_root_attr_ops = {
47703 +static const struct sysfs_ops btrfs_root_attr_ops = {
47704 .show = btrfs_root_attr_show,
47705 .store = btrfs_root_attr_store,
47706 };
47707 diff --git a/fs/buffer.c b/fs/buffer.c
47708 index 6fa5302..395d9f6 100644
47709 --- a/fs/buffer.c
47710 +++ b/fs/buffer.c
47711 @@ -25,6 +25,7 @@
47712 #include <linux/percpu.h>
47713 #include <linux/slab.h>
47714 #include <linux/capability.h>
47715 +#include <linux/security.h>
47716 #include <linux/blkdev.h>
47717 #include <linux/file.h>
47718 #include <linux/quotaops.h>
47719 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47720 index 3797e00..ce776f6 100644
47721 --- a/fs/cachefiles/bind.c
47722 +++ b/fs/cachefiles/bind.c
47723 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47724 args);
47725
47726 /* start by checking things over */
47727 - ASSERT(cache->fstop_percent >= 0 &&
47728 - cache->fstop_percent < cache->fcull_percent &&
47729 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47730 cache->fcull_percent < cache->frun_percent &&
47731 cache->frun_percent < 100);
47732
47733 - ASSERT(cache->bstop_percent >= 0 &&
47734 - cache->bstop_percent < cache->bcull_percent &&
47735 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47736 cache->bcull_percent < cache->brun_percent &&
47737 cache->brun_percent < 100);
47738
47739 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47740 index 4618516..bb30d01 100644
47741 --- a/fs/cachefiles/daemon.c
47742 +++ b/fs/cachefiles/daemon.c
47743 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47744 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47745 return -EIO;
47746
47747 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47748 + if (datalen > PAGE_SIZE - 1)
47749 return -EOPNOTSUPP;
47750
47751 /* drag the command string into the kernel so we can parse it */
47752 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47753 if (args[0] != '%' || args[1] != '\0')
47754 return -EINVAL;
47755
47756 - if (fstop < 0 || fstop >= cache->fcull_percent)
47757 + if (fstop >= cache->fcull_percent)
47758 return cachefiles_daemon_range_error(cache, args);
47759
47760 cache->fstop_percent = fstop;
47761 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47762 if (args[0] != '%' || args[1] != '\0')
47763 return -EINVAL;
47764
47765 - if (bstop < 0 || bstop >= cache->bcull_percent)
47766 + if (bstop >= cache->bcull_percent)
47767 return cachefiles_daemon_range_error(cache, args);
47768
47769 cache->bstop_percent = bstop;
47770 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47771 index f7c255f..fcd61de 100644
47772 --- a/fs/cachefiles/internal.h
47773 +++ b/fs/cachefiles/internal.h
47774 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47775 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47776 struct rb_root active_nodes; /* active nodes (can't be culled) */
47777 rwlock_t active_lock; /* lock for active_nodes */
47778 - atomic_t gravecounter; /* graveyard uniquifier */
47779 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47780 unsigned frun_percent; /* when to stop culling (% files) */
47781 unsigned fcull_percent; /* when to start culling (% files) */
47782 unsigned fstop_percent; /* when to stop allocating (% files) */
47783 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47784 * proc.c
47785 */
47786 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47787 -extern atomic_t cachefiles_lookup_histogram[HZ];
47788 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47789 -extern atomic_t cachefiles_create_histogram[HZ];
47790 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47791 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47792 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47793
47794 extern int __init cachefiles_proc_init(void);
47795 extern void cachefiles_proc_cleanup(void);
47796 static inline
47797 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47798 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47799 {
47800 unsigned long jif = jiffies - start_jif;
47801 if (jif >= HZ)
47802 jif = HZ - 1;
47803 - atomic_inc(&histogram[jif]);
47804 + atomic_inc_unchecked(&histogram[jif]);
47805 }
47806
47807 #else
47808 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47809 index 14ac480..a62766c 100644
47810 --- a/fs/cachefiles/namei.c
47811 +++ b/fs/cachefiles/namei.c
47812 @@ -250,7 +250,7 @@ try_again:
47813 /* first step is to make up a grave dentry in the graveyard */
47814 sprintf(nbuffer, "%08x%08x",
47815 (uint32_t) get_seconds(),
47816 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47817 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47818
47819 /* do the multiway lock magic */
47820 trap = lock_rename(cache->graveyard, dir);
47821 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47822 index eccd339..4c1d995 100644
47823 --- a/fs/cachefiles/proc.c
47824 +++ b/fs/cachefiles/proc.c
47825 @@ -14,9 +14,9 @@
47826 #include <linux/seq_file.h>
47827 #include "internal.h"
47828
47829 -atomic_t cachefiles_lookup_histogram[HZ];
47830 -atomic_t cachefiles_mkdir_histogram[HZ];
47831 -atomic_t cachefiles_create_histogram[HZ];
47832 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47833 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47834 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47835
47836 /*
47837 * display the latency histogram
47838 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47839 return 0;
47840 default:
47841 index = (unsigned long) v - 3;
47842 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47843 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47844 - z = atomic_read(&cachefiles_create_histogram[index]);
47845 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47846 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47847 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47848 if (x == 0 && y == 0 && z == 0)
47849 return 0;
47850
47851 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47852 index a6c8c6f..5cf8517 100644
47853 --- a/fs/cachefiles/rdwr.c
47854 +++ b/fs/cachefiles/rdwr.c
47855 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47856 old_fs = get_fs();
47857 set_fs(KERNEL_DS);
47858 ret = file->f_op->write(
47859 - file, (const void __user *) data, len, &pos);
47860 + file, (const void __force_user *) data, len, &pos);
47861 set_fs(old_fs);
47862 kunmap(page);
47863 if (ret != len)
47864 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47865 index 42cec2a..2aba466 100644
47866 --- a/fs/cifs/cifs_debug.c
47867 +++ b/fs/cifs/cifs_debug.c
47868 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47869 tcon = list_entry(tmp3,
47870 struct cifsTconInfo,
47871 tcon_list);
47872 - atomic_set(&tcon->num_smbs_sent, 0);
47873 - atomic_set(&tcon->num_writes, 0);
47874 - atomic_set(&tcon->num_reads, 0);
47875 - atomic_set(&tcon->num_oplock_brks, 0);
47876 - atomic_set(&tcon->num_opens, 0);
47877 - atomic_set(&tcon->num_posixopens, 0);
47878 - atomic_set(&tcon->num_posixmkdirs, 0);
47879 - atomic_set(&tcon->num_closes, 0);
47880 - atomic_set(&tcon->num_deletes, 0);
47881 - atomic_set(&tcon->num_mkdirs, 0);
47882 - atomic_set(&tcon->num_rmdirs, 0);
47883 - atomic_set(&tcon->num_renames, 0);
47884 - atomic_set(&tcon->num_t2renames, 0);
47885 - atomic_set(&tcon->num_ffirst, 0);
47886 - atomic_set(&tcon->num_fnext, 0);
47887 - atomic_set(&tcon->num_fclose, 0);
47888 - atomic_set(&tcon->num_hardlinks, 0);
47889 - atomic_set(&tcon->num_symlinks, 0);
47890 - atomic_set(&tcon->num_locks, 0);
47891 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47892 + atomic_set_unchecked(&tcon->num_writes, 0);
47893 + atomic_set_unchecked(&tcon->num_reads, 0);
47894 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47895 + atomic_set_unchecked(&tcon->num_opens, 0);
47896 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47897 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47898 + atomic_set_unchecked(&tcon->num_closes, 0);
47899 + atomic_set_unchecked(&tcon->num_deletes, 0);
47900 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47901 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47902 + atomic_set_unchecked(&tcon->num_renames, 0);
47903 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47904 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47905 + atomic_set_unchecked(&tcon->num_fnext, 0);
47906 + atomic_set_unchecked(&tcon->num_fclose, 0);
47907 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47908 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47909 + atomic_set_unchecked(&tcon->num_locks, 0);
47910 }
47911 }
47912 }
47913 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47914 if (tcon->need_reconnect)
47915 seq_puts(m, "\tDISCONNECTED ");
47916 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47917 - atomic_read(&tcon->num_smbs_sent),
47918 - atomic_read(&tcon->num_oplock_brks));
47919 + atomic_read_unchecked(&tcon->num_smbs_sent),
47920 + atomic_read_unchecked(&tcon->num_oplock_brks));
47921 seq_printf(m, "\nReads: %d Bytes: %lld",
47922 - atomic_read(&tcon->num_reads),
47923 + atomic_read_unchecked(&tcon->num_reads),
47924 (long long)(tcon->bytes_read));
47925 seq_printf(m, "\nWrites: %d Bytes: %lld",
47926 - atomic_read(&tcon->num_writes),
47927 + atomic_read_unchecked(&tcon->num_writes),
47928 (long long)(tcon->bytes_written));
47929 seq_printf(m, "\nFlushes: %d",
47930 - atomic_read(&tcon->num_flushes));
47931 + atomic_read_unchecked(&tcon->num_flushes));
47932 seq_printf(m, "\nLocks: %d HardLinks: %d "
47933 "Symlinks: %d",
47934 - atomic_read(&tcon->num_locks),
47935 - atomic_read(&tcon->num_hardlinks),
47936 - atomic_read(&tcon->num_symlinks));
47937 + atomic_read_unchecked(&tcon->num_locks),
47938 + atomic_read_unchecked(&tcon->num_hardlinks),
47939 + atomic_read_unchecked(&tcon->num_symlinks));
47940 seq_printf(m, "\nOpens: %d Closes: %d "
47941 "Deletes: %d",
47942 - atomic_read(&tcon->num_opens),
47943 - atomic_read(&tcon->num_closes),
47944 - atomic_read(&tcon->num_deletes));
47945 + atomic_read_unchecked(&tcon->num_opens),
47946 + atomic_read_unchecked(&tcon->num_closes),
47947 + atomic_read_unchecked(&tcon->num_deletes));
47948 seq_printf(m, "\nPosix Opens: %d "
47949 "Posix Mkdirs: %d",
47950 - atomic_read(&tcon->num_posixopens),
47951 - atomic_read(&tcon->num_posixmkdirs));
47952 + atomic_read_unchecked(&tcon->num_posixopens),
47953 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47954 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47955 - atomic_read(&tcon->num_mkdirs),
47956 - atomic_read(&tcon->num_rmdirs));
47957 + atomic_read_unchecked(&tcon->num_mkdirs),
47958 + atomic_read_unchecked(&tcon->num_rmdirs));
47959 seq_printf(m, "\nRenames: %d T2 Renames %d",
47960 - atomic_read(&tcon->num_renames),
47961 - atomic_read(&tcon->num_t2renames));
47962 + atomic_read_unchecked(&tcon->num_renames),
47963 + atomic_read_unchecked(&tcon->num_t2renames));
47964 seq_printf(m, "\nFindFirst: %d FNext %d "
47965 "FClose %d",
47966 - atomic_read(&tcon->num_ffirst),
47967 - atomic_read(&tcon->num_fnext),
47968 - atomic_read(&tcon->num_fclose));
47969 + atomic_read_unchecked(&tcon->num_ffirst),
47970 + atomic_read_unchecked(&tcon->num_fnext),
47971 + atomic_read_unchecked(&tcon->num_fclose));
47972 }
47973 }
47974 }
47975 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47976 index 1445407..68cb0dc 100644
47977 --- a/fs/cifs/cifsfs.c
47978 +++ b/fs/cifs/cifsfs.c
47979 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47980 cifs_req_cachep = kmem_cache_create("cifs_request",
47981 CIFSMaxBufSize +
47982 MAX_CIFS_HDR_SIZE, 0,
47983 - SLAB_HWCACHE_ALIGN, NULL);
47984 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47985 if (cifs_req_cachep == NULL)
47986 return -ENOMEM;
47987
47988 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47989 efficient to alloc 1 per page off the slab compared to 17K (5page)
47990 alloc of large cifs buffers even when page debugging is on */
47991 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47992 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47993 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47994 NULL);
47995 if (cifs_sm_req_cachep == NULL) {
47996 mempool_destroy(cifs_req_poolp);
47997 @@ -991,8 +991,8 @@ init_cifs(void)
47998 atomic_set(&bufAllocCount, 0);
47999 atomic_set(&smBufAllocCount, 0);
48000 #ifdef CONFIG_CIFS_STATS2
48001 - atomic_set(&totBufAllocCount, 0);
48002 - atomic_set(&totSmBufAllocCount, 0);
48003 + atomic_set_unchecked(&totBufAllocCount, 0);
48004 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48005 #endif /* CONFIG_CIFS_STATS2 */
48006
48007 atomic_set(&midCount, 0);
48008 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48009 index e29581e..1c22bab 100644
48010 --- a/fs/cifs/cifsglob.h
48011 +++ b/fs/cifs/cifsglob.h
48012 @@ -252,28 +252,28 @@ struct cifsTconInfo {
48013 __u16 Flags; /* optional support bits */
48014 enum statusEnum tidStatus;
48015 #ifdef CONFIG_CIFS_STATS
48016 - atomic_t num_smbs_sent;
48017 - atomic_t num_writes;
48018 - atomic_t num_reads;
48019 - atomic_t num_flushes;
48020 - atomic_t num_oplock_brks;
48021 - atomic_t num_opens;
48022 - atomic_t num_closes;
48023 - atomic_t num_deletes;
48024 - atomic_t num_mkdirs;
48025 - atomic_t num_posixopens;
48026 - atomic_t num_posixmkdirs;
48027 - atomic_t num_rmdirs;
48028 - atomic_t num_renames;
48029 - atomic_t num_t2renames;
48030 - atomic_t num_ffirst;
48031 - atomic_t num_fnext;
48032 - atomic_t num_fclose;
48033 - atomic_t num_hardlinks;
48034 - atomic_t num_symlinks;
48035 - atomic_t num_locks;
48036 - atomic_t num_acl_get;
48037 - atomic_t num_acl_set;
48038 + atomic_unchecked_t num_smbs_sent;
48039 + atomic_unchecked_t num_writes;
48040 + atomic_unchecked_t num_reads;
48041 + atomic_unchecked_t num_flushes;
48042 + atomic_unchecked_t num_oplock_brks;
48043 + atomic_unchecked_t num_opens;
48044 + atomic_unchecked_t num_closes;
48045 + atomic_unchecked_t num_deletes;
48046 + atomic_unchecked_t num_mkdirs;
48047 + atomic_unchecked_t num_posixopens;
48048 + atomic_unchecked_t num_posixmkdirs;
48049 + atomic_unchecked_t num_rmdirs;
48050 + atomic_unchecked_t num_renames;
48051 + atomic_unchecked_t num_t2renames;
48052 + atomic_unchecked_t num_ffirst;
48053 + atomic_unchecked_t num_fnext;
48054 + atomic_unchecked_t num_fclose;
48055 + atomic_unchecked_t num_hardlinks;
48056 + atomic_unchecked_t num_symlinks;
48057 + atomic_unchecked_t num_locks;
48058 + atomic_unchecked_t num_acl_get;
48059 + atomic_unchecked_t num_acl_set;
48060 #ifdef CONFIG_CIFS_STATS2
48061 unsigned long long time_writes;
48062 unsigned long long time_reads;
48063 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48064 }
48065
48066 #ifdef CONFIG_CIFS_STATS
48067 -#define cifs_stats_inc atomic_inc
48068 +#define cifs_stats_inc atomic_inc_unchecked
48069
48070 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48071 unsigned int bytes)
48072 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48073 /* Various Debug counters */
48074 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48075 #ifdef CONFIG_CIFS_STATS2
48076 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48077 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48078 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48079 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48080 #endif
48081 GLOBAL_EXTERN atomic_t smBufAllocCount;
48082 GLOBAL_EXTERN atomic_t midCount;
48083 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48084 index fc1e048..28b3441 100644
48085 --- a/fs/cifs/link.c
48086 +++ b/fs/cifs/link.c
48087 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48088
48089 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48090 {
48091 - char *p = nd_get_link(nd);
48092 + const char *p = nd_get_link(nd);
48093 if (!IS_ERR(p))
48094 kfree(p);
48095 }
48096 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48097 index 95b82e8..12a538d 100644
48098 --- a/fs/cifs/misc.c
48099 +++ b/fs/cifs/misc.c
48100 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48101 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48102 atomic_inc(&bufAllocCount);
48103 #ifdef CONFIG_CIFS_STATS2
48104 - atomic_inc(&totBufAllocCount);
48105 + atomic_inc_unchecked(&totBufAllocCount);
48106 #endif /* CONFIG_CIFS_STATS2 */
48107 }
48108
48109 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48110 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48111 atomic_inc(&smBufAllocCount);
48112 #ifdef CONFIG_CIFS_STATS2
48113 - atomic_inc(&totSmBufAllocCount);
48114 + atomic_inc_unchecked(&totSmBufAllocCount);
48115 #endif /* CONFIG_CIFS_STATS2 */
48116
48117 }
48118 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48119 index a5bf577..6d19845 100644
48120 --- a/fs/coda/cache.c
48121 +++ b/fs/coda/cache.c
48122 @@ -24,14 +24,14 @@
48123 #include <linux/coda_fs_i.h>
48124 #include <linux/coda_cache.h>
48125
48126 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48127 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48128
48129 /* replace or extend an acl cache hit */
48130 void coda_cache_enter(struct inode *inode, int mask)
48131 {
48132 struct coda_inode_info *cii = ITOC(inode);
48133
48134 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48135 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48136 if (cii->c_uid != current_fsuid()) {
48137 cii->c_uid = current_fsuid();
48138 cii->c_cached_perm = mask;
48139 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48140 void coda_cache_clear_inode(struct inode *inode)
48141 {
48142 struct coda_inode_info *cii = ITOC(inode);
48143 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48144 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48145 }
48146
48147 /* remove all acl caches */
48148 void coda_cache_clear_all(struct super_block *sb)
48149 {
48150 - atomic_inc(&permission_epoch);
48151 + atomic_inc_unchecked(&permission_epoch);
48152 }
48153
48154
48155 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48156
48157 hit = (mask & cii->c_cached_perm) == mask &&
48158 cii->c_uid == current_fsuid() &&
48159 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48160 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48161
48162 return hit;
48163 }
48164 diff --git a/fs/compat.c b/fs/compat.c
48165 index d1e2411..c2ef8ed 100644
48166 --- a/fs/compat.c
48167 +++ b/fs/compat.c
48168 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48169 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48170 {
48171 compat_ino_t ino = stat->ino;
48172 - typeof(ubuf->st_uid) uid = 0;
48173 - typeof(ubuf->st_gid) gid = 0;
48174 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48175 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48176 int err;
48177
48178 SET_UID(uid, stat->uid);
48179 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48180
48181 set_fs(KERNEL_DS);
48182 /* The __user pointer cast is valid because of the set_fs() */
48183 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48184 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48185 set_fs(oldfs);
48186 /* truncating is ok because it's a user address */
48187 if (!ret)
48188 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48189
48190 struct compat_readdir_callback {
48191 struct compat_old_linux_dirent __user *dirent;
48192 + struct file * file;
48193 int result;
48194 };
48195
48196 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48197 buf->result = -EOVERFLOW;
48198 return -EOVERFLOW;
48199 }
48200 +
48201 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48202 + return 0;
48203 +
48204 buf->result++;
48205 dirent = buf->dirent;
48206 if (!access_ok(VERIFY_WRITE, dirent,
48207 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48208
48209 buf.result = 0;
48210 buf.dirent = dirent;
48211 + buf.file = file;
48212
48213 error = vfs_readdir(file, compat_fillonedir, &buf);
48214 if (buf.result)
48215 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48216 struct compat_getdents_callback {
48217 struct compat_linux_dirent __user *current_dir;
48218 struct compat_linux_dirent __user *previous;
48219 + struct file * file;
48220 int count;
48221 int error;
48222 };
48223 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48224 buf->error = -EOVERFLOW;
48225 return -EOVERFLOW;
48226 }
48227 +
48228 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48229 + return 0;
48230 +
48231 dirent = buf->previous;
48232 if (dirent) {
48233 if (__put_user(offset, &dirent->d_off))
48234 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48235 buf.previous = NULL;
48236 buf.count = count;
48237 buf.error = 0;
48238 + buf.file = file;
48239
48240 error = vfs_readdir(file, compat_filldir, &buf);
48241 if (error >= 0)
48242 @@ -987,6 +999,7 @@ out:
48243 struct compat_getdents_callback64 {
48244 struct linux_dirent64 __user *current_dir;
48245 struct linux_dirent64 __user *previous;
48246 + struct file * file;
48247 int count;
48248 int error;
48249 };
48250 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48251 buf->error = -EINVAL; /* only used if we fail.. */
48252 if (reclen > buf->count)
48253 return -EINVAL;
48254 +
48255 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48256 + return 0;
48257 +
48258 dirent = buf->previous;
48259
48260 if (dirent) {
48261 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48262 buf.previous = NULL;
48263 buf.count = count;
48264 buf.error = 0;
48265 + buf.file = file;
48266
48267 error = vfs_readdir(file, compat_filldir64, &buf);
48268 if (error >= 0)
48269 error = buf.error;
48270 lastdirent = buf.previous;
48271 if (lastdirent) {
48272 - typeof(lastdirent->d_off) d_off = file->f_pos;
48273 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48274 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48275 error = -EFAULT;
48276 else
48277 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48278 * verify all the pointers
48279 */
48280 ret = -EINVAL;
48281 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48282 + if (nr_segs > UIO_MAXIOV)
48283 goto out;
48284 if (!file->f_op)
48285 goto out;
48286 @@ -1454,6 +1472,10 @@ out:
48287 return ret;
48288 }
48289
48290 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48291 +extern atomic64_unchecked_t global_exec_counter;
48292 +#endif
48293 +
48294 /*
48295 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48296 * that it processes 32 bit argv and envp pointers.
48297 @@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48298 compat_uptr_t __user *envp,
48299 struct pt_regs * regs)
48300 {
48301 +#ifdef CONFIG_GRKERNSEC
48302 + struct file *old_exec_file;
48303 + struct acl_subject_label *old_acl;
48304 + struct rlimit old_rlim[RLIM_NLIMITS];
48305 +#endif
48306 struct linux_binprm *bprm;
48307 struct file *file;
48308 struct files_struct *displaced;
48309 bool clear_in_exec;
48310 int retval;
48311 + const struct cred *cred = current_cred();
48312 +
48313 + /*
48314 + * We move the actual failure in case of RLIMIT_NPROC excess from
48315 + * set*uid() to execve() because too many poorly written programs
48316 + * don't check setuid() return code. Here we additionally recheck
48317 + * whether NPROC limit is still exceeded.
48318 + */
48319 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48320 +
48321 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48322 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48323 + retval = -EAGAIN;
48324 + goto out_ret;
48325 + }
48326 +
48327 + /* We're below the limit (still or again), so we don't want to make
48328 + * further execve() calls fail. */
48329 + current->flags &= ~PF_NPROC_EXCEEDED;
48330
48331 retval = unshare_files(&displaced);
48332 if (retval)
48333 @@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48334 if (IS_ERR(file))
48335 goto out_unmark;
48336
48337 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48338 + retval = -EPERM;
48339 + goto out_file;
48340 + }
48341 +
48342 sched_exec();
48343
48344 bprm->file = file;
48345 bprm->filename = filename;
48346 bprm->interp = filename;
48347
48348 + if (gr_process_user_ban()) {
48349 + retval = -EPERM;
48350 + goto out_file;
48351 + }
48352 +
48353 + retval = -EACCES;
48354 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48355 + goto out_file;
48356 +
48357 retval = bprm_mm_init(bprm);
48358 if (retval)
48359 goto out_file;
48360 @@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48361 if (retval < 0)
48362 goto out;
48363
48364 + if (!gr_tpe_allow(file)) {
48365 + retval = -EACCES;
48366 + goto out;
48367 + }
48368 +
48369 + if (gr_check_crash_exec(file)) {
48370 + retval = -EACCES;
48371 + goto out;
48372 + }
48373 +
48374 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48375 +
48376 + gr_handle_exec_args_compat(bprm, argv);
48377 +
48378 +#ifdef CONFIG_GRKERNSEC
48379 + old_acl = current->acl;
48380 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48381 + old_exec_file = current->exec_file;
48382 + get_file(file);
48383 + current->exec_file = file;
48384 +#endif
48385 +
48386 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48387 + bprm->unsafe);
48388 + if (retval < 0)
48389 + goto out_fail;
48390 +
48391 retval = search_binary_handler(bprm, regs);
48392 if (retval < 0)
48393 - goto out;
48394 + goto out_fail;
48395 +#ifdef CONFIG_GRKERNSEC
48396 + if (old_exec_file)
48397 + fput(old_exec_file);
48398 +#endif
48399
48400 /* execve succeeded */
48401 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48402 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48403 +#endif
48404 current->fs->in_exec = 0;
48405 current->in_execve = 0;
48406 acct_update_integrals(current);
48407 @@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48408 put_files_struct(displaced);
48409 return retval;
48410
48411 +out_fail:
48412 +#ifdef CONFIG_GRKERNSEC
48413 + current->acl = old_acl;
48414 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48415 + fput(current->exec_file);
48416 + current->exec_file = old_exec_file;
48417 +#endif
48418 +
48419 out:
48420 if (bprm->mm) {
48421 acct_arg_size(bprm, 0);
48422 @@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48423 struct fdtable *fdt;
48424 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48425
48426 + pax_track_stack();
48427 +
48428 if (n < 0)
48429 goto out_nofds;
48430
48431 @@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48432 oldfs = get_fs();
48433 set_fs(KERNEL_DS);
48434 /* The __user pointer casts are valid because of the set_fs() */
48435 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48436 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48437 set_fs(oldfs);
48438
48439 if (err)
48440 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48441 index 0adced2..bbb1b0d 100644
48442 --- a/fs/compat_binfmt_elf.c
48443 +++ b/fs/compat_binfmt_elf.c
48444 @@ -29,10 +29,12 @@
48445 #undef elfhdr
48446 #undef elf_phdr
48447 #undef elf_note
48448 +#undef elf_dyn
48449 #undef elf_addr_t
48450 #define elfhdr elf32_hdr
48451 #define elf_phdr elf32_phdr
48452 #define elf_note elf32_note
48453 +#define elf_dyn Elf32_Dyn
48454 #define elf_addr_t Elf32_Addr
48455
48456 /*
48457 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48458 index d84e705..d8c364c 100644
48459 --- a/fs/compat_ioctl.c
48460 +++ b/fs/compat_ioctl.c
48461 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48462 up = (struct compat_video_spu_palette __user *) arg;
48463 err = get_user(palp, &up->palette);
48464 err |= get_user(length, &up->length);
48465 + if (err)
48466 + return -EFAULT;
48467
48468 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48469 err = put_user(compat_ptr(palp), &up_native->palette);
48470 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48471 return -EFAULT;
48472 if (__get_user(udata, &ss32->iomem_base))
48473 return -EFAULT;
48474 - ss.iomem_base = compat_ptr(udata);
48475 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48476 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48477 __get_user(ss.port_high, &ss32->port_high))
48478 return -EFAULT;
48479 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48480 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48481 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48482 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48483 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48484 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48485 return -EFAULT;
48486
48487 return ioctl_preallocate(file, p);
48488 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48489 index 8e48b52..f01ed91 100644
48490 --- a/fs/configfs/dir.c
48491 +++ b/fs/configfs/dir.c
48492 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48493 }
48494 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48495 struct configfs_dirent *next;
48496 - const char * name;
48497 + const unsigned char * name;
48498 + char d_name[sizeof(next->s_dentry->d_iname)];
48499 int len;
48500
48501 next = list_entry(p, struct configfs_dirent,
48502 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48503 continue;
48504
48505 name = configfs_get_name(next);
48506 - len = strlen(name);
48507 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48508 + len = next->s_dentry->d_name.len;
48509 + memcpy(d_name, name, len);
48510 + name = d_name;
48511 + } else
48512 + len = strlen(name);
48513 if (next->s_dentry)
48514 ino = next->s_dentry->d_inode->i_ino;
48515 else
48516 diff --git a/fs/dcache.c b/fs/dcache.c
48517 index 44c0aea..2529092 100644
48518 --- a/fs/dcache.c
48519 +++ b/fs/dcache.c
48520 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48521
48522 static struct kmem_cache *dentry_cache __read_mostly;
48523
48524 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48525 -
48526 /*
48527 * This is the single most critical data structure when it comes
48528 * to the dcache: the hashtable for lookups. Somebody should try
48529 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48530 mempages -= reserve;
48531
48532 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48533 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48534 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48535
48536 dcache_init();
48537 inode_init();
48538 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48539 index 39c6ee8..dcee0f1 100644
48540 --- a/fs/debugfs/inode.c
48541 +++ b/fs/debugfs/inode.c
48542 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48543 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48544 {
48545 return debugfs_create_file(name,
48546 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48547 + S_IFDIR | S_IRWXU,
48548 +#else
48549 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48550 +#endif
48551 parent, NULL, NULL);
48552 }
48553 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48554 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48555 index c010ecf..a8d8c59 100644
48556 --- a/fs/dlm/lockspace.c
48557 +++ b/fs/dlm/lockspace.c
48558 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48559 kfree(ls);
48560 }
48561
48562 -static struct sysfs_ops dlm_attr_ops = {
48563 +static const struct sysfs_ops dlm_attr_ops = {
48564 .show = dlm_attr_show,
48565 .store = dlm_attr_store,
48566 };
48567 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48568 index 7a5f1ac..205b034 100644
48569 --- a/fs/ecryptfs/crypto.c
48570 +++ b/fs/ecryptfs/crypto.c
48571 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48572 rc);
48573 goto out;
48574 }
48575 - if (unlikely(ecryptfs_verbosity > 0)) {
48576 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48577 - "with iv:\n");
48578 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48579 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48580 - "encryption:\n");
48581 - ecryptfs_dump_hex((char *)
48582 - (page_address(page)
48583 - + (extent_offset * crypt_stat->extent_size)),
48584 - 8);
48585 - }
48586 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48587 page, (extent_offset
48588 * crypt_stat->extent_size),
48589 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48590 goto out;
48591 }
48592 rc = 0;
48593 - if (unlikely(ecryptfs_verbosity > 0)) {
48594 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48595 - "rc = [%d]\n", (extent_base + extent_offset),
48596 - rc);
48597 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48598 - "encryption:\n");
48599 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48600 - }
48601 out:
48602 return rc;
48603 }
48604 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48605 rc);
48606 goto out;
48607 }
48608 - if (unlikely(ecryptfs_verbosity > 0)) {
48609 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48610 - "with iv:\n");
48611 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48612 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48613 - "decryption:\n");
48614 - ecryptfs_dump_hex((char *)
48615 - (page_address(enc_extent_page)
48616 - + (extent_offset * crypt_stat->extent_size)),
48617 - 8);
48618 - }
48619 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48620 (extent_offset
48621 * crypt_stat->extent_size),
48622 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48623 goto out;
48624 }
48625 rc = 0;
48626 - if (unlikely(ecryptfs_verbosity > 0)) {
48627 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48628 - "rc = [%d]\n", (extent_base + extent_offset),
48629 - rc);
48630 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48631 - "decryption:\n");
48632 - ecryptfs_dump_hex((char *)(page_address(page)
48633 - + (extent_offset
48634 - * crypt_stat->extent_size)), 8);
48635 - }
48636 out:
48637 return rc;
48638 }
48639 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48640 index 88ba4d4..073f003 100644
48641 --- a/fs/ecryptfs/inode.c
48642 +++ b/fs/ecryptfs/inode.c
48643 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48644 old_fs = get_fs();
48645 set_fs(get_ds());
48646 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48647 - (char __user *)lower_buf,
48648 + (char __force_user *)lower_buf,
48649 lower_bufsiz);
48650 set_fs(old_fs);
48651 if (rc < 0)
48652 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48653 }
48654 old_fs = get_fs();
48655 set_fs(get_ds());
48656 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48657 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48658 set_fs(old_fs);
48659 if (rc < 0)
48660 goto out_free;
48661 diff --git a/fs/exec.c b/fs/exec.c
48662 index 86fafc6..6272c0e 100644
48663 --- a/fs/exec.c
48664 +++ b/fs/exec.c
48665 @@ -56,12 +56,28 @@
48666 #include <linux/fsnotify.h>
48667 #include <linux/fs_struct.h>
48668 #include <linux/pipe_fs_i.h>
48669 +#include <linux/random.h>
48670 +#include <linux/seq_file.h>
48671 +
48672 +#ifdef CONFIG_PAX_REFCOUNT
48673 +#include <linux/kallsyms.h>
48674 +#include <linux/kdebug.h>
48675 +#endif
48676
48677 #include <asm/uaccess.h>
48678 #include <asm/mmu_context.h>
48679 #include <asm/tlb.h>
48680 #include "internal.h"
48681
48682 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48683 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48684 +#endif
48685 +
48686 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48687 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48688 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48689 +#endif
48690 +
48691 int core_uses_pid;
48692 char core_pattern[CORENAME_MAX_SIZE] = "core";
48693 unsigned int core_pipe_limit;
48694 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48695 int write)
48696 {
48697 struct page *page;
48698 - int ret;
48699
48700 -#ifdef CONFIG_STACK_GROWSUP
48701 - if (write) {
48702 - ret = expand_stack_downwards(bprm->vma, pos);
48703 - if (ret < 0)
48704 - return NULL;
48705 - }
48706 -#endif
48707 - ret = get_user_pages(current, bprm->mm, pos,
48708 - 1, write, 1, &page, NULL);
48709 - if (ret <= 0)
48710 + if (0 > expand_stack_downwards(bprm->vma, pos))
48711 + return NULL;
48712 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48713 return NULL;
48714
48715 if (write) {
48716 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48717 if (size <= ARG_MAX)
48718 return page;
48719
48720 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48721 + // only allow 1MB for argv+env on suid/sgid binaries
48722 + // to prevent easy ASLR exhaustion
48723 + if (((bprm->cred->euid != current_euid()) ||
48724 + (bprm->cred->egid != current_egid())) &&
48725 + (size > (1024 * 1024))) {
48726 + put_page(page);
48727 + return NULL;
48728 + }
48729 +#endif
48730 +
48731 /*
48732 * Limit to 1/4-th the stack size for the argv+env strings.
48733 * This ensures that:
48734 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48735 vma->vm_end = STACK_TOP_MAX;
48736 vma->vm_start = vma->vm_end - PAGE_SIZE;
48737 vma->vm_flags = VM_STACK_FLAGS;
48738 +
48739 +#ifdef CONFIG_PAX_SEGMEXEC
48740 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48741 +#endif
48742 +
48743 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48744
48745 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48746 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48747 mm->stack_vm = mm->total_vm = 1;
48748 up_write(&mm->mmap_sem);
48749 bprm->p = vma->vm_end - sizeof(void *);
48750 +
48751 +#ifdef CONFIG_PAX_RANDUSTACK
48752 + if (randomize_va_space)
48753 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48754 +#endif
48755 +
48756 return 0;
48757 err:
48758 up_write(&mm->mmap_sem);
48759 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48760 int r;
48761 mm_segment_t oldfs = get_fs();
48762 set_fs(KERNEL_DS);
48763 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48764 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48765 set_fs(oldfs);
48766 return r;
48767 }
48768 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48769 unsigned long new_end = old_end - shift;
48770 struct mmu_gather *tlb;
48771
48772 - BUG_ON(new_start > new_end);
48773 + if (new_start >= new_end || new_start < mmap_min_addr)
48774 + return -ENOMEM;
48775
48776 /*
48777 * ensure there are no vmas between where we want to go
48778 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48779 if (vma != find_vma(mm, new_start))
48780 return -EFAULT;
48781
48782 +#ifdef CONFIG_PAX_SEGMEXEC
48783 + BUG_ON(pax_find_mirror_vma(vma));
48784 +#endif
48785 +
48786 /*
48787 * cover the whole range: [new_start, old_end)
48788 */
48789 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48790 stack_top = arch_align_stack(stack_top);
48791 stack_top = PAGE_ALIGN(stack_top);
48792
48793 - if (unlikely(stack_top < mmap_min_addr) ||
48794 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48795 - return -ENOMEM;
48796 -
48797 stack_shift = vma->vm_end - stack_top;
48798
48799 bprm->p -= stack_shift;
48800 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48801 bprm->exec -= stack_shift;
48802
48803 down_write(&mm->mmap_sem);
48804 +
48805 + /* Move stack pages down in memory. */
48806 + if (stack_shift) {
48807 + ret = shift_arg_pages(vma, stack_shift);
48808 + if (ret)
48809 + goto out_unlock;
48810 + }
48811 +
48812 vm_flags = VM_STACK_FLAGS;
48813
48814 /*
48815 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48816 vm_flags &= ~VM_EXEC;
48817 vm_flags |= mm->def_flags;
48818
48819 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48820 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48821 + vm_flags &= ~VM_EXEC;
48822 +
48823 +#ifdef CONFIG_PAX_MPROTECT
48824 + if (mm->pax_flags & MF_PAX_MPROTECT)
48825 + vm_flags &= ~VM_MAYEXEC;
48826 +#endif
48827 +
48828 + }
48829 +#endif
48830 +
48831 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48832 vm_flags);
48833 if (ret)
48834 goto out_unlock;
48835 BUG_ON(prev != vma);
48836
48837 - /* Move stack pages down in memory. */
48838 - if (stack_shift) {
48839 - ret = shift_arg_pages(vma, stack_shift);
48840 - if (ret)
48841 - goto out_unlock;
48842 - }
48843 -
48844 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48845 stack_size = vma->vm_end - vma->vm_start;
48846 /*
48847 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
48848 old_fs = get_fs();
48849 set_fs(get_ds());
48850 /* The cast to a user pointer is valid due to the set_fs() */
48851 - result = vfs_read(file, (void __user *)addr, count, &pos);
48852 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
48853 set_fs(old_fs);
48854 return result;
48855 }
48856 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
48857 perf_event_comm(tsk);
48858 }
48859
48860 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
48861 +{
48862 + int i, ch;
48863 +
48864 + /* Copies the binary name from after last slash */
48865 + for (i = 0; (ch = *(fn++)) != '\0';) {
48866 + if (ch == '/')
48867 + i = 0; /* overwrite what we wrote */
48868 + else
48869 + if (i < len - 1)
48870 + tcomm[i++] = ch;
48871 + }
48872 + tcomm[i] = '\0';
48873 +}
48874 +
48875 int flush_old_exec(struct linux_binprm * bprm)
48876 {
48877 int retval;
48878 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
48879
48880 set_mm_exe_file(bprm->mm, bprm->file);
48881
48882 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
48883 /*
48884 * Release all of the old mmap stuff
48885 */
48886 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
48887
48888 void setup_new_exec(struct linux_binprm * bprm)
48889 {
48890 - int i, ch;
48891 - char * name;
48892 - char tcomm[sizeof(current->comm)];
48893 -
48894 arch_pick_mmap_layout(current->mm);
48895
48896 /* This is the point of no return */
48897 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
48898 else
48899 set_dumpable(current->mm, suid_dumpable);
48900
48901 - name = bprm->filename;
48902 -
48903 - /* Copies the binary name from after last slash */
48904 - for (i=0; (ch = *(name++)) != '\0';) {
48905 - if (ch == '/')
48906 - i = 0; /* overwrite what we wrote */
48907 - else
48908 - if (i < (sizeof(tcomm) - 1))
48909 - tcomm[i++] = ch;
48910 - }
48911 - tcomm[i] = '\0';
48912 - set_task_comm(current, tcomm);
48913 + set_task_comm(current, bprm->tcomm);
48914
48915 /* Set the new mm task size. We have to do that late because it may
48916 * depend on TIF_32BIT which is only updated in flush_thread() on
48917 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48918 }
48919 rcu_read_unlock();
48920
48921 - if (p->fs->users > n_fs) {
48922 + if (atomic_read(&p->fs->users) > n_fs) {
48923 bprm->unsafe |= LSM_UNSAFE_SHARE;
48924 } else {
48925 res = -EAGAIN;
48926 @@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
48927
48928 EXPORT_SYMBOL(search_binary_handler);
48929
48930 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48931 +atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
48932 +#endif
48933 +
48934 /*
48935 * sys_execve() executes a new program.
48936 */
48937 @@ -1347,11 +1396,35 @@ int do_execve(char * filename,
48938 char __user *__user *envp,
48939 struct pt_regs * regs)
48940 {
48941 +#ifdef CONFIG_GRKERNSEC
48942 + struct file *old_exec_file;
48943 + struct acl_subject_label *old_acl;
48944 + struct rlimit old_rlim[RLIM_NLIMITS];
48945 +#endif
48946 struct linux_binprm *bprm;
48947 struct file *file;
48948 struct files_struct *displaced;
48949 bool clear_in_exec;
48950 int retval;
48951 + const struct cred *cred = current_cred();
48952 +
48953 + /*
48954 + * We move the actual failure in case of RLIMIT_NPROC excess from
48955 + * set*uid() to execve() because too many poorly written programs
48956 + * don't check setuid() return code. Here we additionally recheck
48957 + * whether NPROC limit is still exceeded.
48958 + */
48959 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48960 +
48961 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48962 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48963 + retval = -EAGAIN;
48964 + goto out_ret;
48965 + }
48966 +
48967 + /* We're below the limit (still or again), so we don't want to make
48968 + * further execve() calls fail. */
48969 + current->flags &= ~PF_NPROC_EXCEEDED;
48970
48971 retval = unshare_files(&displaced);
48972 if (retval)
48973 @@ -1377,12 +1450,27 @@ int do_execve(char * filename,
48974 if (IS_ERR(file))
48975 goto out_unmark;
48976
48977 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48978 + retval = -EPERM;
48979 + goto out_file;
48980 + }
48981 +
48982 sched_exec();
48983
48984 bprm->file = file;
48985 bprm->filename = filename;
48986 bprm->interp = filename;
48987
48988 + if (gr_process_user_ban()) {
48989 + retval = -EPERM;
48990 + goto out_file;
48991 + }
48992 +
48993 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48994 + retval = -EACCES;
48995 + goto out_file;
48996 + }
48997 +
48998 retval = bprm_mm_init(bprm);
48999 if (retval)
49000 goto out_file;
49001 @@ -1412,12 +1500,47 @@ int do_execve(char * filename,
49002 if (retval < 0)
49003 goto out;
49004
49005 + if (!gr_tpe_allow(file)) {
49006 + retval = -EACCES;
49007 + goto out;
49008 + }
49009 +
49010 + if (gr_check_crash_exec(file)) {
49011 + retval = -EACCES;
49012 + goto out;
49013 + }
49014 +
49015 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49016 +
49017 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49018 +
49019 +#ifdef CONFIG_GRKERNSEC
49020 + old_acl = current->acl;
49021 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49022 + old_exec_file = current->exec_file;
49023 + get_file(file);
49024 + current->exec_file = file;
49025 +#endif
49026 +
49027 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49028 + bprm->unsafe);
49029 + if (retval < 0)
49030 + goto out_fail;
49031 +
49032 current->flags &= ~PF_KTHREAD;
49033 retval = search_binary_handler(bprm,regs);
49034 if (retval < 0)
49035 - goto out;
49036 + goto out_fail;
49037 +#ifdef CONFIG_GRKERNSEC
49038 + if (old_exec_file)
49039 + fput(old_exec_file);
49040 +#endif
49041
49042 /* execve succeeded */
49043 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49044 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49045 +#endif
49046 +
49047 current->fs->in_exec = 0;
49048 current->in_execve = 0;
49049 acct_update_integrals(current);
49050 @@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49051 put_files_struct(displaced);
49052 return retval;
49053
49054 +out_fail:
49055 +#ifdef CONFIG_GRKERNSEC
49056 + current->acl = old_acl;
49057 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49058 + fput(current->exec_file);
49059 + current->exec_file = old_exec_file;
49060 +#endif
49061 +
49062 out:
49063 if (bprm->mm) {
49064 acct_arg_size(bprm, 0);
49065 @@ -1591,6 +1722,220 @@ out:
49066 return ispipe;
49067 }
49068
49069 +int pax_check_flags(unsigned long *flags)
49070 +{
49071 + int retval = 0;
49072 +
49073 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49074 + if (*flags & MF_PAX_SEGMEXEC)
49075 + {
49076 + *flags &= ~MF_PAX_SEGMEXEC;
49077 + retval = -EINVAL;
49078 + }
49079 +#endif
49080 +
49081 + if ((*flags & MF_PAX_PAGEEXEC)
49082 +
49083 +#ifdef CONFIG_PAX_PAGEEXEC
49084 + && (*flags & MF_PAX_SEGMEXEC)
49085 +#endif
49086 +
49087 + )
49088 + {
49089 + *flags &= ~MF_PAX_PAGEEXEC;
49090 + retval = -EINVAL;
49091 + }
49092 +
49093 + if ((*flags & MF_PAX_MPROTECT)
49094 +
49095 +#ifdef CONFIG_PAX_MPROTECT
49096 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49097 +#endif
49098 +
49099 + )
49100 + {
49101 + *flags &= ~MF_PAX_MPROTECT;
49102 + retval = -EINVAL;
49103 + }
49104 +
49105 + if ((*flags & MF_PAX_EMUTRAMP)
49106 +
49107 +#ifdef CONFIG_PAX_EMUTRAMP
49108 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49109 +#endif
49110 +
49111 + )
49112 + {
49113 + *flags &= ~MF_PAX_EMUTRAMP;
49114 + retval = -EINVAL;
49115 + }
49116 +
49117 + return retval;
49118 +}
49119 +
49120 +EXPORT_SYMBOL(pax_check_flags);
49121 +
49122 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49123 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49124 +{
49125 + struct task_struct *tsk = current;
49126 + struct mm_struct *mm = current->mm;
49127 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49128 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49129 + char *path_exec = NULL;
49130 + char *path_fault = NULL;
49131 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49132 +
49133 + if (buffer_exec && buffer_fault) {
49134 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49135 +
49136 + down_read(&mm->mmap_sem);
49137 + vma = mm->mmap;
49138 + while (vma && (!vma_exec || !vma_fault)) {
49139 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49140 + vma_exec = vma;
49141 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49142 + vma_fault = vma;
49143 + vma = vma->vm_next;
49144 + }
49145 + if (vma_exec) {
49146 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49147 + if (IS_ERR(path_exec))
49148 + path_exec = "<path too long>";
49149 + else {
49150 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49151 + if (path_exec) {
49152 + *path_exec = 0;
49153 + path_exec = buffer_exec;
49154 + } else
49155 + path_exec = "<path too long>";
49156 + }
49157 + }
49158 + if (vma_fault) {
49159 + start = vma_fault->vm_start;
49160 + end = vma_fault->vm_end;
49161 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49162 + if (vma_fault->vm_file) {
49163 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49164 + if (IS_ERR(path_fault))
49165 + path_fault = "<path too long>";
49166 + else {
49167 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49168 + if (path_fault) {
49169 + *path_fault = 0;
49170 + path_fault = buffer_fault;
49171 + } else
49172 + path_fault = "<path too long>";
49173 + }
49174 + } else
49175 + path_fault = "<anonymous mapping>";
49176 + }
49177 + up_read(&mm->mmap_sem);
49178 + }
49179 + if (tsk->signal->curr_ip)
49180 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49181 + else
49182 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49183 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49184 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49185 + task_uid(tsk), task_euid(tsk), pc, sp);
49186 + free_page((unsigned long)buffer_exec);
49187 + free_page((unsigned long)buffer_fault);
49188 + pax_report_insns(regs, pc, sp);
49189 + do_coredump(SIGKILL, SIGKILL, regs);
49190 +}
49191 +#endif
49192 +
49193 +#ifdef CONFIG_PAX_REFCOUNT
49194 +void pax_report_refcount_overflow(struct pt_regs *regs)
49195 +{
49196 + if (current->signal->curr_ip)
49197 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49198 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49199 + else
49200 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49201 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49202 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49203 + show_regs(regs);
49204 + force_sig_specific(SIGKILL, current);
49205 +}
49206 +#endif
49207 +
49208 +#ifdef CONFIG_PAX_USERCOPY
49209 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49210 +int object_is_on_stack(const void *obj, unsigned long len)
49211 +{
49212 + const void * const stack = task_stack_page(current);
49213 + const void * const stackend = stack + THREAD_SIZE;
49214 +
49215 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49216 + const void *frame = NULL;
49217 + const void *oldframe;
49218 +#endif
49219 +
49220 + if (obj + len < obj)
49221 + return -1;
49222 +
49223 + if (obj + len <= stack || stackend <= obj)
49224 + return 0;
49225 +
49226 + if (obj < stack || stackend < obj + len)
49227 + return -1;
49228 +
49229 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49230 + oldframe = __builtin_frame_address(1);
49231 + if (oldframe)
49232 + frame = __builtin_frame_address(2);
49233 + /*
49234 + low ----------------------------------------------> high
49235 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49236 + ^----------------^
49237 + allow copies only within here
49238 + */
49239 + while (stack <= frame && frame < stackend) {
49240 + /* if obj + len extends past the last frame, this
49241 + check won't pass and the next frame will be 0,
49242 + causing us to bail out and correctly report
49243 + the copy as invalid
49244 + */
49245 + if (obj + len <= frame)
49246 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49247 + oldframe = frame;
49248 + frame = *(const void * const *)frame;
49249 + }
49250 + return -1;
49251 +#else
49252 + return 1;
49253 +#endif
49254 +}
49255 +
49256 +
49257 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49258 +{
49259 + if (current->signal->curr_ip)
49260 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49261 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49262 + else
49263 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49264 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49265 +
49266 + dump_stack();
49267 + gr_handle_kernel_exploit();
49268 + do_group_exit(SIGKILL);
49269 +}
49270 +#endif
49271 +
49272 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49273 +void pax_track_stack(void)
49274 +{
49275 + unsigned long sp = (unsigned long)&sp;
49276 + if (sp < current_thread_info()->lowest_stack &&
49277 + sp > (unsigned long)task_stack_page(current))
49278 + current_thread_info()->lowest_stack = sp;
49279 +}
49280 +EXPORT_SYMBOL(pax_track_stack);
49281 +#endif
49282 +
49283 static int zap_process(struct task_struct *start)
49284 {
49285 struct task_struct *t;
49286 @@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49287 pipe = file->f_path.dentry->d_inode->i_pipe;
49288
49289 pipe_lock(pipe);
49290 - pipe->readers++;
49291 - pipe->writers--;
49292 + atomic_inc(&pipe->readers);
49293 + atomic_dec(&pipe->writers);
49294
49295 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49296 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49297 wake_up_interruptible_sync(&pipe->wait);
49298 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49299 pipe_wait(pipe);
49300 }
49301
49302 - pipe->readers--;
49303 - pipe->writers++;
49304 + atomic_dec(&pipe->readers);
49305 + atomic_inc(&pipe->writers);
49306 pipe_unlock(pipe);
49307
49308 }
49309 @@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49310 char **helper_argv = NULL;
49311 int helper_argc = 0;
49312 int dump_count = 0;
49313 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49314 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49315
49316 audit_core_dumps(signr);
49317
49318 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49319 + gr_handle_brute_attach(current, mm->flags);
49320 +
49321 binfmt = mm->binfmt;
49322 if (!binfmt || !binfmt->core_dump)
49323 goto fail;
49324 @@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49325 */
49326 clear_thread_flag(TIF_SIGPENDING);
49327
49328 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49329 +
49330 /*
49331 * lock_kernel() because format_corename() is controlled by sysctl, which
49332 * uses lock_kernel()
49333 @@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49334 goto fail_unlock;
49335 }
49336
49337 - dump_count = atomic_inc_return(&core_dump_count);
49338 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49339 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49340 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49341 task_tgid_vnr(current), current->comm);
49342 @@ -1972,7 +2322,7 @@ close_fail:
49343 filp_close(file, NULL);
49344 fail_dropcount:
49345 if (dump_count)
49346 - atomic_dec(&core_dump_count);
49347 + atomic_dec_unchecked(&core_dump_count);
49348 fail_unlock:
49349 if (helper_argv)
49350 argv_free(helper_argv);
49351 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49352 index 7f8d2e5..a1abdbb 100644
49353 --- a/fs/ext2/balloc.c
49354 +++ b/fs/ext2/balloc.c
49355 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49356
49357 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49358 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49359 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49360 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49361 sbi->s_resuid != current_fsuid() &&
49362 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49363 return 0;
49364 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49365 index 27967f9..9f2a5fb 100644
49366 --- a/fs/ext3/balloc.c
49367 +++ b/fs/ext3/balloc.c
49368 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49369
49370 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49371 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49372 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49373 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49374 sbi->s_resuid != current_fsuid() &&
49375 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49376 return 0;
49377 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49378 index e85b63c..80398e6 100644
49379 --- a/fs/ext4/balloc.c
49380 +++ b/fs/ext4/balloc.c
49381 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49382 /* Hm, nope. Are (enough) root reserved blocks available? */
49383 if (sbi->s_resuid == current_fsuid() ||
49384 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49385 - capable(CAP_SYS_RESOURCE)) {
49386 + capable_nolog(CAP_SYS_RESOURCE)) {
49387 if (free_blocks >= (nblocks + dirty_blocks))
49388 return 1;
49389 }
49390 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49391 index 67c46ed..1f237e5 100644
49392 --- a/fs/ext4/ext4.h
49393 +++ b/fs/ext4/ext4.h
49394 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49395
49396 /* stats for buddy allocator */
49397 spinlock_t s_mb_pa_lock;
49398 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49399 - atomic_t s_bal_success; /* we found long enough chunks */
49400 - atomic_t s_bal_allocated; /* in blocks */
49401 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49402 - atomic_t s_bal_goals; /* goal hits */
49403 - atomic_t s_bal_breaks; /* too long searches */
49404 - atomic_t s_bal_2orders; /* 2^order hits */
49405 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49406 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49407 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49408 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49409 + atomic_unchecked_t s_bal_goals; /* goal hits */
49410 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49411 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49412 spinlock_t s_bal_lock;
49413 unsigned long s_mb_buddies_generated;
49414 unsigned long long s_mb_generation_time;
49415 - atomic_t s_mb_lost_chunks;
49416 - atomic_t s_mb_preallocated;
49417 - atomic_t s_mb_discarded;
49418 + atomic_unchecked_t s_mb_lost_chunks;
49419 + atomic_unchecked_t s_mb_preallocated;
49420 + atomic_unchecked_t s_mb_discarded;
49421 atomic_t s_lock_busy;
49422
49423 /* locality groups */
49424 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49425 index 2a60541..7439d61 100644
49426 --- a/fs/ext4/file.c
49427 +++ b/fs/ext4/file.c
49428 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49429 cp = d_path(&path, buf, sizeof(buf));
49430 path_put(&path);
49431 if (!IS_ERR(cp)) {
49432 - memcpy(sbi->s_es->s_last_mounted, cp,
49433 - sizeof(sbi->s_es->s_last_mounted));
49434 + strlcpy(sbi->s_es->s_last_mounted, cp,
49435 + sizeof(sbi->s_es->s_last_mounted));
49436 sb->s_dirt = 1;
49437 }
49438 }
49439 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49440 index 42bac1b..0aab9d8 100644
49441 --- a/fs/ext4/mballoc.c
49442 +++ b/fs/ext4/mballoc.c
49443 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49444 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49445
49446 if (EXT4_SB(sb)->s_mb_stats)
49447 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49448 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49449
49450 break;
49451 }
49452 @@ -2131,7 +2131,7 @@ repeat:
49453 ac->ac_status = AC_STATUS_CONTINUE;
49454 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49455 cr = 3;
49456 - atomic_inc(&sbi->s_mb_lost_chunks);
49457 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49458 goto repeat;
49459 }
49460 }
49461 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49462 ext4_grpblk_t counters[16];
49463 } sg;
49464
49465 + pax_track_stack();
49466 +
49467 group--;
49468 if (group == 0)
49469 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49470 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49471 if (sbi->s_mb_stats) {
49472 printk(KERN_INFO
49473 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49474 - atomic_read(&sbi->s_bal_allocated),
49475 - atomic_read(&sbi->s_bal_reqs),
49476 - atomic_read(&sbi->s_bal_success));
49477 + atomic_read_unchecked(&sbi->s_bal_allocated),
49478 + atomic_read_unchecked(&sbi->s_bal_reqs),
49479 + atomic_read_unchecked(&sbi->s_bal_success));
49480 printk(KERN_INFO
49481 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49482 "%u 2^N hits, %u breaks, %u lost\n",
49483 - atomic_read(&sbi->s_bal_ex_scanned),
49484 - atomic_read(&sbi->s_bal_goals),
49485 - atomic_read(&sbi->s_bal_2orders),
49486 - atomic_read(&sbi->s_bal_breaks),
49487 - atomic_read(&sbi->s_mb_lost_chunks));
49488 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49489 + atomic_read_unchecked(&sbi->s_bal_goals),
49490 + atomic_read_unchecked(&sbi->s_bal_2orders),
49491 + atomic_read_unchecked(&sbi->s_bal_breaks),
49492 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49493 printk(KERN_INFO
49494 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49495 sbi->s_mb_buddies_generated++,
49496 sbi->s_mb_generation_time);
49497 printk(KERN_INFO
49498 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49499 - atomic_read(&sbi->s_mb_preallocated),
49500 - atomic_read(&sbi->s_mb_discarded));
49501 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49502 + atomic_read_unchecked(&sbi->s_mb_discarded));
49503 }
49504
49505 free_percpu(sbi->s_locality_groups);
49506 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49507 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49508
49509 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49510 - atomic_inc(&sbi->s_bal_reqs);
49511 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49512 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49513 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49514 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49515 - atomic_inc(&sbi->s_bal_success);
49516 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49517 + atomic_inc_unchecked(&sbi->s_bal_success);
49518 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49519 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49520 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49521 - atomic_inc(&sbi->s_bal_goals);
49522 + atomic_inc_unchecked(&sbi->s_bal_goals);
49523 if (ac->ac_found > sbi->s_mb_max_to_scan)
49524 - atomic_inc(&sbi->s_bal_breaks);
49525 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49526 }
49527
49528 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49529 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49530 trace_ext4_mb_new_inode_pa(ac, pa);
49531
49532 ext4_mb_use_inode_pa(ac, pa);
49533 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49534 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49535
49536 ei = EXT4_I(ac->ac_inode);
49537 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49538 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49539 trace_ext4_mb_new_group_pa(ac, pa);
49540
49541 ext4_mb_use_group_pa(ac, pa);
49542 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49543 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49544
49545 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49546 lg = ac->ac_lg;
49547 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49548 * from the bitmap and continue.
49549 */
49550 }
49551 - atomic_add(free, &sbi->s_mb_discarded);
49552 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49553
49554 return err;
49555 }
49556 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49557 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49558 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49559 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49560 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49561 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49562
49563 if (ac) {
49564 ac->ac_sb = sb;
49565 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49566 index f1e7077..edd86b2 100644
49567 --- a/fs/ext4/super.c
49568 +++ b/fs/ext4/super.c
49569 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49570 }
49571
49572
49573 -static struct sysfs_ops ext4_attr_ops = {
49574 +static const struct sysfs_ops ext4_attr_ops = {
49575 .show = ext4_attr_show,
49576 .store = ext4_attr_store,
49577 };
49578 diff --git a/fs/fcntl.c b/fs/fcntl.c
49579 index 97e01dc..e9aab2d 100644
49580 --- a/fs/fcntl.c
49581 +++ b/fs/fcntl.c
49582 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49583 if (err)
49584 return err;
49585
49586 + if (gr_handle_chroot_fowner(pid, type))
49587 + return -ENOENT;
49588 + if (gr_check_protected_task_fowner(pid, type))
49589 + return -EACCES;
49590 +
49591 f_modown(filp, pid, type, force);
49592 return 0;
49593 }
49594 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49595
49596 static int f_setown_ex(struct file *filp, unsigned long arg)
49597 {
49598 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49599 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49600 struct f_owner_ex owner;
49601 struct pid *pid;
49602 int type;
49603 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49604
49605 static int f_getown_ex(struct file *filp, unsigned long arg)
49606 {
49607 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49608 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49609 struct f_owner_ex owner;
49610 int ret = 0;
49611
49612 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49613 switch (cmd) {
49614 case F_DUPFD:
49615 case F_DUPFD_CLOEXEC:
49616 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49617 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49618 break;
49619 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49620 diff --git a/fs/fifo.c b/fs/fifo.c
49621 index f8f97b8..b1f2259 100644
49622 --- a/fs/fifo.c
49623 +++ b/fs/fifo.c
49624 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49625 */
49626 filp->f_op = &read_pipefifo_fops;
49627 pipe->r_counter++;
49628 - if (pipe->readers++ == 0)
49629 + if (atomic_inc_return(&pipe->readers) == 1)
49630 wake_up_partner(inode);
49631
49632 - if (!pipe->writers) {
49633 + if (!atomic_read(&pipe->writers)) {
49634 if ((filp->f_flags & O_NONBLOCK)) {
49635 /* suppress POLLHUP until we have
49636 * seen a writer */
49637 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49638 * errno=ENXIO when there is no process reading the FIFO.
49639 */
49640 ret = -ENXIO;
49641 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49642 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49643 goto err;
49644
49645 filp->f_op = &write_pipefifo_fops;
49646 pipe->w_counter++;
49647 - if (!pipe->writers++)
49648 + if (atomic_inc_return(&pipe->writers) == 1)
49649 wake_up_partner(inode);
49650
49651 - if (!pipe->readers) {
49652 + if (!atomic_read(&pipe->readers)) {
49653 wait_for_partner(inode, &pipe->r_counter);
49654 if (signal_pending(current))
49655 goto err_wr;
49656 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49657 */
49658 filp->f_op = &rdwr_pipefifo_fops;
49659
49660 - pipe->readers++;
49661 - pipe->writers++;
49662 + atomic_inc(&pipe->readers);
49663 + atomic_inc(&pipe->writers);
49664 pipe->r_counter++;
49665 pipe->w_counter++;
49666 - if (pipe->readers == 1 || pipe->writers == 1)
49667 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49668 wake_up_partner(inode);
49669 break;
49670
49671 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49672 return 0;
49673
49674 err_rd:
49675 - if (!--pipe->readers)
49676 + if (atomic_dec_and_test(&pipe->readers))
49677 wake_up_interruptible(&pipe->wait);
49678 ret = -ERESTARTSYS;
49679 goto err;
49680
49681 err_wr:
49682 - if (!--pipe->writers)
49683 + if (atomic_dec_and_test(&pipe->writers))
49684 wake_up_interruptible(&pipe->wait);
49685 ret = -ERESTARTSYS;
49686 goto err;
49687
49688 err:
49689 - if (!pipe->readers && !pipe->writers)
49690 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49691 free_pipe_info(inode);
49692
49693 err_nocleanup:
49694 diff --git a/fs/file.c b/fs/file.c
49695 index 87e1290..a930cc4 100644
49696 --- a/fs/file.c
49697 +++ b/fs/file.c
49698 @@ -14,6 +14,7 @@
49699 #include <linux/slab.h>
49700 #include <linux/vmalloc.h>
49701 #include <linux/file.h>
49702 +#include <linux/security.h>
49703 #include <linux/fdtable.h>
49704 #include <linux/bitops.h>
49705 #include <linux/interrupt.h>
49706 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49707 * N.B. For clone tasks sharing a files structure, this test
49708 * will limit the total number of files that can be opened.
49709 */
49710 +
49711 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49712 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49713 return -EMFILE;
49714
49715 diff --git a/fs/filesystems.c b/fs/filesystems.c
49716 index a24c58e..53f91ee 100644
49717 --- a/fs/filesystems.c
49718 +++ b/fs/filesystems.c
49719 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49720 int len = dot ? dot - name : strlen(name);
49721
49722 fs = __get_fs_type(name, len);
49723 +
49724 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49725 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49726 +#else
49727 if (!fs && (request_module("%.*s", len, name) == 0))
49728 +#endif
49729 fs = __get_fs_type(name, len);
49730
49731 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49732 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49733 index eee0590..1181166 100644
49734 --- a/fs/fs_struct.c
49735 +++ b/fs/fs_struct.c
49736 @@ -4,6 +4,7 @@
49737 #include <linux/path.h>
49738 #include <linux/slab.h>
49739 #include <linux/fs_struct.h>
49740 +#include <linux/grsecurity.h>
49741
49742 /*
49743 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49744 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49745 old_root = fs->root;
49746 fs->root = *path;
49747 path_get(path);
49748 + gr_set_chroot_entries(current, path);
49749 write_unlock(&fs->lock);
49750 if (old_root.dentry)
49751 path_put(&old_root);
49752 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49753 && fs->root.mnt == old_root->mnt) {
49754 path_get(new_root);
49755 fs->root = *new_root;
49756 + gr_set_chroot_entries(p, new_root);
49757 count++;
49758 }
49759 if (fs->pwd.dentry == old_root->dentry
49760 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49761 task_lock(tsk);
49762 write_lock(&fs->lock);
49763 tsk->fs = NULL;
49764 - kill = !--fs->users;
49765 + gr_clear_chroot_entries(tsk);
49766 + kill = !atomic_dec_return(&fs->users);
49767 write_unlock(&fs->lock);
49768 task_unlock(tsk);
49769 if (kill)
49770 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49771 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49772 /* We don't need to lock fs - think why ;-) */
49773 if (fs) {
49774 - fs->users = 1;
49775 + atomic_set(&fs->users, 1);
49776 fs->in_exec = 0;
49777 rwlock_init(&fs->lock);
49778 fs->umask = old->umask;
49779 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49780
49781 task_lock(current);
49782 write_lock(&fs->lock);
49783 - kill = !--fs->users;
49784 + kill = !atomic_dec_return(&fs->users);
49785 current->fs = new_fs;
49786 + gr_set_chroot_entries(current, &new_fs->root);
49787 write_unlock(&fs->lock);
49788 task_unlock(current);
49789
49790 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
49791
49792 int current_umask(void)
49793 {
49794 - return current->fs->umask;
49795 + return current->fs->umask | gr_acl_umask();
49796 }
49797 EXPORT_SYMBOL(current_umask);
49798
49799 /* to be mentioned only in INIT_TASK */
49800 struct fs_struct init_fs = {
49801 - .users = 1,
49802 + .users = ATOMIC_INIT(1),
49803 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49804 .umask = 0022,
49805 };
49806 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49807 task_lock(current);
49808
49809 write_lock(&init_fs.lock);
49810 - init_fs.users++;
49811 + atomic_inc(&init_fs.users);
49812 write_unlock(&init_fs.lock);
49813
49814 write_lock(&fs->lock);
49815 current->fs = &init_fs;
49816 - kill = !--fs->users;
49817 + gr_set_chroot_entries(current, &current->fs->root);
49818 + kill = !atomic_dec_return(&fs->users);
49819 write_unlock(&fs->lock);
49820
49821 task_unlock(current);
49822 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49823 index 9905350..02eaec4 100644
49824 --- a/fs/fscache/cookie.c
49825 +++ b/fs/fscache/cookie.c
49826 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49827 parent ? (char *) parent->def->name : "<no-parent>",
49828 def->name, netfs_data);
49829
49830 - fscache_stat(&fscache_n_acquires);
49831 + fscache_stat_unchecked(&fscache_n_acquires);
49832
49833 /* if there's no parent cookie, then we don't create one here either */
49834 if (!parent) {
49835 - fscache_stat(&fscache_n_acquires_null);
49836 + fscache_stat_unchecked(&fscache_n_acquires_null);
49837 _leave(" [no parent]");
49838 return NULL;
49839 }
49840 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49841 /* allocate and initialise a cookie */
49842 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49843 if (!cookie) {
49844 - fscache_stat(&fscache_n_acquires_oom);
49845 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49846 _leave(" [ENOMEM]");
49847 return NULL;
49848 }
49849 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49850
49851 switch (cookie->def->type) {
49852 case FSCACHE_COOKIE_TYPE_INDEX:
49853 - fscache_stat(&fscache_n_cookie_index);
49854 + fscache_stat_unchecked(&fscache_n_cookie_index);
49855 break;
49856 case FSCACHE_COOKIE_TYPE_DATAFILE:
49857 - fscache_stat(&fscache_n_cookie_data);
49858 + fscache_stat_unchecked(&fscache_n_cookie_data);
49859 break;
49860 default:
49861 - fscache_stat(&fscache_n_cookie_special);
49862 + fscache_stat_unchecked(&fscache_n_cookie_special);
49863 break;
49864 }
49865
49866 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49867 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49868 atomic_dec(&parent->n_children);
49869 __fscache_cookie_put(cookie);
49870 - fscache_stat(&fscache_n_acquires_nobufs);
49871 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49872 _leave(" = NULL");
49873 return NULL;
49874 }
49875 }
49876
49877 - fscache_stat(&fscache_n_acquires_ok);
49878 + fscache_stat_unchecked(&fscache_n_acquires_ok);
49879 _leave(" = %p", cookie);
49880 return cookie;
49881 }
49882 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49883 cache = fscache_select_cache_for_object(cookie->parent);
49884 if (!cache) {
49885 up_read(&fscache_addremove_sem);
49886 - fscache_stat(&fscache_n_acquires_no_cache);
49887 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49888 _leave(" = -ENOMEDIUM [no cache]");
49889 return -ENOMEDIUM;
49890 }
49891 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49892 object = cache->ops->alloc_object(cache, cookie);
49893 fscache_stat_d(&fscache_n_cop_alloc_object);
49894 if (IS_ERR(object)) {
49895 - fscache_stat(&fscache_n_object_no_alloc);
49896 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
49897 ret = PTR_ERR(object);
49898 goto error;
49899 }
49900
49901 - fscache_stat(&fscache_n_object_alloc);
49902 + fscache_stat_unchecked(&fscache_n_object_alloc);
49903
49904 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49905
49906 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49907 struct fscache_object *object;
49908 struct hlist_node *_p;
49909
49910 - fscache_stat(&fscache_n_updates);
49911 + fscache_stat_unchecked(&fscache_n_updates);
49912
49913 if (!cookie) {
49914 - fscache_stat(&fscache_n_updates_null);
49915 + fscache_stat_unchecked(&fscache_n_updates_null);
49916 _leave(" [no cookie]");
49917 return;
49918 }
49919 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49920 struct fscache_object *object;
49921 unsigned long event;
49922
49923 - fscache_stat(&fscache_n_relinquishes);
49924 + fscache_stat_unchecked(&fscache_n_relinquishes);
49925 if (retire)
49926 - fscache_stat(&fscache_n_relinquishes_retire);
49927 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49928
49929 if (!cookie) {
49930 - fscache_stat(&fscache_n_relinquishes_null);
49931 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
49932 _leave(" [no cookie]");
49933 return;
49934 }
49935 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49936
49937 /* wait for the cookie to finish being instantiated (or to fail) */
49938 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49939 - fscache_stat(&fscache_n_relinquishes_waitcrt);
49940 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49941 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49942 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49943 }
49944 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49945 index edd7434..0725e66 100644
49946 --- a/fs/fscache/internal.h
49947 +++ b/fs/fscache/internal.h
49948 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49949 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49950 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49951
49952 -extern atomic_t fscache_n_op_pend;
49953 -extern atomic_t fscache_n_op_run;
49954 -extern atomic_t fscache_n_op_enqueue;
49955 -extern atomic_t fscache_n_op_deferred_release;
49956 -extern atomic_t fscache_n_op_release;
49957 -extern atomic_t fscache_n_op_gc;
49958 -extern atomic_t fscache_n_op_cancelled;
49959 -extern atomic_t fscache_n_op_rejected;
49960 +extern atomic_unchecked_t fscache_n_op_pend;
49961 +extern atomic_unchecked_t fscache_n_op_run;
49962 +extern atomic_unchecked_t fscache_n_op_enqueue;
49963 +extern atomic_unchecked_t fscache_n_op_deferred_release;
49964 +extern atomic_unchecked_t fscache_n_op_release;
49965 +extern atomic_unchecked_t fscache_n_op_gc;
49966 +extern atomic_unchecked_t fscache_n_op_cancelled;
49967 +extern atomic_unchecked_t fscache_n_op_rejected;
49968
49969 -extern atomic_t fscache_n_attr_changed;
49970 -extern atomic_t fscache_n_attr_changed_ok;
49971 -extern atomic_t fscache_n_attr_changed_nobufs;
49972 -extern atomic_t fscache_n_attr_changed_nomem;
49973 -extern atomic_t fscache_n_attr_changed_calls;
49974 +extern atomic_unchecked_t fscache_n_attr_changed;
49975 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
49976 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49977 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49978 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
49979
49980 -extern atomic_t fscache_n_allocs;
49981 -extern atomic_t fscache_n_allocs_ok;
49982 -extern atomic_t fscache_n_allocs_wait;
49983 -extern atomic_t fscache_n_allocs_nobufs;
49984 -extern atomic_t fscache_n_allocs_intr;
49985 -extern atomic_t fscache_n_allocs_object_dead;
49986 -extern atomic_t fscache_n_alloc_ops;
49987 -extern atomic_t fscache_n_alloc_op_waits;
49988 +extern atomic_unchecked_t fscache_n_allocs;
49989 +extern atomic_unchecked_t fscache_n_allocs_ok;
49990 +extern atomic_unchecked_t fscache_n_allocs_wait;
49991 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
49992 +extern atomic_unchecked_t fscache_n_allocs_intr;
49993 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
49994 +extern atomic_unchecked_t fscache_n_alloc_ops;
49995 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
49996
49997 -extern atomic_t fscache_n_retrievals;
49998 -extern atomic_t fscache_n_retrievals_ok;
49999 -extern atomic_t fscache_n_retrievals_wait;
50000 -extern atomic_t fscache_n_retrievals_nodata;
50001 -extern atomic_t fscache_n_retrievals_nobufs;
50002 -extern atomic_t fscache_n_retrievals_intr;
50003 -extern atomic_t fscache_n_retrievals_nomem;
50004 -extern atomic_t fscache_n_retrievals_object_dead;
50005 -extern atomic_t fscache_n_retrieval_ops;
50006 -extern atomic_t fscache_n_retrieval_op_waits;
50007 +extern atomic_unchecked_t fscache_n_retrievals;
50008 +extern atomic_unchecked_t fscache_n_retrievals_ok;
50009 +extern atomic_unchecked_t fscache_n_retrievals_wait;
50010 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
50011 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50012 +extern atomic_unchecked_t fscache_n_retrievals_intr;
50013 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
50014 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50015 +extern atomic_unchecked_t fscache_n_retrieval_ops;
50016 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50017
50018 -extern atomic_t fscache_n_stores;
50019 -extern atomic_t fscache_n_stores_ok;
50020 -extern atomic_t fscache_n_stores_again;
50021 -extern atomic_t fscache_n_stores_nobufs;
50022 -extern atomic_t fscache_n_stores_oom;
50023 -extern atomic_t fscache_n_store_ops;
50024 -extern atomic_t fscache_n_store_calls;
50025 -extern atomic_t fscache_n_store_pages;
50026 -extern atomic_t fscache_n_store_radix_deletes;
50027 -extern atomic_t fscache_n_store_pages_over_limit;
50028 +extern atomic_unchecked_t fscache_n_stores;
50029 +extern atomic_unchecked_t fscache_n_stores_ok;
50030 +extern atomic_unchecked_t fscache_n_stores_again;
50031 +extern atomic_unchecked_t fscache_n_stores_nobufs;
50032 +extern atomic_unchecked_t fscache_n_stores_oom;
50033 +extern atomic_unchecked_t fscache_n_store_ops;
50034 +extern atomic_unchecked_t fscache_n_store_calls;
50035 +extern atomic_unchecked_t fscache_n_store_pages;
50036 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
50037 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50038
50039 -extern atomic_t fscache_n_store_vmscan_not_storing;
50040 -extern atomic_t fscache_n_store_vmscan_gone;
50041 -extern atomic_t fscache_n_store_vmscan_busy;
50042 -extern atomic_t fscache_n_store_vmscan_cancelled;
50043 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50044 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50045 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50046 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50047
50048 -extern atomic_t fscache_n_marks;
50049 -extern atomic_t fscache_n_uncaches;
50050 +extern atomic_unchecked_t fscache_n_marks;
50051 +extern atomic_unchecked_t fscache_n_uncaches;
50052
50053 -extern atomic_t fscache_n_acquires;
50054 -extern atomic_t fscache_n_acquires_null;
50055 -extern atomic_t fscache_n_acquires_no_cache;
50056 -extern atomic_t fscache_n_acquires_ok;
50057 -extern atomic_t fscache_n_acquires_nobufs;
50058 -extern atomic_t fscache_n_acquires_oom;
50059 +extern atomic_unchecked_t fscache_n_acquires;
50060 +extern atomic_unchecked_t fscache_n_acquires_null;
50061 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
50062 +extern atomic_unchecked_t fscache_n_acquires_ok;
50063 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
50064 +extern atomic_unchecked_t fscache_n_acquires_oom;
50065
50066 -extern atomic_t fscache_n_updates;
50067 -extern atomic_t fscache_n_updates_null;
50068 -extern atomic_t fscache_n_updates_run;
50069 +extern atomic_unchecked_t fscache_n_updates;
50070 +extern atomic_unchecked_t fscache_n_updates_null;
50071 +extern atomic_unchecked_t fscache_n_updates_run;
50072
50073 -extern atomic_t fscache_n_relinquishes;
50074 -extern atomic_t fscache_n_relinquishes_null;
50075 -extern atomic_t fscache_n_relinquishes_waitcrt;
50076 -extern atomic_t fscache_n_relinquishes_retire;
50077 +extern atomic_unchecked_t fscache_n_relinquishes;
50078 +extern atomic_unchecked_t fscache_n_relinquishes_null;
50079 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50080 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
50081
50082 -extern atomic_t fscache_n_cookie_index;
50083 -extern atomic_t fscache_n_cookie_data;
50084 -extern atomic_t fscache_n_cookie_special;
50085 +extern atomic_unchecked_t fscache_n_cookie_index;
50086 +extern atomic_unchecked_t fscache_n_cookie_data;
50087 +extern atomic_unchecked_t fscache_n_cookie_special;
50088
50089 -extern atomic_t fscache_n_object_alloc;
50090 -extern atomic_t fscache_n_object_no_alloc;
50091 -extern atomic_t fscache_n_object_lookups;
50092 -extern atomic_t fscache_n_object_lookups_negative;
50093 -extern atomic_t fscache_n_object_lookups_positive;
50094 -extern atomic_t fscache_n_object_lookups_timed_out;
50095 -extern atomic_t fscache_n_object_created;
50096 -extern atomic_t fscache_n_object_avail;
50097 -extern atomic_t fscache_n_object_dead;
50098 +extern atomic_unchecked_t fscache_n_object_alloc;
50099 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50100 +extern atomic_unchecked_t fscache_n_object_lookups;
50101 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50102 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50103 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50104 +extern atomic_unchecked_t fscache_n_object_created;
50105 +extern atomic_unchecked_t fscache_n_object_avail;
50106 +extern atomic_unchecked_t fscache_n_object_dead;
50107
50108 -extern atomic_t fscache_n_checkaux_none;
50109 -extern atomic_t fscache_n_checkaux_okay;
50110 -extern atomic_t fscache_n_checkaux_update;
50111 -extern atomic_t fscache_n_checkaux_obsolete;
50112 +extern atomic_unchecked_t fscache_n_checkaux_none;
50113 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50114 +extern atomic_unchecked_t fscache_n_checkaux_update;
50115 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50116
50117 extern atomic_t fscache_n_cop_alloc_object;
50118 extern atomic_t fscache_n_cop_lookup_object;
50119 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50120 atomic_inc(stat);
50121 }
50122
50123 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50124 +{
50125 + atomic_inc_unchecked(stat);
50126 +}
50127 +
50128 static inline void fscache_stat_d(atomic_t *stat)
50129 {
50130 atomic_dec(stat);
50131 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50132
50133 #define __fscache_stat(stat) (NULL)
50134 #define fscache_stat(stat) do {} while (0)
50135 +#define fscache_stat_unchecked(stat) do {} while (0)
50136 #define fscache_stat_d(stat) do {} while (0)
50137 #endif
50138
50139 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50140 index e513ac5..e888d34 100644
50141 --- a/fs/fscache/object.c
50142 +++ b/fs/fscache/object.c
50143 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50144 /* update the object metadata on disk */
50145 case FSCACHE_OBJECT_UPDATING:
50146 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50147 - fscache_stat(&fscache_n_updates_run);
50148 + fscache_stat_unchecked(&fscache_n_updates_run);
50149 fscache_stat(&fscache_n_cop_update_object);
50150 object->cache->ops->update_object(object);
50151 fscache_stat_d(&fscache_n_cop_update_object);
50152 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50153 spin_lock(&object->lock);
50154 object->state = FSCACHE_OBJECT_DEAD;
50155 spin_unlock(&object->lock);
50156 - fscache_stat(&fscache_n_object_dead);
50157 + fscache_stat_unchecked(&fscache_n_object_dead);
50158 goto terminal_transit;
50159
50160 /* handle the parent cache of this object being withdrawn from
50161 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50162 spin_lock(&object->lock);
50163 object->state = FSCACHE_OBJECT_DEAD;
50164 spin_unlock(&object->lock);
50165 - fscache_stat(&fscache_n_object_dead);
50166 + fscache_stat_unchecked(&fscache_n_object_dead);
50167 goto terminal_transit;
50168
50169 /* complain about the object being woken up once it is
50170 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50171 parent->cookie->def->name, cookie->def->name,
50172 object->cache->tag->name);
50173
50174 - fscache_stat(&fscache_n_object_lookups);
50175 + fscache_stat_unchecked(&fscache_n_object_lookups);
50176 fscache_stat(&fscache_n_cop_lookup_object);
50177 ret = object->cache->ops->lookup_object(object);
50178 fscache_stat_d(&fscache_n_cop_lookup_object);
50179 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50180 if (ret == -ETIMEDOUT) {
50181 /* probably stuck behind another object, so move this one to
50182 * the back of the queue */
50183 - fscache_stat(&fscache_n_object_lookups_timed_out);
50184 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50185 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50186 }
50187
50188 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50189
50190 spin_lock(&object->lock);
50191 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50192 - fscache_stat(&fscache_n_object_lookups_negative);
50193 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50194
50195 /* transit here to allow write requests to begin stacking up
50196 * and read requests to begin returning ENODATA */
50197 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50198 * result, in which case there may be data available */
50199 spin_lock(&object->lock);
50200 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50201 - fscache_stat(&fscache_n_object_lookups_positive);
50202 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50203
50204 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50205
50206 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50207 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50208 } else {
50209 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50210 - fscache_stat(&fscache_n_object_created);
50211 + fscache_stat_unchecked(&fscache_n_object_created);
50212
50213 object->state = FSCACHE_OBJECT_AVAILABLE;
50214 spin_unlock(&object->lock);
50215 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50216 fscache_enqueue_dependents(object);
50217
50218 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50219 - fscache_stat(&fscache_n_object_avail);
50220 + fscache_stat_unchecked(&fscache_n_object_avail);
50221
50222 _leave("");
50223 }
50224 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50225 enum fscache_checkaux result;
50226
50227 if (!object->cookie->def->check_aux) {
50228 - fscache_stat(&fscache_n_checkaux_none);
50229 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50230 return FSCACHE_CHECKAUX_OKAY;
50231 }
50232
50233 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50234 switch (result) {
50235 /* entry okay as is */
50236 case FSCACHE_CHECKAUX_OKAY:
50237 - fscache_stat(&fscache_n_checkaux_okay);
50238 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50239 break;
50240
50241 /* entry requires update */
50242 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50243 - fscache_stat(&fscache_n_checkaux_update);
50244 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50245 break;
50246
50247 /* entry requires deletion */
50248 case FSCACHE_CHECKAUX_OBSOLETE:
50249 - fscache_stat(&fscache_n_checkaux_obsolete);
50250 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50251 break;
50252
50253 default:
50254 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50255 index 313e79a..775240f 100644
50256 --- a/fs/fscache/operation.c
50257 +++ b/fs/fscache/operation.c
50258 @@ -16,7 +16,7 @@
50259 #include <linux/seq_file.h>
50260 #include "internal.h"
50261
50262 -atomic_t fscache_op_debug_id;
50263 +atomic_unchecked_t fscache_op_debug_id;
50264 EXPORT_SYMBOL(fscache_op_debug_id);
50265
50266 /**
50267 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50268 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50269 ASSERTCMP(atomic_read(&op->usage), >, 0);
50270
50271 - fscache_stat(&fscache_n_op_enqueue);
50272 + fscache_stat_unchecked(&fscache_n_op_enqueue);
50273 switch (op->flags & FSCACHE_OP_TYPE) {
50274 case FSCACHE_OP_FAST:
50275 _debug("queue fast");
50276 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50277 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50278 if (op->processor)
50279 fscache_enqueue_operation(op);
50280 - fscache_stat(&fscache_n_op_run);
50281 + fscache_stat_unchecked(&fscache_n_op_run);
50282 }
50283
50284 /*
50285 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50286 if (object->n_ops > 0) {
50287 atomic_inc(&op->usage);
50288 list_add_tail(&op->pend_link, &object->pending_ops);
50289 - fscache_stat(&fscache_n_op_pend);
50290 + fscache_stat_unchecked(&fscache_n_op_pend);
50291 } else if (!list_empty(&object->pending_ops)) {
50292 atomic_inc(&op->usage);
50293 list_add_tail(&op->pend_link, &object->pending_ops);
50294 - fscache_stat(&fscache_n_op_pend);
50295 + fscache_stat_unchecked(&fscache_n_op_pend);
50296 fscache_start_operations(object);
50297 } else {
50298 ASSERTCMP(object->n_in_progress, ==, 0);
50299 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50300 object->n_exclusive++; /* reads and writes must wait */
50301 atomic_inc(&op->usage);
50302 list_add_tail(&op->pend_link, &object->pending_ops);
50303 - fscache_stat(&fscache_n_op_pend);
50304 + fscache_stat_unchecked(&fscache_n_op_pend);
50305 ret = 0;
50306 } else {
50307 /* not allowed to submit ops in any other state */
50308 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50309 if (object->n_exclusive > 0) {
50310 atomic_inc(&op->usage);
50311 list_add_tail(&op->pend_link, &object->pending_ops);
50312 - fscache_stat(&fscache_n_op_pend);
50313 + fscache_stat_unchecked(&fscache_n_op_pend);
50314 } else if (!list_empty(&object->pending_ops)) {
50315 atomic_inc(&op->usage);
50316 list_add_tail(&op->pend_link, &object->pending_ops);
50317 - fscache_stat(&fscache_n_op_pend);
50318 + fscache_stat_unchecked(&fscache_n_op_pend);
50319 fscache_start_operations(object);
50320 } else {
50321 ASSERTCMP(object->n_exclusive, ==, 0);
50322 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50323 object->n_ops++;
50324 atomic_inc(&op->usage);
50325 list_add_tail(&op->pend_link, &object->pending_ops);
50326 - fscache_stat(&fscache_n_op_pend);
50327 + fscache_stat_unchecked(&fscache_n_op_pend);
50328 ret = 0;
50329 } else if (object->state == FSCACHE_OBJECT_DYING ||
50330 object->state == FSCACHE_OBJECT_LC_DYING ||
50331 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50332 - fscache_stat(&fscache_n_op_rejected);
50333 + fscache_stat_unchecked(&fscache_n_op_rejected);
50334 ret = -ENOBUFS;
50335 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50336 fscache_report_unexpected_submission(object, op, ostate);
50337 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50338
50339 ret = -EBUSY;
50340 if (!list_empty(&op->pend_link)) {
50341 - fscache_stat(&fscache_n_op_cancelled);
50342 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50343 list_del_init(&op->pend_link);
50344 object->n_ops--;
50345 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50346 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50347 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50348 BUG();
50349
50350 - fscache_stat(&fscache_n_op_release);
50351 + fscache_stat_unchecked(&fscache_n_op_release);
50352
50353 if (op->release) {
50354 op->release(op);
50355 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50356 * lock, and defer it otherwise */
50357 if (!spin_trylock(&object->lock)) {
50358 _debug("defer put");
50359 - fscache_stat(&fscache_n_op_deferred_release);
50360 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50361
50362 cache = object->cache;
50363 spin_lock(&cache->op_gc_list_lock);
50364 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50365
50366 _debug("GC DEFERRED REL OBJ%x OP%x",
50367 object->debug_id, op->debug_id);
50368 - fscache_stat(&fscache_n_op_gc);
50369 + fscache_stat_unchecked(&fscache_n_op_gc);
50370
50371 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50372
50373 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50374 index c598ea4..6aac13e 100644
50375 --- a/fs/fscache/page.c
50376 +++ b/fs/fscache/page.c
50377 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50378 val = radix_tree_lookup(&cookie->stores, page->index);
50379 if (!val) {
50380 rcu_read_unlock();
50381 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50382 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50383 __fscache_uncache_page(cookie, page);
50384 return true;
50385 }
50386 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50387 spin_unlock(&cookie->stores_lock);
50388
50389 if (xpage) {
50390 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50391 - fscache_stat(&fscache_n_store_radix_deletes);
50392 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50393 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50394 ASSERTCMP(xpage, ==, page);
50395 } else {
50396 - fscache_stat(&fscache_n_store_vmscan_gone);
50397 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50398 }
50399
50400 wake_up_bit(&cookie->flags, 0);
50401 @@ -106,7 +106,7 @@ page_busy:
50402 /* we might want to wait here, but that could deadlock the allocator as
50403 * the slow-work threads writing to the cache may all end up sleeping
50404 * on memory allocation */
50405 - fscache_stat(&fscache_n_store_vmscan_busy);
50406 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50407 return false;
50408 }
50409 EXPORT_SYMBOL(__fscache_maybe_release_page);
50410 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50411 FSCACHE_COOKIE_STORING_TAG);
50412 if (!radix_tree_tag_get(&cookie->stores, page->index,
50413 FSCACHE_COOKIE_PENDING_TAG)) {
50414 - fscache_stat(&fscache_n_store_radix_deletes);
50415 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50416 xpage = radix_tree_delete(&cookie->stores, page->index);
50417 }
50418 spin_unlock(&cookie->stores_lock);
50419 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50420
50421 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50422
50423 - fscache_stat(&fscache_n_attr_changed_calls);
50424 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50425
50426 if (fscache_object_is_active(object)) {
50427 fscache_set_op_state(op, "CallFS");
50428 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50429
50430 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50431
50432 - fscache_stat(&fscache_n_attr_changed);
50433 + fscache_stat_unchecked(&fscache_n_attr_changed);
50434
50435 op = kzalloc(sizeof(*op), GFP_KERNEL);
50436 if (!op) {
50437 - fscache_stat(&fscache_n_attr_changed_nomem);
50438 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50439 _leave(" = -ENOMEM");
50440 return -ENOMEM;
50441 }
50442 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50443 if (fscache_submit_exclusive_op(object, op) < 0)
50444 goto nobufs;
50445 spin_unlock(&cookie->lock);
50446 - fscache_stat(&fscache_n_attr_changed_ok);
50447 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50448 fscache_put_operation(op);
50449 _leave(" = 0");
50450 return 0;
50451 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50452 nobufs:
50453 spin_unlock(&cookie->lock);
50454 kfree(op);
50455 - fscache_stat(&fscache_n_attr_changed_nobufs);
50456 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50457 _leave(" = %d", -ENOBUFS);
50458 return -ENOBUFS;
50459 }
50460 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50461 /* allocate a retrieval operation and attempt to submit it */
50462 op = kzalloc(sizeof(*op), GFP_NOIO);
50463 if (!op) {
50464 - fscache_stat(&fscache_n_retrievals_nomem);
50465 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50466 return NULL;
50467 }
50468
50469 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50470 return 0;
50471 }
50472
50473 - fscache_stat(&fscache_n_retrievals_wait);
50474 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50475
50476 jif = jiffies;
50477 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50478 fscache_wait_bit_interruptible,
50479 TASK_INTERRUPTIBLE) != 0) {
50480 - fscache_stat(&fscache_n_retrievals_intr);
50481 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50482 _leave(" = -ERESTARTSYS");
50483 return -ERESTARTSYS;
50484 }
50485 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50486 */
50487 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50488 struct fscache_retrieval *op,
50489 - atomic_t *stat_op_waits,
50490 - atomic_t *stat_object_dead)
50491 + atomic_unchecked_t *stat_op_waits,
50492 + atomic_unchecked_t *stat_object_dead)
50493 {
50494 int ret;
50495
50496 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50497 goto check_if_dead;
50498
50499 _debug(">>> WT");
50500 - fscache_stat(stat_op_waits);
50501 + fscache_stat_unchecked(stat_op_waits);
50502 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50503 fscache_wait_bit_interruptible,
50504 TASK_INTERRUPTIBLE) < 0) {
50505 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50506
50507 check_if_dead:
50508 if (unlikely(fscache_object_is_dead(object))) {
50509 - fscache_stat(stat_object_dead);
50510 + fscache_stat_unchecked(stat_object_dead);
50511 return -ENOBUFS;
50512 }
50513 return 0;
50514 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50515
50516 _enter("%p,%p,,,", cookie, page);
50517
50518 - fscache_stat(&fscache_n_retrievals);
50519 + fscache_stat_unchecked(&fscache_n_retrievals);
50520
50521 if (hlist_empty(&cookie->backing_objects))
50522 goto nobufs;
50523 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50524 goto nobufs_unlock;
50525 spin_unlock(&cookie->lock);
50526
50527 - fscache_stat(&fscache_n_retrieval_ops);
50528 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50529
50530 /* pin the netfs read context in case we need to do the actual netfs
50531 * read because we've encountered a cache read failure */
50532 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50533
50534 error:
50535 if (ret == -ENOMEM)
50536 - fscache_stat(&fscache_n_retrievals_nomem);
50537 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50538 else if (ret == -ERESTARTSYS)
50539 - fscache_stat(&fscache_n_retrievals_intr);
50540 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50541 else if (ret == -ENODATA)
50542 - fscache_stat(&fscache_n_retrievals_nodata);
50543 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50544 else if (ret < 0)
50545 - fscache_stat(&fscache_n_retrievals_nobufs);
50546 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50547 else
50548 - fscache_stat(&fscache_n_retrievals_ok);
50549 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50550
50551 fscache_put_retrieval(op);
50552 _leave(" = %d", ret);
50553 @@ -453,7 +453,7 @@ nobufs_unlock:
50554 spin_unlock(&cookie->lock);
50555 kfree(op);
50556 nobufs:
50557 - fscache_stat(&fscache_n_retrievals_nobufs);
50558 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50559 _leave(" = -ENOBUFS");
50560 return -ENOBUFS;
50561 }
50562 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50563
50564 _enter("%p,,%d,,,", cookie, *nr_pages);
50565
50566 - fscache_stat(&fscache_n_retrievals);
50567 + fscache_stat_unchecked(&fscache_n_retrievals);
50568
50569 if (hlist_empty(&cookie->backing_objects))
50570 goto nobufs;
50571 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50572 goto nobufs_unlock;
50573 spin_unlock(&cookie->lock);
50574
50575 - fscache_stat(&fscache_n_retrieval_ops);
50576 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50577
50578 /* pin the netfs read context in case we need to do the actual netfs
50579 * read because we've encountered a cache read failure */
50580 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50581
50582 error:
50583 if (ret == -ENOMEM)
50584 - fscache_stat(&fscache_n_retrievals_nomem);
50585 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50586 else if (ret == -ERESTARTSYS)
50587 - fscache_stat(&fscache_n_retrievals_intr);
50588 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50589 else if (ret == -ENODATA)
50590 - fscache_stat(&fscache_n_retrievals_nodata);
50591 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50592 else if (ret < 0)
50593 - fscache_stat(&fscache_n_retrievals_nobufs);
50594 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50595 else
50596 - fscache_stat(&fscache_n_retrievals_ok);
50597 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50598
50599 fscache_put_retrieval(op);
50600 _leave(" = %d", ret);
50601 @@ -570,7 +570,7 @@ nobufs_unlock:
50602 spin_unlock(&cookie->lock);
50603 kfree(op);
50604 nobufs:
50605 - fscache_stat(&fscache_n_retrievals_nobufs);
50606 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50607 _leave(" = -ENOBUFS");
50608 return -ENOBUFS;
50609 }
50610 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50611
50612 _enter("%p,%p,,,", cookie, page);
50613
50614 - fscache_stat(&fscache_n_allocs);
50615 + fscache_stat_unchecked(&fscache_n_allocs);
50616
50617 if (hlist_empty(&cookie->backing_objects))
50618 goto nobufs;
50619 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50620 goto nobufs_unlock;
50621 spin_unlock(&cookie->lock);
50622
50623 - fscache_stat(&fscache_n_alloc_ops);
50624 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50625
50626 ret = fscache_wait_for_retrieval_activation(
50627 object, op,
50628 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50629
50630 error:
50631 if (ret == -ERESTARTSYS)
50632 - fscache_stat(&fscache_n_allocs_intr);
50633 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50634 else if (ret < 0)
50635 - fscache_stat(&fscache_n_allocs_nobufs);
50636 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50637 else
50638 - fscache_stat(&fscache_n_allocs_ok);
50639 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50640
50641 fscache_put_retrieval(op);
50642 _leave(" = %d", ret);
50643 @@ -651,7 +651,7 @@ nobufs_unlock:
50644 spin_unlock(&cookie->lock);
50645 kfree(op);
50646 nobufs:
50647 - fscache_stat(&fscache_n_allocs_nobufs);
50648 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50649 _leave(" = -ENOBUFS");
50650 return -ENOBUFS;
50651 }
50652 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50653
50654 spin_lock(&cookie->stores_lock);
50655
50656 - fscache_stat(&fscache_n_store_calls);
50657 + fscache_stat_unchecked(&fscache_n_store_calls);
50658
50659 /* find a page to store */
50660 page = NULL;
50661 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50662 page = results[0];
50663 _debug("gang %d [%lx]", n, page->index);
50664 if (page->index > op->store_limit) {
50665 - fscache_stat(&fscache_n_store_pages_over_limit);
50666 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50667 goto superseded;
50668 }
50669
50670 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50671
50672 if (page) {
50673 fscache_set_op_state(&op->op, "Store");
50674 - fscache_stat(&fscache_n_store_pages);
50675 + fscache_stat_unchecked(&fscache_n_store_pages);
50676 fscache_stat(&fscache_n_cop_write_page);
50677 ret = object->cache->ops->write_page(op, page);
50678 fscache_stat_d(&fscache_n_cop_write_page);
50679 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50680 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50681 ASSERT(PageFsCache(page));
50682
50683 - fscache_stat(&fscache_n_stores);
50684 + fscache_stat_unchecked(&fscache_n_stores);
50685
50686 op = kzalloc(sizeof(*op), GFP_NOIO);
50687 if (!op)
50688 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50689 spin_unlock(&cookie->stores_lock);
50690 spin_unlock(&object->lock);
50691
50692 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50693 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50694 op->store_limit = object->store_limit;
50695
50696 if (fscache_submit_op(object, &op->op) < 0)
50697 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50698
50699 spin_unlock(&cookie->lock);
50700 radix_tree_preload_end();
50701 - fscache_stat(&fscache_n_store_ops);
50702 - fscache_stat(&fscache_n_stores_ok);
50703 + fscache_stat_unchecked(&fscache_n_store_ops);
50704 + fscache_stat_unchecked(&fscache_n_stores_ok);
50705
50706 /* the slow work queue now carries its own ref on the object */
50707 fscache_put_operation(&op->op);
50708 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50709 return 0;
50710
50711 already_queued:
50712 - fscache_stat(&fscache_n_stores_again);
50713 + fscache_stat_unchecked(&fscache_n_stores_again);
50714 already_pending:
50715 spin_unlock(&cookie->stores_lock);
50716 spin_unlock(&object->lock);
50717 spin_unlock(&cookie->lock);
50718 radix_tree_preload_end();
50719 kfree(op);
50720 - fscache_stat(&fscache_n_stores_ok);
50721 + fscache_stat_unchecked(&fscache_n_stores_ok);
50722 _leave(" = 0");
50723 return 0;
50724
50725 @@ -886,14 +886,14 @@ nobufs:
50726 spin_unlock(&cookie->lock);
50727 radix_tree_preload_end();
50728 kfree(op);
50729 - fscache_stat(&fscache_n_stores_nobufs);
50730 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50731 _leave(" = -ENOBUFS");
50732 return -ENOBUFS;
50733
50734 nomem_free:
50735 kfree(op);
50736 nomem:
50737 - fscache_stat(&fscache_n_stores_oom);
50738 + fscache_stat_unchecked(&fscache_n_stores_oom);
50739 _leave(" = -ENOMEM");
50740 return -ENOMEM;
50741 }
50742 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50743 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50744 ASSERTCMP(page, !=, NULL);
50745
50746 - fscache_stat(&fscache_n_uncaches);
50747 + fscache_stat_unchecked(&fscache_n_uncaches);
50748
50749 /* cache withdrawal may beat us to it */
50750 if (!PageFsCache(page))
50751 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50752 unsigned long loop;
50753
50754 #ifdef CONFIG_FSCACHE_STATS
50755 - atomic_add(pagevec->nr, &fscache_n_marks);
50756 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50757 #endif
50758
50759 for (loop = 0; loop < pagevec->nr; loop++) {
50760 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50761 index 46435f3..8cddf18 100644
50762 --- a/fs/fscache/stats.c
50763 +++ b/fs/fscache/stats.c
50764 @@ -18,95 +18,95 @@
50765 /*
50766 * operation counters
50767 */
50768 -atomic_t fscache_n_op_pend;
50769 -atomic_t fscache_n_op_run;
50770 -atomic_t fscache_n_op_enqueue;
50771 -atomic_t fscache_n_op_requeue;
50772 -atomic_t fscache_n_op_deferred_release;
50773 -atomic_t fscache_n_op_release;
50774 -atomic_t fscache_n_op_gc;
50775 -atomic_t fscache_n_op_cancelled;
50776 -atomic_t fscache_n_op_rejected;
50777 +atomic_unchecked_t fscache_n_op_pend;
50778 +atomic_unchecked_t fscache_n_op_run;
50779 +atomic_unchecked_t fscache_n_op_enqueue;
50780 +atomic_unchecked_t fscache_n_op_requeue;
50781 +atomic_unchecked_t fscache_n_op_deferred_release;
50782 +atomic_unchecked_t fscache_n_op_release;
50783 +atomic_unchecked_t fscache_n_op_gc;
50784 +atomic_unchecked_t fscache_n_op_cancelled;
50785 +atomic_unchecked_t fscache_n_op_rejected;
50786
50787 -atomic_t fscache_n_attr_changed;
50788 -atomic_t fscache_n_attr_changed_ok;
50789 -atomic_t fscache_n_attr_changed_nobufs;
50790 -atomic_t fscache_n_attr_changed_nomem;
50791 -atomic_t fscache_n_attr_changed_calls;
50792 +atomic_unchecked_t fscache_n_attr_changed;
50793 +atomic_unchecked_t fscache_n_attr_changed_ok;
50794 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50795 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50796 +atomic_unchecked_t fscache_n_attr_changed_calls;
50797
50798 -atomic_t fscache_n_allocs;
50799 -atomic_t fscache_n_allocs_ok;
50800 -atomic_t fscache_n_allocs_wait;
50801 -atomic_t fscache_n_allocs_nobufs;
50802 -atomic_t fscache_n_allocs_intr;
50803 -atomic_t fscache_n_allocs_object_dead;
50804 -atomic_t fscache_n_alloc_ops;
50805 -atomic_t fscache_n_alloc_op_waits;
50806 +atomic_unchecked_t fscache_n_allocs;
50807 +atomic_unchecked_t fscache_n_allocs_ok;
50808 +atomic_unchecked_t fscache_n_allocs_wait;
50809 +atomic_unchecked_t fscache_n_allocs_nobufs;
50810 +atomic_unchecked_t fscache_n_allocs_intr;
50811 +atomic_unchecked_t fscache_n_allocs_object_dead;
50812 +atomic_unchecked_t fscache_n_alloc_ops;
50813 +atomic_unchecked_t fscache_n_alloc_op_waits;
50814
50815 -atomic_t fscache_n_retrievals;
50816 -atomic_t fscache_n_retrievals_ok;
50817 -atomic_t fscache_n_retrievals_wait;
50818 -atomic_t fscache_n_retrievals_nodata;
50819 -atomic_t fscache_n_retrievals_nobufs;
50820 -atomic_t fscache_n_retrievals_intr;
50821 -atomic_t fscache_n_retrievals_nomem;
50822 -atomic_t fscache_n_retrievals_object_dead;
50823 -atomic_t fscache_n_retrieval_ops;
50824 -atomic_t fscache_n_retrieval_op_waits;
50825 +atomic_unchecked_t fscache_n_retrievals;
50826 +atomic_unchecked_t fscache_n_retrievals_ok;
50827 +atomic_unchecked_t fscache_n_retrievals_wait;
50828 +atomic_unchecked_t fscache_n_retrievals_nodata;
50829 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50830 +atomic_unchecked_t fscache_n_retrievals_intr;
50831 +atomic_unchecked_t fscache_n_retrievals_nomem;
50832 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50833 +atomic_unchecked_t fscache_n_retrieval_ops;
50834 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50835
50836 -atomic_t fscache_n_stores;
50837 -atomic_t fscache_n_stores_ok;
50838 -atomic_t fscache_n_stores_again;
50839 -atomic_t fscache_n_stores_nobufs;
50840 -atomic_t fscache_n_stores_oom;
50841 -atomic_t fscache_n_store_ops;
50842 -atomic_t fscache_n_store_calls;
50843 -atomic_t fscache_n_store_pages;
50844 -atomic_t fscache_n_store_radix_deletes;
50845 -atomic_t fscache_n_store_pages_over_limit;
50846 +atomic_unchecked_t fscache_n_stores;
50847 +atomic_unchecked_t fscache_n_stores_ok;
50848 +atomic_unchecked_t fscache_n_stores_again;
50849 +atomic_unchecked_t fscache_n_stores_nobufs;
50850 +atomic_unchecked_t fscache_n_stores_oom;
50851 +atomic_unchecked_t fscache_n_store_ops;
50852 +atomic_unchecked_t fscache_n_store_calls;
50853 +atomic_unchecked_t fscache_n_store_pages;
50854 +atomic_unchecked_t fscache_n_store_radix_deletes;
50855 +atomic_unchecked_t fscache_n_store_pages_over_limit;
50856
50857 -atomic_t fscache_n_store_vmscan_not_storing;
50858 -atomic_t fscache_n_store_vmscan_gone;
50859 -atomic_t fscache_n_store_vmscan_busy;
50860 -atomic_t fscache_n_store_vmscan_cancelled;
50861 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50862 +atomic_unchecked_t fscache_n_store_vmscan_gone;
50863 +atomic_unchecked_t fscache_n_store_vmscan_busy;
50864 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50865
50866 -atomic_t fscache_n_marks;
50867 -atomic_t fscache_n_uncaches;
50868 +atomic_unchecked_t fscache_n_marks;
50869 +atomic_unchecked_t fscache_n_uncaches;
50870
50871 -atomic_t fscache_n_acquires;
50872 -atomic_t fscache_n_acquires_null;
50873 -atomic_t fscache_n_acquires_no_cache;
50874 -atomic_t fscache_n_acquires_ok;
50875 -atomic_t fscache_n_acquires_nobufs;
50876 -atomic_t fscache_n_acquires_oom;
50877 +atomic_unchecked_t fscache_n_acquires;
50878 +atomic_unchecked_t fscache_n_acquires_null;
50879 +atomic_unchecked_t fscache_n_acquires_no_cache;
50880 +atomic_unchecked_t fscache_n_acquires_ok;
50881 +atomic_unchecked_t fscache_n_acquires_nobufs;
50882 +atomic_unchecked_t fscache_n_acquires_oom;
50883
50884 -atomic_t fscache_n_updates;
50885 -atomic_t fscache_n_updates_null;
50886 -atomic_t fscache_n_updates_run;
50887 +atomic_unchecked_t fscache_n_updates;
50888 +atomic_unchecked_t fscache_n_updates_null;
50889 +atomic_unchecked_t fscache_n_updates_run;
50890
50891 -atomic_t fscache_n_relinquishes;
50892 -atomic_t fscache_n_relinquishes_null;
50893 -atomic_t fscache_n_relinquishes_waitcrt;
50894 -atomic_t fscache_n_relinquishes_retire;
50895 +atomic_unchecked_t fscache_n_relinquishes;
50896 +atomic_unchecked_t fscache_n_relinquishes_null;
50897 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50898 +atomic_unchecked_t fscache_n_relinquishes_retire;
50899
50900 -atomic_t fscache_n_cookie_index;
50901 -atomic_t fscache_n_cookie_data;
50902 -atomic_t fscache_n_cookie_special;
50903 +atomic_unchecked_t fscache_n_cookie_index;
50904 +atomic_unchecked_t fscache_n_cookie_data;
50905 +atomic_unchecked_t fscache_n_cookie_special;
50906
50907 -atomic_t fscache_n_object_alloc;
50908 -atomic_t fscache_n_object_no_alloc;
50909 -atomic_t fscache_n_object_lookups;
50910 -atomic_t fscache_n_object_lookups_negative;
50911 -atomic_t fscache_n_object_lookups_positive;
50912 -atomic_t fscache_n_object_lookups_timed_out;
50913 -atomic_t fscache_n_object_created;
50914 -atomic_t fscache_n_object_avail;
50915 -atomic_t fscache_n_object_dead;
50916 +atomic_unchecked_t fscache_n_object_alloc;
50917 +atomic_unchecked_t fscache_n_object_no_alloc;
50918 +atomic_unchecked_t fscache_n_object_lookups;
50919 +atomic_unchecked_t fscache_n_object_lookups_negative;
50920 +atomic_unchecked_t fscache_n_object_lookups_positive;
50921 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
50922 +atomic_unchecked_t fscache_n_object_created;
50923 +atomic_unchecked_t fscache_n_object_avail;
50924 +atomic_unchecked_t fscache_n_object_dead;
50925
50926 -atomic_t fscache_n_checkaux_none;
50927 -atomic_t fscache_n_checkaux_okay;
50928 -atomic_t fscache_n_checkaux_update;
50929 -atomic_t fscache_n_checkaux_obsolete;
50930 +atomic_unchecked_t fscache_n_checkaux_none;
50931 +atomic_unchecked_t fscache_n_checkaux_okay;
50932 +atomic_unchecked_t fscache_n_checkaux_update;
50933 +atomic_unchecked_t fscache_n_checkaux_obsolete;
50934
50935 atomic_t fscache_n_cop_alloc_object;
50936 atomic_t fscache_n_cop_lookup_object;
50937 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50938 seq_puts(m, "FS-Cache statistics\n");
50939
50940 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50941 - atomic_read(&fscache_n_cookie_index),
50942 - atomic_read(&fscache_n_cookie_data),
50943 - atomic_read(&fscache_n_cookie_special));
50944 + atomic_read_unchecked(&fscache_n_cookie_index),
50945 + atomic_read_unchecked(&fscache_n_cookie_data),
50946 + atomic_read_unchecked(&fscache_n_cookie_special));
50947
50948 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50949 - atomic_read(&fscache_n_object_alloc),
50950 - atomic_read(&fscache_n_object_no_alloc),
50951 - atomic_read(&fscache_n_object_avail),
50952 - atomic_read(&fscache_n_object_dead));
50953 + atomic_read_unchecked(&fscache_n_object_alloc),
50954 + atomic_read_unchecked(&fscache_n_object_no_alloc),
50955 + atomic_read_unchecked(&fscache_n_object_avail),
50956 + atomic_read_unchecked(&fscache_n_object_dead));
50957 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50958 - atomic_read(&fscache_n_checkaux_none),
50959 - atomic_read(&fscache_n_checkaux_okay),
50960 - atomic_read(&fscache_n_checkaux_update),
50961 - atomic_read(&fscache_n_checkaux_obsolete));
50962 + atomic_read_unchecked(&fscache_n_checkaux_none),
50963 + atomic_read_unchecked(&fscache_n_checkaux_okay),
50964 + atomic_read_unchecked(&fscache_n_checkaux_update),
50965 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50966
50967 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50968 - atomic_read(&fscache_n_marks),
50969 - atomic_read(&fscache_n_uncaches));
50970 + atomic_read_unchecked(&fscache_n_marks),
50971 + atomic_read_unchecked(&fscache_n_uncaches));
50972
50973 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50974 " oom=%u\n",
50975 - atomic_read(&fscache_n_acquires),
50976 - atomic_read(&fscache_n_acquires_null),
50977 - atomic_read(&fscache_n_acquires_no_cache),
50978 - atomic_read(&fscache_n_acquires_ok),
50979 - atomic_read(&fscache_n_acquires_nobufs),
50980 - atomic_read(&fscache_n_acquires_oom));
50981 + atomic_read_unchecked(&fscache_n_acquires),
50982 + atomic_read_unchecked(&fscache_n_acquires_null),
50983 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
50984 + atomic_read_unchecked(&fscache_n_acquires_ok),
50985 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
50986 + atomic_read_unchecked(&fscache_n_acquires_oom));
50987
50988 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50989 - atomic_read(&fscache_n_object_lookups),
50990 - atomic_read(&fscache_n_object_lookups_negative),
50991 - atomic_read(&fscache_n_object_lookups_positive),
50992 - atomic_read(&fscache_n_object_lookups_timed_out),
50993 - atomic_read(&fscache_n_object_created));
50994 + atomic_read_unchecked(&fscache_n_object_lookups),
50995 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
50996 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
50997 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50998 + atomic_read_unchecked(&fscache_n_object_created));
50999
51000 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51001 - atomic_read(&fscache_n_updates),
51002 - atomic_read(&fscache_n_updates_null),
51003 - atomic_read(&fscache_n_updates_run));
51004 + atomic_read_unchecked(&fscache_n_updates),
51005 + atomic_read_unchecked(&fscache_n_updates_null),
51006 + atomic_read_unchecked(&fscache_n_updates_run));
51007
51008 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51009 - atomic_read(&fscache_n_relinquishes),
51010 - atomic_read(&fscache_n_relinquishes_null),
51011 - atomic_read(&fscache_n_relinquishes_waitcrt),
51012 - atomic_read(&fscache_n_relinquishes_retire));
51013 + atomic_read_unchecked(&fscache_n_relinquishes),
51014 + atomic_read_unchecked(&fscache_n_relinquishes_null),
51015 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51016 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
51017
51018 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51019 - atomic_read(&fscache_n_attr_changed),
51020 - atomic_read(&fscache_n_attr_changed_ok),
51021 - atomic_read(&fscache_n_attr_changed_nobufs),
51022 - atomic_read(&fscache_n_attr_changed_nomem),
51023 - atomic_read(&fscache_n_attr_changed_calls));
51024 + atomic_read_unchecked(&fscache_n_attr_changed),
51025 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
51026 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51027 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51028 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
51029
51030 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51031 - atomic_read(&fscache_n_allocs),
51032 - atomic_read(&fscache_n_allocs_ok),
51033 - atomic_read(&fscache_n_allocs_wait),
51034 - atomic_read(&fscache_n_allocs_nobufs),
51035 - atomic_read(&fscache_n_allocs_intr));
51036 + atomic_read_unchecked(&fscache_n_allocs),
51037 + atomic_read_unchecked(&fscache_n_allocs_ok),
51038 + atomic_read_unchecked(&fscache_n_allocs_wait),
51039 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
51040 + atomic_read_unchecked(&fscache_n_allocs_intr));
51041 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51042 - atomic_read(&fscache_n_alloc_ops),
51043 - atomic_read(&fscache_n_alloc_op_waits),
51044 - atomic_read(&fscache_n_allocs_object_dead));
51045 + atomic_read_unchecked(&fscache_n_alloc_ops),
51046 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
51047 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
51048
51049 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51050 " int=%u oom=%u\n",
51051 - atomic_read(&fscache_n_retrievals),
51052 - atomic_read(&fscache_n_retrievals_ok),
51053 - atomic_read(&fscache_n_retrievals_wait),
51054 - atomic_read(&fscache_n_retrievals_nodata),
51055 - atomic_read(&fscache_n_retrievals_nobufs),
51056 - atomic_read(&fscache_n_retrievals_intr),
51057 - atomic_read(&fscache_n_retrievals_nomem));
51058 + atomic_read_unchecked(&fscache_n_retrievals),
51059 + atomic_read_unchecked(&fscache_n_retrievals_ok),
51060 + atomic_read_unchecked(&fscache_n_retrievals_wait),
51061 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
51062 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51063 + atomic_read_unchecked(&fscache_n_retrievals_intr),
51064 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
51065 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51066 - atomic_read(&fscache_n_retrieval_ops),
51067 - atomic_read(&fscache_n_retrieval_op_waits),
51068 - atomic_read(&fscache_n_retrievals_object_dead));
51069 + atomic_read_unchecked(&fscache_n_retrieval_ops),
51070 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51071 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51072
51073 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51074 - atomic_read(&fscache_n_stores),
51075 - atomic_read(&fscache_n_stores_ok),
51076 - atomic_read(&fscache_n_stores_again),
51077 - atomic_read(&fscache_n_stores_nobufs),
51078 - atomic_read(&fscache_n_stores_oom));
51079 + atomic_read_unchecked(&fscache_n_stores),
51080 + atomic_read_unchecked(&fscache_n_stores_ok),
51081 + atomic_read_unchecked(&fscache_n_stores_again),
51082 + atomic_read_unchecked(&fscache_n_stores_nobufs),
51083 + atomic_read_unchecked(&fscache_n_stores_oom));
51084 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51085 - atomic_read(&fscache_n_store_ops),
51086 - atomic_read(&fscache_n_store_calls),
51087 - atomic_read(&fscache_n_store_pages),
51088 - atomic_read(&fscache_n_store_radix_deletes),
51089 - atomic_read(&fscache_n_store_pages_over_limit));
51090 + atomic_read_unchecked(&fscache_n_store_ops),
51091 + atomic_read_unchecked(&fscache_n_store_calls),
51092 + atomic_read_unchecked(&fscache_n_store_pages),
51093 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
51094 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51095
51096 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51097 - atomic_read(&fscache_n_store_vmscan_not_storing),
51098 - atomic_read(&fscache_n_store_vmscan_gone),
51099 - atomic_read(&fscache_n_store_vmscan_busy),
51100 - atomic_read(&fscache_n_store_vmscan_cancelled));
51101 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51102 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51103 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51104 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51105
51106 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51107 - atomic_read(&fscache_n_op_pend),
51108 - atomic_read(&fscache_n_op_run),
51109 - atomic_read(&fscache_n_op_enqueue),
51110 - atomic_read(&fscache_n_op_cancelled),
51111 - atomic_read(&fscache_n_op_rejected));
51112 + atomic_read_unchecked(&fscache_n_op_pend),
51113 + atomic_read_unchecked(&fscache_n_op_run),
51114 + atomic_read_unchecked(&fscache_n_op_enqueue),
51115 + atomic_read_unchecked(&fscache_n_op_cancelled),
51116 + atomic_read_unchecked(&fscache_n_op_rejected));
51117 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51118 - atomic_read(&fscache_n_op_deferred_release),
51119 - atomic_read(&fscache_n_op_release),
51120 - atomic_read(&fscache_n_op_gc));
51121 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51122 + atomic_read_unchecked(&fscache_n_op_release),
51123 + atomic_read_unchecked(&fscache_n_op_gc));
51124
51125 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51126 atomic_read(&fscache_n_cop_alloc_object),
51127 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51128 index de792dc..448b532 100644
51129 --- a/fs/fuse/cuse.c
51130 +++ b/fs/fuse/cuse.c
51131 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51132 INIT_LIST_HEAD(&cuse_conntbl[i]);
51133
51134 /* inherit and extend fuse_dev_operations */
51135 - cuse_channel_fops = fuse_dev_operations;
51136 - cuse_channel_fops.owner = THIS_MODULE;
51137 - cuse_channel_fops.open = cuse_channel_open;
51138 - cuse_channel_fops.release = cuse_channel_release;
51139 + pax_open_kernel();
51140 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51141 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51142 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51143 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51144 + pax_close_kernel();
51145
51146 cuse_class = class_create(THIS_MODULE, "cuse");
51147 if (IS_ERR(cuse_class))
51148 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51149 index 1facb39..7f48557 100644
51150 --- a/fs/fuse/dev.c
51151 +++ b/fs/fuse/dev.c
51152 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51153 {
51154 struct fuse_notify_inval_entry_out outarg;
51155 int err = -EINVAL;
51156 - char buf[FUSE_NAME_MAX+1];
51157 + char *buf = NULL;
51158 struct qstr name;
51159
51160 if (size < sizeof(outarg))
51161 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51162 if (outarg.namelen > FUSE_NAME_MAX)
51163 goto err;
51164
51165 + err = -ENOMEM;
51166 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51167 + if (!buf)
51168 + goto err;
51169 +
51170 err = -EINVAL;
51171 if (size != sizeof(outarg) + outarg.namelen + 1)
51172 goto err;
51173 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51174
51175 down_read(&fc->killsb);
51176 err = -ENOENT;
51177 - if (!fc->sb)
51178 - goto err_unlock;
51179 -
51180 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51181 -
51182 -err_unlock:
51183 + if (fc->sb)
51184 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51185 up_read(&fc->killsb);
51186 + kfree(buf);
51187 return err;
51188
51189 err:
51190 fuse_copy_finish(cs);
51191 + kfree(buf);
51192 return err;
51193 }
51194
51195 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51196 index 4787ae6..73efff7 100644
51197 --- a/fs/fuse/dir.c
51198 +++ b/fs/fuse/dir.c
51199 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51200 return link;
51201 }
51202
51203 -static void free_link(char *link)
51204 +static void free_link(const char *link)
51205 {
51206 if (!IS_ERR(link))
51207 free_page((unsigned long) link);
51208 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51209 index 247436c..e650ccb 100644
51210 --- a/fs/gfs2/ops_inode.c
51211 +++ b/fs/gfs2/ops_inode.c
51212 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51213 unsigned int x;
51214 int error;
51215
51216 + pax_track_stack();
51217 +
51218 if (ndentry->d_inode) {
51219 nip = GFS2_I(ndentry->d_inode);
51220 if (ip == nip)
51221 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51222 index 4463297..4fed53b 100644
51223 --- a/fs/gfs2/sys.c
51224 +++ b/fs/gfs2/sys.c
51225 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51226 return a->store ? a->store(sdp, buf, len) : len;
51227 }
51228
51229 -static struct sysfs_ops gfs2_attr_ops = {
51230 +static const struct sysfs_ops gfs2_attr_ops = {
51231 .show = gfs2_attr_show,
51232 .store = gfs2_attr_store,
51233 };
51234 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51235 return 0;
51236 }
51237
51238 -static struct kset_uevent_ops gfs2_uevent_ops = {
51239 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51240 .uevent = gfs2_uevent,
51241 };
51242
51243 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51244 index f6874ac..7cd98a8 100644
51245 --- a/fs/hfsplus/catalog.c
51246 +++ b/fs/hfsplus/catalog.c
51247 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51248 int err;
51249 u16 type;
51250
51251 + pax_track_stack();
51252 +
51253 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51254 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51255 if (err)
51256 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51257 int entry_size;
51258 int err;
51259
51260 + pax_track_stack();
51261 +
51262 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51263 sb = dir->i_sb;
51264 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51265 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51266 int entry_size, type;
51267 int err = 0;
51268
51269 + pax_track_stack();
51270 +
51271 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51272 dst_dir->i_ino, dst_name->name);
51273 sb = src_dir->i_sb;
51274 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51275 index 5f40236..dac3421 100644
51276 --- a/fs/hfsplus/dir.c
51277 +++ b/fs/hfsplus/dir.c
51278 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51279 struct hfsplus_readdir_data *rd;
51280 u16 type;
51281
51282 + pax_track_stack();
51283 +
51284 if (filp->f_pos >= inode->i_size)
51285 return 0;
51286
51287 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51288 index 1bcf597..905a251 100644
51289 --- a/fs/hfsplus/inode.c
51290 +++ b/fs/hfsplus/inode.c
51291 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51292 int res = 0;
51293 u16 type;
51294
51295 + pax_track_stack();
51296 +
51297 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51298
51299 HFSPLUS_I(inode).dev = 0;
51300 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51301 struct hfs_find_data fd;
51302 hfsplus_cat_entry entry;
51303
51304 + pax_track_stack();
51305 +
51306 if (HFSPLUS_IS_RSRC(inode))
51307 main_inode = HFSPLUS_I(inode).rsrc_inode;
51308
51309 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51310 index f457d2c..7ef4ad5 100644
51311 --- a/fs/hfsplus/ioctl.c
51312 +++ b/fs/hfsplus/ioctl.c
51313 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51314 struct hfsplus_cat_file *file;
51315 int res;
51316
51317 + pax_track_stack();
51318 +
51319 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51320 return -EOPNOTSUPP;
51321
51322 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51323 struct hfsplus_cat_file *file;
51324 ssize_t res = 0;
51325
51326 + pax_track_stack();
51327 +
51328 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51329 return -EOPNOTSUPP;
51330
51331 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51332 index 43022f3..7298079 100644
51333 --- a/fs/hfsplus/super.c
51334 +++ b/fs/hfsplus/super.c
51335 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51336 struct nls_table *nls = NULL;
51337 int err = -EINVAL;
51338
51339 + pax_track_stack();
51340 +
51341 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51342 if (!sbi)
51343 return -ENOMEM;
51344 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51345 index 87a1258..5694d91 100644
51346 --- a/fs/hugetlbfs/inode.c
51347 +++ b/fs/hugetlbfs/inode.c
51348 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51349 .kill_sb = kill_litter_super,
51350 };
51351
51352 -static struct vfsmount *hugetlbfs_vfsmount;
51353 +struct vfsmount *hugetlbfs_vfsmount;
51354
51355 static int can_do_hugetlb_shm(void)
51356 {
51357 diff --git a/fs/ioctl.c b/fs/ioctl.c
51358 index 6c75110..19d2c3c 100644
51359 --- a/fs/ioctl.c
51360 +++ b/fs/ioctl.c
51361 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51362 u64 phys, u64 len, u32 flags)
51363 {
51364 struct fiemap_extent extent;
51365 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51366 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51367
51368 /* only count the extents */
51369 if (fieinfo->fi_extents_max == 0) {
51370 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51371
51372 fieinfo.fi_flags = fiemap.fm_flags;
51373 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51374 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51375 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51376
51377 if (fiemap.fm_extent_count != 0 &&
51378 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51379 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51380 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51381 fiemap.fm_flags = fieinfo.fi_flags;
51382 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51383 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51384 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51385 error = -EFAULT;
51386
51387 return error;
51388 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51389 index b0435dd..81ee0be 100644
51390 --- a/fs/jbd/checkpoint.c
51391 +++ b/fs/jbd/checkpoint.c
51392 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51393 tid_t this_tid;
51394 int result;
51395
51396 + pax_track_stack();
51397 +
51398 jbd_debug(1, "Start checkpoint\n");
51399
51400 /*
51401 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51402 index 546d153..736896c 100644
51403 --- a/fs/jffs2/compr_rtime.c
51404 +++ b/fs/jffs2/compr_rtime.c
51405 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51406 int outpos = 0;
51407 int pos=0;
51408
51409 + pax_track_stack();
51410 +
51411 memset(positions,0,sizeof(positions));
51412
51413 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51414 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51415 int outpos = 0;
51416 int pos=0;
51417
51418 + pax_track_stack();
51419 +
51420 memset(positions,0,sizeof(positions));
51421
51422 while (outpos<destlen) {
51423 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51424 index 170d289..3254b98 100644
51425 --- a/fs/jffs2/compr_rubin.c
51426 +++ b/fs/jffs2/compr_rubin.c
51427 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51428 int ret;
51429 uint32_t mysrclen, mydstlen;
51430
51431 + pax_track_stack();
51432 +
51433 mysrclen = *sourcelen;
51434 mydstlen = *dstlen - 8;
51435
51436 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51437 index b47679b..00d65d3 100644
51438 --- a/fs/jffs2/erase.c
51439 +++ b/fs/jffs2/erase.c
51440 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51441 struct jffs2_unknown_node marker = {
51442 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51443 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51444 - .totlen = cpu_to_je32(c->cleanmarker_size)
51445 + .totlen = cpu_to_je32(c->cleanmarker_size),
51446 + .hdr_crc = cpu_to_je32(0)
51447 };
51448
51449 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51450 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51451 index 5ef7bac..4fd1e3c 100644
51452 --- a/fs/jffs2/wbuf.c
51453 +++ b/fs/jffs2/wbuf.c
51454 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51455 {
51456 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51457 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51458 - .totlen = constant_cpu_to_je32(8)
51459 + .totlen = constant_cpu_to_je32(8),
51460 + .hdr_crc = constant_cpu_to_je32(0)
51461 };
51462
51463 /*
51464 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51465 index 082e844..52012a1 100644
51466 --- a/fs/jffs2/xattr.c
51467 +++ b/fs/jffs2/xattr.c
51468 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51469
51470 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51471
51472 + pax_track_stack();
51473 +
51474 /* Phase.1 : Merge same xref */
51475 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51476 xref_tmphash[i] = NULL;
51477 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51478 index 2234c73..f6e6e6b 100644
51479 --- a/fs/jfs/super.c
51480 +++ b/fs/jfs/super.c
51481 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51482
51483 jfs_inode_cachep =
51484 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51485 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51486 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51487 init_once);
51488 if (jfs_inode_cachep == NULL)
51489 return -ENOMEM;
51490 diff --git a/fs/libfs.c b/fs/libfs.c
51491 index ba36e93..3153fce 100644
51492 --- a/fs/libfs.c
51493 +++ b/fs/libfs.c
51494 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51495
51496 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51497 struct dentry *next;
51498 + char d_name[sizeof(next->d_iname)];
51499 + const unsigned char *name;
51500 +
51501 next = list_entry(p, struct dentry, d_u.d_child);
51502 if (d_unhashed(next) || !next->d_inode)
51503 continue;
51504
51505 spin_unlock(&dcache_lock);
51506 - if (filldir(dirent, next->d_name.name,
51507 + name = next->d_name.name;
51508 + if (name == next->d_iname) {
51509 + memcpy(d_name, name, next->d_name.len);
51510 + name = d_name;
51511 + }
51512 + if (filldir(dirent, name,
51513 next->d_name.len, filp->f_pos,
51514 next->d_inode->i_ino,
51515 dt_type(next->d_inode)) < 0)
51516 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51517 index c325a83..d15b07b 100644
51518 --- a/fs/lockd/clntproc.c
51519 +++ b/fs/lockd/clntproc.c
51520 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51521 /*
51522 * Cookie counter for NLM requests
51523 */
51524 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51525 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51526
51527 void nlmclnt_next_cookie(struct nlm_cookie *c)
51528 {
51529 - u32 cookie = atomic_inc_return(&nlm_cookie);
51530 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51531
51532 memcpy(c->data, &cookie, 4);
51533 c->len=4;
51534 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51535 struct nlm_rqst reqst, *req;
51536 int status;
51537
51538 + pax_track_stack();
51539 +
51540 req = &reqst;
51541 memset(req, 0, sizeof(*req));
51542 locks_init_lock(&req->a_args.lock.fl);
51543 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51544 index 1a54ae1..6a16c27 100644
51545 --- a/fs/lockd/svc.c
51546 +++ b/fs/lockd/svc.c
51547 @@ -43,7 +43,7 @@
51548
51549 static struct svc_program nlmsvc_program;
51550
51551 -struct nlmsvc_binding * nlmsvc_ops;
51552 +const struct nlmsvc_binding * nlmsvc_ops;
51553 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51554
51555 static DEFINE_MUTEX(nlmsvc_mutex);
51556 diff --git a/fs/locks.c b/fs/locks.c
51557 index a8794f2..4041e55 100644
51558 --- a/fs/locks.c
51559 +++ b/fs/locks.c
51560 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51561
51562 static struct kmem_cache *filelock_cache __read_mostly;
51563
51564 +static void locks_init_lock_always(struct file_lock *fl)
51565 +{
51566 + fl->fl_next = NULL;
51567 + fl->fl_fasync = NULL;
51568 + fl->fl_owner = NULL;
51569 + fl->fl_pid = 0;
51570 + fl->fl_nspid = NULL;
51571 + fl->fl_file = NULL;
51572 + fl->fl_flags = 0;
51573 + fl->fl_type = 0;
51574 + fl->fl_start = fl->fl_end = 0;
51575 +}
51576 +
51577 /* Allocate an empty lock structure. */
51578 static struct file_lock *locks_alloc_lock(void)
51579 {
51580 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51581 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51582 +
51583 + if (fl)
51584 + locks_init_lock_always(fl);
51585 +
51586 + return fl;
51587 }
51588
51589 void locks_release_private(struct file_lock *fl)
51590 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51591 INIT_LIST_HEAD(&fl->fl_link);
51592 INIT_LIST_HEAD(&fl->fl_block);
51593 init_waitqueue_head(&fl->fl_wait);
51594 - fl->fl_next = NULL;
51595 - fl->fl_fasync = NULL;
51596 - fl->fl_owner = NULL;
51597 - fl->fl_pid = 0;
51598 - fl->fl_nspid = NULL;
51599 - fl->fl_file = NULL;
51600 - fl->fl_flags = 0;
51601 - fl->fl_type = 0;
51602 - fl->fl_start = fl->fl_end = 0;
51603 fl->fl_ops = NULL;
51604 fl->fl_lmops = NULL;
51605 + locks_init_lock_always(fl);
51606 }
51607
51608 EXPORT_SYMBOL(locks_init_lock);
51609 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51610 return;
51611
51612 if (filp->f_op && filp->f_op->flock) {
51613 - struct file_lock fl = {
51614 + struct file_lock flock = {
51615 .fl_pid = current->tgid,
51616 .fl_file = filp,
51617 .fl_flags = FL_FLOCK,
51618 .fl_type = F_UNLCK,
51619 .fl_end = OFFSET_MAX,
51620 };
51621 - filp->f_op->flock(filp, F_SETLKW, &fl);
51622 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51623 - fl.fl_ops->fl_release_private(&fl);
51624 + filp->f_op->flock(filp, F_SETLKW, &flock);
51625 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51626 + flock.fl_ops->fl_release_private(&flock);
51627 }
51628
51629 lock_kernel();
51630 diff --git a/fs/mbcache.c b/fs/mbcache.c
51631 index ec88ff3..b843a82 100644
51632 --- a/fs/mbcache.c
51633 +++ b/fs/mbcache.c
51634 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51635 if (!cache)
51636 goto fail;
51637 cache->c_name = name;
51638 - cache->c_op.free = NULL;
51639 + *(void **)&cache->c_op.free = NULL;
51640 if (cache_op)
51641 - cache->c_op.free = cache_op->free;
51642 + *(void **)&cache->c_op.free = cache_op->free;
51643 atomic_set(&cache->c_entry_count, 0);
51644 cache->c_bucket_bits = bucket_bits;
51645 #ifdef MB_CACHE_INDEXES_COUNT
51646 diff --git a/fs/namei.c b/fs/namei.c
51647 index b0afbd4..8d065a1 100644
51648 --- a/fs/namei.c
51649 +++ b/fs/namei.c
51650 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51651 return ret;
51652
51653 /*
51654 + * Searching includes executable on directories, else just read.
51655 + */
51656 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51657 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51658 + if (capable(CAP_DAC_READ_SEARCH))
51659 + return 0;
51660 +
51661 + /*
51662 * Read/write DACs are always overridable.
51663 * Executable DACs are overridable if at least one exec bit is set.
51664 */
51665 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51666 if (capable(CAP_DAC_OVERRIDE))
51667 return 0;
51668
51669 - /*
51670 - * Searching includes executable on directories, else just read.
51671 - */
51672 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51673 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51674 - if (capable(CAP_DAC_READ_SEARCH))
51675 - return 0;
51676 -
51677 return -EACCES;
51678 }
51679
51680 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51681 if (!ret)
51682 goto ok;
51683
51684 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51685 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51686 + capable(CAP_DAC_OVERRIDE))
51687 goto ok;
51688
51689 return ret;
51690 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51691 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51692 error = PTR_ERR(cookie);
51693 if (!IS_ERR(cookie)) {
51694 - char *s = nd_get_link(nd);
51695 + const char *s = nd_get_link(nd);
51696 error = 0;
51697 if (s)
51698 error = __vfs_follow_link(nd, s);
51699 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51700 err = security_inode_follow_link(path->dentry, nd);
51701 if (err)
51702 goto loop;
51703 +
51704 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51705 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51706 + err = -EACCES;
51707 + goto loop;
51708 + }
51709 +
51710 current->link_count++;
51711 current->total_link_count++;
51712 nd->depth++;
51713 @@ -1016,11 +1024,19 @@ return_reval:
51714 break;
51715 }
51716 return_base:
51717 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51718 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51719 + path_put(&nd->path);
51720 + return -ENOENT;
51721 + }
51722 return 0;
51723 out_dput:
51724 path_put_conditional(&next, nd);
51725 break;
51726 }
51727 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51728 + err = -ENOENT;
51729 +
51730 path_put(&nd->path);
51731 return_err:
51732 return err;
51733 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51734 int retval = path_init(dfd, name, flags, nd);
51735 if (!retval)
51736 retval = path_walk(name, nd);
51737 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51738 - nd->path.dentry->d_inode))
51739 - audit_inode(name, nd->path.dentry);
51740 +
51741 + if (likely(!retval)) {
51742 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51743 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51744 + retval = -ENOENT;
51745 + if (!audit_dummy_context())
51746 + audit_inode(name, nd->path.dentry);
51747 + }
51748 + }
51749 if (nd->root.mnt) {
51750 path_put(&nd->root);
51751 nd->root.mnt = NULL;
51752 }
51753 +
51754 return retval;
51755 }
51756
51757 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51758 if (error)
51759 goto err_out;
51760
51761 +
51762 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51763 + error = -EPERM;
51764 + goto err_out;
51765 + }
51766 + if (gr_handle_rawio(inode)) {
51767 + error = -EPERM;
51768 + goto err_out;
51769 + }
51770 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51771 + error = -EACCES;
51772 + goto err_out;
51773 + }
51774 +
51775 if (flag & O_TRUNC) {
51776 error = get_write_access(inode);
51777 if (error)
51778 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51779 {
51780 int error;
51781 struct dentry *dir = nd->path.dentry;
51782 + int acc_mode = ACC_MODE(flag);
51783 +
51784 + if (flag & O_TRUNC)
51785 + acc_mode |= MAY_WRITE;
51786 + if (flag & O_APPEND)
51787 + acc_mode |= MAY_APPEND;
51788 +
51789 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51790 + error = -EACCES;
51791 + goto out_unlock;
51792 + }
51793
51794 if (!IS_POSIXACL(dir->d_inode))
51795 mode &= ~current_umask();
51796 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51797 if (error)
51798 goto out_unlock;
51799 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51800 + if (!error)
51801 + gr_handle_create(path->dentry, nd->path.mnt);
51802 out_unlock:
51803 mutex_unlock(&dir->d_inode->i_mutex);
51804 dput(nd->path.dentry);
51805 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51806 &nd, flag);
51807 if (error)
51808 return ERR_PTR(error);
51809 +
51810 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51811 + error = -EPERM;
51812 + goto exit;
51813 + }
51814 +
51815 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51816 + error = -EPERM;
51817 + goto exit;
51818 + }
51819 +
51820 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51821 + error = -EACCES;
51822 + goto exit;
51823 + }
51824 +
51825 goto ok;
51826 }
51827
51828 @@ -1795,6 +1861,19 @@ do_last:
51829 /*
51830 * It already exists.
51831 */
51832 +
51833 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51834 + error = -ENOENT;
51835 + goto exit_mutex_unlock;
51836 + }
51837 +
51838 + /* only check if O_CREAT is specified, all other checks need
51839 + to go into may_open */
51840 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51841 + error = -EACCES;
51842 + goto exit_mutex_unlock;
51843 + }
51844 +
51845 mutex_unlock(&dir->d_inode->i_mutex);
51846 audit_inode(pathname, path.dentry);
51847
51848 @@ -1887,6 +1966,13 @@ do_link:
51849 error = security_inode_follow_link(path.dentry, &nd);
51850 if (error)
51851 goto exit_dput;
51852 +
51853 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51854 + path.dentry, nd.path.mnt)) {
51855 + error = -EACCES;
51856 + goto exit_dput;
51857 + }
51858 +
51859 error = __do_follow_link(&path, &nd);
51860 if (error) {
51861 /* Does someone understand code flow here? Or it is only
51862 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51863 }
51864 return dentry;
51865 eexist:
51866 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51867 + dput(dentry);
51868 + return ERR_PTR(-ENOENT);
51869 + }
51870 dput(dentry);
51871 dentry = ERR_PTR(-EEXIST);
51872 fail:
51873 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51874 error = may_mknod(mode);
51875 if (error)
51876 goto out_dput;
51877 +
51878 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51879 + error = -EPERM;
51880 + goto out_dput;
51881 + }
51882 +
51883 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51884 + error = -EACCES;
51885 + goto out_dput;
51886 + }
51887 +
51888 error = mnt_want_write(nd.path.mnt);
51889 if (error)
51890 goto out_dput;
51891 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51892 }
51893 out_drop_write:
51894 mnt_drop_write(nd.path.mnt);
51895 +
51896 + if (!error)
51897 + gr_handle_create(dentry, nd.path.mnt);
51898 out_dput:
51899 dput(dentry);
51900 out_unlock:
51901 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51902 if (IS_ERR(dentry))
51903 goto out_unlock;
51904
51905 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51906 + error = -EACCES;
51907 + goto out_dput;
51908 + }
51909 +
51910 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51911 mode &= ~current_umask();
51912 error = mnt_want_write(nd.path.mnt);
51913 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51914 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51915 out_drop_write:
51916 mnt_drop_write(nd.path.mnt);
51917 +
51918 + if (!error)
51919 + gr_handle_create(dentry, nd.path.mnt);
51920 +
51921 out_dput:
51922 dput(dentry);
51923 out_unlock:
51924 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51925 char * name;
51926 struct dentry *dentry;
51927 struct nameidata nd;
51928 + ino_t saved_ino = 0;
51929 + dev_t saved_dev = 0;
51930
51931 error = user_path_parent(dfd, pathname, &nd, &name);
51932 if (error)
51933 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51934 error = PTR_ERR(dentry);
51935 if (IS_ERR(dentry))
51936 goto exit2;
51937 +
51938 + if (dentry->d_inode != NULL) {
51939 + saved_ino = dentry->d_inode->i_ino;
51940 + saved_dev = gr_get_dev_from_dentry(dentry);
51941 +
51942 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51943 + error = -EACCES;
51944 + goto exit3;
51945 + }
51946 + }
51947 +
51948 error = mnt_want_write(nd.path.mnt);
51949 if (error)
51950 goto exit3;
51951 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51952 if (error)
51953 goto exit4;
51954 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51955 + if (!error && (saved_dev || saved_ino))
51956 + gr_handle_delete(saved_ino, saved_dev);
51957 exit4:
51958 mnt_drop_write(nd.path.mnt);
51959 exit3:
51960 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51961 struct dentry *dentry;
51962 struct nameidata nd;
51963 struct inode *inode = NULL;
51964 + ino_t saved_ino = 0;
51965 + dev_t saved_dev = 0;
51966
51967 error = user_path_parent(dfd, pathname, &nd, &name);
51968 if (error)
51969 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51970 if (nd.last.name[nd.last.len])
51971 goto slashes;
51972 inode = dentry->d_inode;
51973 - if (inode)
51974 + if (inode) {
51975 + if (inode->i_nlink <= 1) {
51976 + saved_ino = inode->i_ino;
51977 + saved_dev = gr_get_dev_from_dentry(dentry);
51978 + }
51979 +
51980 atomic_inc(&inode->i_count);
51981 +
51982 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51983 + error = -EACCES;
51984 + goto exit2;
51985 + }
51986 + }
51987 error = mnt_want_write(nd.path.mnt);
51988 if (error)
51989 goto exit2;
51990 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51991 if (error)
51992 goto exit3;
51993 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51994 + if (!error && (saved_ino || saved_dev))
51995 + gr_handle_delete(saved_ino, saved_dev);
51996 exit3:
51997 mnt_drop_write(nd.path.mnt);
51998 exit2:
51999 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52000 if (IS_ERR(dentry))
52001 goto out_unlock;
52002
52003 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52004 + error = -EACCES;
52005 + goto out_dput;
52006 + }
52007 +
52008 error = mnt_want_write(nd.path.mnt);
52009 if (error)
52010 goto out_dput;
52011 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52012 if (error)
52013 goto out_drop_write;
52014 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52015 + if (!error)
52016 + gr_handle_create(dentry, nd.path.mnt);
52017 out_drop_write:
52018 mnt_drop_write(nd.path.mnt);
52019 out_dput:
52020 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52021 error = PTR_ERR(new_dentry);
52022 if (IS_ERR(new_dentry))
52023 goto out_unlock;
52024 +
52025 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52026 + old_path.dentry->d_inode,
52027 + old_path.dentry->d_inode->i_mode, to)) {
52028 + error = -EACCES;
52029 + goto out_dput;
52030 + }
52031 +
52032 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52033 + old_path.dentry, old_path.mnt, to)) {
52034 + error = -EACCES;
52035 + goto out_dput;
52036 + }
52037 +
52038 error = mnt_want_write(nd.path.mnt);
52039 if (error)
52040 goto out_dput;
52041 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52042 if (error)
52043 goto out_drop_write;
52044 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52045 + if (!error)
52046 + gr_handle_create(new_dentry, nd.path.mnt);
52047 out_drop_write:
52048 mnt_drop_write(nd.path.mnt);
52049 out_dput:
52050 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52051 char *to;
52052 int error;
52053
52054 + pax_track_stack();
52055 +
52056 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52057 if (error)
52058 goto exit;
52059 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52060 if (new_dentry == trap)
52061 goto exit5;
52062
52063 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52064 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
52065 + to);
52066 + if (error)
52067 + goto exit5;
52068 +
52069 error = mnt_want_write(oldnd.path.mnt);
52070 if (error)
52071 goto exit5;
52072 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52073 goto exit6;
52074 error = vfs_rename(old_dir->d_inode, old_dentry,
52075 new_dir->d_inode, new_dentry);
52076 + if (!error)
52077 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52078 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52079 exit6:
52080 mnt_drop_write(oldnd.path.mnt);
52081 exit5:
52082 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52083
52084 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52085 {
52086 + char tmpbuf[64];
52087 + const char *newlink;
52088 int len;
52089
52090 len = PTR_ERR(link);
52091 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52092 len = strlen(link);
52093 if (len > (unsigned) buflen)
52094 len = buflen;
52095 - if (copy_to_user(buffer, link, len))
52096 +
52097 + if (len < sizeof(tmpbuf)) {
52098 + memcpy(tmpbuf, link, len);
52099 + newlink = tmpbuf;
52100 + } else
52101 + newlink = link;
52102 +
52103 + if (copy_to_user(buffer, newlink, len))
52104 len = -EFAULT;
52105 out:
52106 return len;
52107 diff --git a/fs/namespace.c b/fs/namespace.c
52108 index 2beb0fb..11a95a5 100644
52109 --- a/fs/namespace.c
52110 +++ b/fs/namespace.c
52111 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52112 if (!(sb->s_flags & MS_RDONLY))
52113 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52114 up_write(&sb->s_umount);
52115 +
52116 + gr_log_remount(mnt->mnt_devname, retval);
52117 +
52118 return retval;
52119 }
52120
52121 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52122 security_sb_umount_busy(mnt);
52123 up_write(&namespace_sem);
52124 release_mounts(&umount_list);
52125 +
52126 + gr_log_unmount(mnt->mnt_devname, retval);
52127 +
52128 return retval;
52129 }
52130
52131 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52132 if (retval)
52133 goto dput_out;
52134
52135 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52136 + retval = -EPERM;
52137 + goto dput_out;
52138 + }
52139 +
52140 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52141 + retval = -EPERM;
52142 + goto dput_out;
52143 + }
52144 +
52145 if (flags & MS_REMOUNT)
52146 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52147 data_page);
52148 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52149 dev_name, data_page);
52150 dput_out:
52151 path_put(&path);
52152 +
52153 + gr_log_mount(dev_name, dir_name, retval);
52154 +
52155 return retval;
52156 }
52157
52158 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52159 goto out1;
52160 }
52161
52162 + if (gr_handle_chroot_pivot()) {
52163 + error = -EPERM;
52164 + path_put(&old);
52165 + goto out1;
52166 + }
52167 +
52168 read_lock(&current->fs->lock);
52169 root = current->fs->root;
52170 path_get(&current->fs->root);
52171 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52172 index b8b5b30..2bd9ccb 100644
52173 --- a/fs/ncpfs/dir.c
52174 +++ b/fs/ncpfs/dir.c
52175 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52176 int res, val = 0, len;
52177 __u8 __name[NCP_MAXPATHLEN + 1];
52178
52179 + pax_track_stack();
52180 +
52181 parent = dget_parent(dentry);
52182 dir = parent->d_inode;
52183
52184 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52185 int error, res, len;
52186 __u8 __name[NCP_MAXPATHLEN + 1];
52187
52188 + pax_track_stack();
52189 +
52190 lock_kernel();
52191 error = -EIO;
52192 if (!ncp_conn_valid(server))
52193 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52194 int error, result, len;
52195 int opmode;
52196 __u8 __name[NCP_MAXPATHLEN + 1];
52197 -
52198 +
52199 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52200 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52201
52202 + pax_track_stack();
52203 +
52204 error = -EIO;
52205 lock_kernel();
52206 if (!ncp_conn_valid(server))
52207 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52208 int error, len;
52209 __u8 __name[NCP_MAXPATHLEN + 1];
52210
52211 + pax_track_stack();
52212 +
52213 DPRINTK("ncp_mkdir: making %s/%s\n",
52214 dentry->d_parent->d_name.name, dentry->d_name.name);
52215
52216 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52217 if (!ncp_conn_valid(server))
52218 goto out;
52219
52220 + pax_track_stack();
52221 +
52222 ncp_age_dentry(server, dentry);
52223 len = sizeof(__name);
52224 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52225 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52226 int old_len, new_len;
52227 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52228
52229 + pax_track_stack();
52230 +
52231 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52232 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52233 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52234 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52235 index cf98da1..da890a9 100644
52236 --- a/fs/ncpfs/inode.c
52237 +++ b/fs/ncpfs/inode.c
52238 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52239 #endif
52240 struct ncp_entry_info finfo;
52241
52242 + pax_track_stack();
52243 +
52244 data.wdog_pid = NULL;
52245 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52246 if (!server)
52247 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52248 index bfaef7b..e9d03ca 100644
52249 --- a/fs/nfs/inode.c
52250 +++ b/fs/nfs/inode.c
52251 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52252 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52253 nfsi->attrtimeo_timestamp = jiffies;
52254
52255 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52256 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52257 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52258 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52259 else
52260 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52261 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52262 }
52263
52264 -static atomic_long_t nfs_attr_generation_counter;
52265 +static atomic_long_unchecked_t nfs_attr_generation_counter;
52266
52267 static unsigned long nfs_read_attr_generation_counter(void)
52268 {
52269 - return atomic_long_read(&nfs_attr_generation_counter);
52270 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52271 }
52272
52273 unsigned long nfs_inc_attr_generation_counter(void)
52274 {
52275 - return atomic_long_inc_return(&nfs_attr_generation_counter);
52276 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52277 }
52278
52279 void nfs_fattr_init(struct nfs_fattr *fattr)
52280 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52281 index cc2f505..f6a236f 100644
52282 --- a/fs/nfsd/lockd.c
52283 +++ b/fs/nfsd/lockd.c
52284 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52285 fput(filp);
52286 }
52287
52288 -static struct nlmsvc_binding nfsd_nlm_ops = {
52289 +static const struct nlmsvc_binding nfsd_nlm_ops = {
52290 .fopen = nlm_fopen, /* open file for locking */
52291 .fclose = nlm_fclose, /* close file */
52292 };
52293 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52294 index cfc3391..dcc083a 100644
52295 --- a/fs/nfsd/nfs4state.c
52296 +++ b/fs/nfsd/nfs4state.c
52297 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52298 unsigned int cmd;
52299 int err;
52300
52301 + pax_track_stack();
52302 +
52303 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52304 (long long) lock->lk_offset,
52305 (long long) lock->lk_length);
52306 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52307 index 4a82a96..0d5fb49 100644
52308 --- a/fs/nfsd/nfs4xdr.c
52309 +++ b/fs/nfsd/nfs4xdr.c
52310 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52311 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52312 u32 minorversion = resp->cstate.minorversion;
52313
52314 + pax_track_stack();
52315 +
52316 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52317 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52318 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52319 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52320 index 2e09588..596421d 100644
52321 --- a/fs/nfsd/vfs.c
52322 +++ b/fs/nfsd/vfs.c
52323 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52324 } else {
52325 oldfs = get_fs();
52326 set_fs(KERNEL_DS);
52327 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52328 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52329 set_fs(oldfs);
52330 }
52331
52332 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52333
52334 /* Write the data. */
52335 oldfs = get_fs(); set_fs(KERNEL_DS);
52336 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52337 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52338 set_fs(oldfs);
52339 if (host_err < 0)
52340 goto out_nfserr;
52341 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52342 */
52343
52344 oldfs = get_fs(); set_fs(KERNEL_DS);
52345 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52346 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52347 set_fs(oldfs);
52348
52349 if (host_err < 0)
52350 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52351 index f6af760..d0adf34 100644
52352 --- a/fs/nilfs2/ioctl.c
52353 +++ b/fs/nilfs2/ioctl.c
52354 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52355 unsigned int cmd, void __user *argp)
52356 {
52357 struct nilfs_argv argv[5];
52358 - const static size_t argsz[5] = {
52359 + static const size_t argsz[5] = {
52360 sizeof(struct nilfs_vdesc),
52361 sizeof(struct nilfs_period),
52362 sizeof(__u64),
52363 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52364 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52365 goto out_free;
52366
52367 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52368 + goto out_free;
52369 +
52370 len = argv[n].v_size * argv[n].v_nmembs;
52371 base = (void __user *)(unsigned long)argv[n].v_base;
52372 if (len == 0) {
52373 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52374 index 7e54e52..9337248 100644
52375 --- a/fs/notify/dnotify/dnotify.c
52376 +++ b/fs/notify/dnotify/dnotify.c
52377 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52378 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52379 }
52380
52381 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52382 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52383 .handle_event = dnotify_handle_event,
52384 .should_send_event = dnotify_should_send_event,
52385 .free_group_priv = NULL,
52386 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52387 index b8bf53b..c518688 100644
52388 --- a/fs/notify/notification.c
52389 +++ b/fs/notify/notification.c
52390 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52391 * get set to 0 so it will never get 'freed'
52392 */
52393 static struct fsnotify_event q_overflow_event;
52394 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52395 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52396
52397 /**
52398 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52399 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52400 */
52401 u32 fsnotify_get_cookie(void)
52402 {
52403 - return atomic_inc_return(&fsnotify_sync_cookie);
52404 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52405 }
52406 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52407
52408 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52409 index 5a9e344..0f8cd28 100644
52410 --- a/fs/ntfs/dir.c
52411 +++ b/fs/ntfs/dir.c
52412 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52413 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52414 ~(s64)(ndir->itype.index.block_size - 1)));
52415 /* Bounds checks. */
52416 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52417 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52418 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52419 "inode 0x%lx or driver bug.", vdir->i_ino);
52420 goto err_out;
52421 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52422 index 663c0e3..b6868e9 100644
52423 --- a/fs/ntfs/file.c
52424 +++ b/fs/ntfs/file.c
52425 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52426 #endif /* NTFS_RW */
52427 };
52428
52429 -const struct file_operations ntfs_empty_file_ops = {};
52430 +const struct file_operations ntfs_empty_file_ops __read_only;
52431
52432 -const struct inode_operations ntfs_empty_inode_ops = {};
52433 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52434 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52435 index 1cd2934..880b5d2 100644
52436 --- a/fs/ocfs2/cluster/masklog.c
52437 +++ b/fs/ocfs2/cluster/masklog.c
52438 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52439 return mlog_mask_store(mlog_attr->mask, buf, count);
52440 }
52441
52442 -static struct sysfs_ops mlog_attr_ops = {
52443 +static const struct sysfs_ops mlog_attr_ops = {
52444 .show = mlog_show,
52445 .store = mlog_store,
52446 };
52447 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52448 index ac10f83..2cd2607 100644
52449 --- a/fs/ocfs2/localalloc.c
52450 +++ b/fs/ocfs2/localalloc.c
52451 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52452 goto bail;
52453 }
52454
52455 - atomic_inc(&osb->alloc_stats.moves);
52456 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52457
52458 status = 0;
52459 bail:
52460 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52461 index f010b22..9f9ed34 100644
52462 --- a/fs/ocfs2/namei.c
52463 +++ b/fs/ocfs2/namei.c
52464 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52465 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52466 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52467
52468 + pax_track_stack();
52469 +
52470 /* At some point it might be nice to break this function up a
52471 * bit. */
52472
52473 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52474 index d963d86..914cfbd 100644
52475 --- a/fs/ocfs2/ocfs2.h
52476 +++ b/fs/ocfs2/ocfs2.h
52477 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52478
52479 struct ocfs2_alloc_stats
52480 {
52481 - atomic_t moves;
52482 - atomic_t local_data;
52483 - atomic_t bitmap_data;
52484 - atomic_t bg_allocs;
52485 - atomic_t bg_extends;
52486 + atomic_unchecked_t moves;
52487 + atomic_unchecked_t local_data;
52488 + atomic_unchecked_t bitmap_data;
52489 + atomic_unchecked_t bg_allocs;
52490 + atomic_unchecked_t bg_extends;
52491 };
52492
52493 enum ocfs2_local_alloc_state
52494 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52495 index 79b5dac..d322952 100644
52496 --- a/fs/ocfs2/suballoc.c
52497 +++ b/fs/ocfs2/suballoc.c
52498 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52499 mlog_errno(status);
52500 goto bail;
52501 }
52502 - atomic_inc(&osb->alloc_stats.bg_extends);
52503 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52504
52505 /* You should never ask for this much metadata */
52506 BUG_ON(bits_wanted >
52507 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52508 mlog_errno(status);
52509 goto bail;
52510 }
52511 - atomic_inc(&osb->alloc_stats.bg_allocs);
52512 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52513
52514 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52515 ac->ac_bits_given += (*num_bits);
52516 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52517 mlog_errno(status);
52518 goto bail;
52519 }
52520 - atomic_inc(&osb->alloc_stats.bg_allocs);
52521 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52522
52523 BUG_ON(num_bits != 1);
52524
52525 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52526 cluster_start,
52527 num_clusters);
52528 if (!status)
52529 - atomic_inc(&osb->alloc_stats.local_data);
52530 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52531 } else {
52532 if (min_clusters > (osb->bitmap_cpg - 1)) {
52533 /* The only paths asking for contiguousness
52534 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52535 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52536 bg_blkno,
52537 bg_bit_off);
52538 - atomic_inc(&osb->alloc_stats.bitmap_data);
52539 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52540 }
52541 }
52542 if (status < 0) {
52543 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52544 index 9f55be4..a3f8048 100644
52545 --- a/fs/ocfs2/super.c
52546 +++ b/fs/ocfs2/super.c
52547 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52548 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52549 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52550 "Stats",
52551 - atomic_read(&osb->alloc_stats.bitmap_data),
52552 - atomic_read(&osb->alloc_stats.local_data),
52553 - atomic_read(&osb->alloc_stats.bg_allocs),
52554 - atomic_read(&osb->alloc_stats.moves),
52555 - atomic_read(&osb->alloc_stats.bg_extends));
52556 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52557 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52558 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52559 + atomic_read_unchecked(&osb->alloc_stats.moves),
52560 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52561
52562 out += snprintf(buf + out, len - out,
52563 "%10s => State: %u Descriptor: %llu Size: %u bits "
52564 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52565 spin_lock_init(&osb->osb_xattr_lock);
52566 ocfs2_init_inode_steal_slot(osb);
52567
52568 - atomic_set(&osb->alloc_stats.moves, 0);
52569 - atomic_set(&osb->alloc_stats.local_data, 0);
52570 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52571 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52572 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52573 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52574 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52575 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52576 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52577 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52578
52579 /* Copy the blockcheck stats from the superblock probe */
52580 osb->osb_ecc_stats = *stats;
52581 diff --git a/fs/open.c b/fs/open.c
52582 index 4f01e06..2a8057a 100644
52583 --- a/fs/open.c
52584 +++ b/fs/open.c
52585 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52586 error = locks_verify_truncate(inode, NULL, length);
52587 if (!error)
52588 error = security_path_truncate(&path, length, 0);
52589 +
52590 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52591 + error = -EACCES;
52592 +
52593 if (!error) {
52594 vfs_dq_init(inode);
52595 error = do_truncate(path.dentry, length, 0, NULL);
52596 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52597 if (__mnt_is_readonly(path.mnt))
52598 res = -EROFS;
52599
52600 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52601 + res = -EACCES;
52602 +
52603 out_path_release:
52604 path_put(&path);
52605 out:
52606 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52607 if (error)
52608 goto dput_and_out;
52609
52610 + gr_log_chdir(path.dentry, path.mnt);
52611 +
52612 set_fs_pwd(current->fs, &path);
52613
52614 dput_and_out:
52615 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52616 goto out_putf;
52617
52618 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52619 +
52620 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52621 + error = -EPERM;
52622 +
52623 + if (!error)
52624 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52625 +
52626 if (!error)
52627 set_fs_pwd(current->fs, &file->f_path);
52628 out_putf:
52629 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52630 if (!capable(CAP_SYS_CHROOT))
52631 goto dput_and_out;
52632
52633 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52634 + goto dput_and_out;
52635 +
52636 set_fs_root(current->fs, &path);
52637 +
52638 + gr_handle_chroot_chdir(&path);
52639 +
52640 error = 0;
52641 dput_and_out:
52642 path_put(&path);
52643 @@ -596,66 +618,57 @@ out:
52644 return error;
52645 }
52646
52647 +static int chmod_common(struct path *path, umode_t mode)
52648 +{
52649 + struct inode *inode = path->dentry->d_inode;
52650 + struct iattr newattrs;
52651 + int error;
52652 +
52653 + error = mnt_want_write(path->mnt);
52654 + if (error)
52655 + return error;
52656 + mutex_lock(&inode->i_mutex);
52657 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52658 + error = -EACCES;
52659 + goto out_unlock;
52660 + }
52661 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52662 + error = -EPERM;
52663 + goto out_unlock;
52664 + }
52665 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52666 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52667 + error = notify_change(path->dentry, &newattrs);
52668 +out_unlock:
52669 + mutex_unlock(&inode->i_mutex);
52670 + mnt_drop_write(path->mnt);
52671 + return error;
52672 +}
52673 +
52674 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52675 {
52676 - struct inode * inode;
52677 - struct dentry * dentry;
52678 struct file * file;
52679 int err = -EBADF;
52680 - struct iattr newattrs;
52681
52682 file = fget(fd);
52683 - if (!file)
52684 - goto out;
52685 -
52686 - dentry = file->f_path.dentry;
52687 - inode = dentry->d_inode;
52688 -
52689 - audit_inode(NULL, dentry);
52690 -
52691 - err = mnt_want_write_file(file);
52692 - if (err)
52693 - goto out_putf;
52694 - mutex_lock(&inode->i_mutex);
52695 - if (mode == (mode_t) -1)
52696 - mode = inode->i_mode;
52697 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52698 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52699 - err = notify_change(dentry, &newattrs);
52700 - mutex_unlock(&inode->i_mutex);
52701 - mnt_drop_write(file->f_path.mnt);
52702 -out_putf:
52703 - fput(file);
52704 -out:
52705 + if (file) {
52706 + audit_inode(NULL, file->f_path.dentry);
52707 + err = chmod_common(&file->f_path, mode);
52708 + fput(file);
52709 + }
52710 return err;
52711 }
52712
52713 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52714 {
52715 struct path path;
52716 - struct inode *inode;
52717 int error;
52718 - struct iattr newattrs;
52719
52720 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
52721 - if (error)
52722 - goto out;
52723 - inode = path.dentry->d_inode;
52724 -
52725 - error = mnt_want_write(path.mnt);
52726 - if (error)
52727 - goto dput_and_out;
52728 - mutex_lock(&inode->i_mutex);
52729 - if (mode == (mode_t) -1)
52730 - mode = inode->i_mode;
52731 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52732 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52733 - error = notify_change(path.dentry, &newattrs);
52734 - mutex_unlock(&inode->i_mutex);
52735 - mnt_drop_write(path.mnt);
52736 -dput_and_out:
52737 - path_put(&path);
52738 -out:
52739 + if (!error) {
52740 + error = chmod_common(&path, mode);
52741 + path_put(&path);
52742 + }
52743 return error;
52744 }
52745
52746 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52747 return sys_fchmodat(AT_FDCWD, filename, mode);
52748 }
52749
52750 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52751 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52752 {
52753 struct inode *inode = dentry->d_inode;
52754 int error;
52755 struct iattr newattrs;
52756
52757 + if (!gr_acl_handle_chown(dentry, mnt))
52758 + return -EACCES;
52759 +
52760 newattrs.ia_valid = ATTR_CTIME;
52761 if (user != (uid_t) -1) {
52762 newattrs.ia_valid |= ATTR_UID;
52763 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52764 error = mnt_want_write(path.mnt);
52765 if (error)
52766 goto out_release;
52767 - error = chown_common(path.dentry, user, group);
52768 + error = chown_common(path.dentry, user, group, path.mnt);
52769 mnt_drop_write(path.mnt);
52770 out_release:
52771 path_put(&path);
52772 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52773 error = mnt_want_write(path.mnt);
52774 if (error)
52775 goto out_release;
52776 - error = chown_common(path.dentry, user, group);
52777 + error = chown_common(path.dentry, user, group, path.mnt);
52778 mnt_drop_write(path.mnt);
52779 out_release:
52780 path_put(&path);
52781 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52782 error = mnt_want_write(path.mnt);
52783 if (error)
52784 goto out_release;
52785 - error = chown_common(path.dentry, user, group);
52786 + error = chown_common(path.dentry, user, group, path.mnt);
52787 mnt_drop_write(path.mnt);
52788 out_release:
52789 path_put(&path);
52790 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52791 goto out_fput;
52792 dentry = file->f_path.dentry;
52793 audit_inode(NULL, dentry);
52794 - error = chown_common(dentry, user, group);
52795 + error = chown_common(dentry, user, group, file->f_path.mnt);
52796 mnt_drop_write(file->f_path.mnt);
52797 out_fput:
52798 fput(file);
52799 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52800 if (!IS_ERR(tmp)) {
52801 fd = get_unused_fd_flags(flags);
52802 if (fd >= 0) {
52803 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52804 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52805 if (IS_ERR(f)) {
52806 put_unused_fd(fd);
52807 fd = PTR_ERR(f);
52808 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52809 index 6ab70f4..f4103d1 100644
52810 --- a/fs/partitions/efi.c
52811 +++ b/fs/partitions/efi.c
52812 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52813 if (!bdev || !gpt)
52814 return NULL;
52815
52816 + if (!le32_to_cpu(gpt->num_partition_entries))
52817 + return NULL;
52818 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52819 + if (!pte)
52820 + return NULL;
52821 +
52822 count = le32_to_cpu(gpt->num_partition_entries) *
52823 le32_to_cpu(gpt->sizeof_partition_entry);
52824 - if (!count)
52825 - return NULL;
52826 - pte = kzalloc(count, GFP_KERNEL);
52827 - if (!pte)
52828 - return NULL;
52829 -
52830 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52831 (u8 *) pte,
52832 count) < count) {
52833 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52834 index dd6efdb..3babc6c 100644
52835 --- a/fs/partitions/ldm.c
52836 +++ b/fs/partitions/ldm.c
52837 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52838 ldm_error ("A VBLK claims to have %d parts.", num);
52839 return false;
52840 }
52841 +
52842 if (rec >= num) {
52843 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52844 return false;
52845 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52846 goto found;
52847 }
52848
52849 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52850 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52851 if (!f) {
52852 ldm_crit ("Out of memory.");
52853 return false;
52854 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52855 index 5765198..7f8e9e0 100644
52856 --- a/fs/partitions/mac.c
52857 +++ b/fs/partitions/mac.c
52858 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52859 return 0; /* not a MacOS disk */
52860 }
52861 blocks_in_map = be32_to_cpu(part->map_count);
52862 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52863 - put_dev_sector(sect);
52864 - return 0;
52865 - }
52866 printk(" [mac]");
52867 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52868 + put_dev_sector(sect);
52869 + return 0;
52870 + }
52871 for (slot = 1; slot <= blocks_in_map; ++slot) {
52872 int pos = slot * secsize;
52873 put_dev_sector(sect);
52874 diff --git a/fs/pipe.c b/fs/pipe.c
52875 index d0cc080..8a6f211 100644
52876 --- a/fs/pipe.c
52877 +++ b/fs/pipe.c
52878 @@ -401,9 +401,9 @@ redo:
52879 }
52880 if (bufs) /* More to do? */
52881 continue;
52882 - if (!pipe->writers)
52883 + if (!atomic_read(&pipe->writers))
52884 break;
52885 - if (!pipe->waiting_writers) {
52886 + if (!atomic_read(&pipe->waiting_writers)) {
52887 /* syscall merging: Usually we must not sleep
52888 * if O_NONBLOCK is set, or if we got some data.
52889 * But if a writer sleeps in kernel space, then
52890 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52891 mutex_lock(&inode->i_mutex);
52892 pipe = inode->i_pipe;
52893
52894 - if (!pipe->readers) {
52895 + if (!atomic_read(&pipe->readers)) {
52896 send_sig(SIGPIPE, current, 0);
52897 ret = -EPIPE;
52898 goto out;
52899 @@ -511,7 +511,7 @@ redo1:
52900 for (;;) {
52901 int bufs;
52902
52903 - if (!pipe->readers) {
52904 + if (!atomic_read(&pipe->readers)) {
52905 send_sig(SIGPIPE, current, 0);
52906 if (!ret)
52907 ret = -EPIPE;
52908 @@ -597,9 +597,9 @@ redo2:
52909 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52910 do_wakeup = 0;
52911 }
52912 - pipe->waiting_writers++;
52913 + atomic_inc(&pipe->waiting_writers);
52914 pipe_wait(pipe);
52915 - pipe->waiting_writers--;
52916 + atomic_dec(&pipe->waiting_writers);
52917 }
52918 out:
52919 mutex_unlock(&inode->i_mutex);
52920 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52921 mask = 0;
52922 if (filp->f_mode & FMODE_READ) {
52923 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52924 - if (!pipe->writers && filp->f_version != pipe->w_counter)
52925 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52926 mask |= POLLHUP;
52927 }
52928
52929 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52930 * Most Unices do not set POLLERR for FIFOs but on Linux they
52931 * behave exactly like pipes for poll().
52932 */
52933 - if (!pipe->readers)
52934 + if (!atomic_read(&pipe->readers))
52935 mask |= POLLERR;
52936 }
52937
52938 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52939
52940 mutex_lock(&inode->i_mutex);
52941 pipe = inode->i_pipe;
52942 - pipe->readers -= decr;
52943 - pipe->writers -= decw;
52944 + atomic_sub(decr, &pipe->readers);
52945 + atomic_sub(decw, &pipe->writers);
52946
52947 - if (!pipe->readers && !pipe->writers) {
52948 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52949 free_pipe_info(inode);
52950 } else {
52951 wake_up_interruptible_sync(&pipe->wait);
52952 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52953
52954 if (inode->i_pipe) {
52955 ret = 0;
52956 - inode->i_pipe->readers++;
52957 + atomic_inc(&inode->i_pipe->readers);
52958 }
52959
52960 mutex_unlock(&inode->i_mutex);
52961 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52962
52963 if (inode->i_pipe) {
52964 ret = 0;
52965 - inode->i_pipe->writers++;
52966 + atomic_inc(&inode->i_pipe->writers);
52967 }
52968
52969 mutex_unlock(&inode->i_mutex);
52970 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52971 if (inode->i_pipe) {
52972 ret = 0;
52973 if (filp->f_mode & FMODE_READ)
52974 - inode->i_pipe->readers++;
52975 + atomic_inc(&inode->i_pipe->readers);
52976 if (filp->f_mode & FMODE_WRITE)
52977 - inode->i_pipe->writers++;
52978 + atomic_inc(&inode->i_pipe->writers);
52979 }
52980
52981 mutex_unlock(&inode->i_mutex);
52982 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52983 inode->i_pipe = NULL;
52984 }
52985
52986 -static struct vfsmount *pipe_mnt __read_mostly;
52987 +struct vfsmount *pipe_mnt __read_mostly;
52988 static int pipefs_delete_dentry(struct dentry *dentry)
52989 {
52990 /*
52991 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52992 goto fail_iput;
52993 inode->i_pipe = pipe;
52994
52995 - pipe->readers = pipe->writers = 1;
52996 + atomic_set(&pipe->readers, 1);
52997 + atomic_set(&pipe->writers, 1);
52998 inode->i_fop = &rdwr_pipefifo_fops;
52999
53000 /*
53001 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53002 index 50f8f06..c5755df 100644
53003 --- a/fs/proc/Kconfig
53004 +++ b/fs/proc/Kconfig
53005 @@ -30,12 +30,12 @@ config PROC_FS
53006
53007 config PROC_KCORE
53008 bool "/proc/kcore support" if !ARM
53009 - depends on PROC_FS && MMU
53010 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53011
53012 config PROC_VMCORE
53013 bool "/proc/vmcore support (EXPERIMENTAL)"
53014 - depends on PROC_FS && CRASH_DUMP
53015 - default y
53016 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53017 + default n
53018 help
53019 Exports the dump image of crashed kernel in ELF format.
53020
53021 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53022 limited in memory.
53023
53024 config PROC_PAGE_MONITOR
53025 - default y
53026 - depends on PROC_FS && MMU
53027 + default n
53028 + depends on PROC_FS && MMU && !GRKERNSEC
53029 bool "Enable /proc page monitoring" if EMBEDDED
53030 help
53031 Various /proc files exist to monitor process memory utilization:
53032 diff --git a/fs/proc/array.c b/fs/proc/array.c
53033 index c5ef152..24a1b87 100644
53034 --- a/fs/proc/array.c
53035 +++ b/fs/proc/array.c
53036 @@ -60,6 +60,7 @@
53037 #include <linux/tty.h>
53038 #include <linux/string.h>
53039 #include <linux/mman.h>
53040 +#include <linux/grsecurity.h>
53041 #include <linux/proc_fs.h>
53042 #include <linux/ioport.h>
53043 #include <linux/uaccess.h>
53044 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53045 p->nivcsw);
53046 }
53047
53048 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53049 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
53050 +{
53051 + if (p->mm)
53052 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53053 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53054 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53055 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53056 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53057 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53058 + else
53059 + seq_printf(m, "PaX:\t-----\n");
53060 +}
53061 +#endif
53062 +
53063 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53064 struct pid *pid, struct task_struct *task)
53065 {
53066 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53067 task_cap(m, task);
53068 cpuset_task_status_allowed(m, task);
53069 task_context_switch_counts(m, task);
53070 +
53071 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53072 + task_pax(m, task);
53073 +#endif
53074 +
53075 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53076 + task_grsec_rbac(m, task);
53077 +#endif
53078 +
53079 return 0;
53080 }
53081
53082 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53083 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53084 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53085 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53086 +#endif
53087 +
53088 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53089 struct pid *pid, struct task_struct *task, int whole)
53090 {
53091 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53092 cputime_t cutime, cstime, utime, stime;
53093 cputime_t cgtime, gtime;
53094 unsigned long rsslim = 0;
53095 - char tcomm[sizeof(task->comm)];
53096 + char tcomm[sizeof(task->comm)] = { 0 };
53097 unsigned long flags;
53098
53099 + pax_track_stack();
53100 +
53101 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53102 + if (current->exec_id != m->exec_id) {
53103 + gr_log_badprocpid("stat");
53104 + return 0;
53105 + }
53106 +#endif
53107 +
53108 state = *get_task_state(task);
53109 vsize = eip = esp = 0;
53110 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53111 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53112 gtime = task_gtime(task);
53113 }
53114
53115 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53116 + if (PAX_RAND_FLAGS(mm)) {
53117 + eip = 0;
53118 + esp = 0;
53119 + wchan = 0;
53120 + }
53121 +#endif
53122 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53123 + wchan = 0;
53124 + eip =0;
53125 + esp =0;
53126 +#endif
53127 +
53128 /* scale priority and nice values from timeslices to -20..20 */
53129 /* to make it look like a "normal" Unix priority/nice value */
53130 priority = task_prio(task);
53131 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53132 vsize,
53133 mm ? get_mm_rss(mm) : 0,
53134 rsslim,
53135 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53136 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53137 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53138 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53139 +#else
53140 mm ? (permitted ? mm->start_code : 1) : 0,
53141 mm ? (permitted ? mm->end_code : 1) : 0,
53142 (permitted && mm) ? mm->start_stack : 0,
53143 +#endif
53144 esp,
53145 eip,
53146 /* The signal information here is obsolete.
53147 @@ -519,6 +578,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53148 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53149 struct mm_struct *mm = get_task_mm(task);
53150
53151 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53152 + if (current->exec_id != m->exec_id) {
53153 + gr_log_badprocpid("statm");
53154 + return 0;
53155 + }
53156 +#endif
53157 +
53158 if (mm) {
53159 size = task_statm(mm, &shared, &text, &data, &resident);
53160 mmput(mm);
53161 @@ -528,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53162
53163 return 0;
53164 }
53165 +
53166 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53167 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53168 +{
53169 + u32 curr_ip = 0;
53170 + unsigned long flags;
53171 +
53172 + if (lock_task_sighand(task, &flags)) {
53173 + curr_ip = task->signal->curr_ip;
53174 + unlock_task_sighand(task, &flags);
53175 + }
53176 +
53177 + return sprintf(buffer, "%pI4\n", &curr_ip);
53178 +}
53179 +#endif
53180 diff --git a/fs/proc/base.c b/fs/proc/base.c
53181 index 67f7dc0..a86ad9a 100644
53182 --- a/fs/proc/base.c
53183 +++ b/fs/proc/base.c
53184 @@ -102,6 +102,22 @@ struct pid_entry {
53185 union proc_op op;
53186 };
53187
53188 +struct getdents_callback {
53189 + struct linux_dirent __user * current_dir;
53190 + struct linux_dirent __user * previous;
53191 + struct file * file;
53192 + int count;
53193 + int error;
53194 +};
53195 +
53196 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53197 + loff_t offset, u64 ino, unsigned int d_type)
53198 +{
53199 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53200 + buf->error = -EINVAL;
53201 + return 0;
53202 +}
53203 +
53204 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53205 .name = (NAME), \
53206 .len = sizeof(NAME) - 1, \
53207 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53208 if (task == current)
53209 return 0;
53210
53211 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53212 + return -EPERM;
53213 +
53214 /*
53215 * If current is actively ptrace'ing, and would also be
53216 * permitted to freshly attach with ptrace now, permit it.
53217 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53218 if (!mm->arg_end)
53219 goto out_mm; /* Shh! No looking before we're done */
53220
53221 + if (gr_acl_handle_procpidmem(task))
53222 + goto out_mm;
53223 +
53224 len = mm->arg_end - mm->arg_start;
53225
53226 if (len > PAGE_SIZE)
53227 @@ -287,12 +309,28 @@ out:
53228 return res;
53229 }
53230
53231 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53232 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53233 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53234 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53235 +#endif
53236 +
53237 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53238 {
53239 int res = 0;
53240 struct mm_struct *mm = get_task_mm(task);
53241 if (mm) {
53242 unsigned int nwords = 0;
53243 +
53244 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53245 + /* allow if we're currently ptracing this task */
53246 + if (PAX_RAND_FLAGS(mm) &&
53247 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53248 + mmput(mm);
53249 + return 0;
53250 + }
53251 +#endif
53252 +
53253 do {
53254 nwords += 2;
53255 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53256 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53257 }
53258
53259
53260 -#ifdef CONFIG_KALLSYMS
53261 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53262 /*
53263 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53264 * Returns the resolved symbol. If that fails, simply return the address.
53265 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53266 mutex_unlock(&task->cred_guard_mutex);
53267 }
53268
53269 -#ifdef CONFIG_STACKTRACE
53270 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53271
53272 #define MAX_STACK_TRACE_DEPTH 64
53273
53274 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53275 return count;
53276 }
53277
53278 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53279 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53280 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53281 {
53282 long nr;
53283 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53284 /************************************************************************/
53285
53286 /* permission checks */
53287 -static int proc_fd_access_allowed(struct inode *inode)
53288 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53289 {
53290 struct task_struct *task;
53291 int allowed = 0;
53292 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53293 */
53294 task = get_proc_task(inode);
53295 if (task) {
53296 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53297 + if (log)
53298 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53299 + else
53300 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53301 put_task_struct(task);
53302 }
53303 return allowed;
53304 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53305 static int mem_open(struct inode* inode, struct file* file)
53306 {
53307 file->private_data = (void*)((long)current->self_exec_id);
53308 +
53309 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53310 + file->f_version = current->exec_id;
53311 +#endif
53312 +
53313 return 0;
53314 }
53315
53316 +static int task_dumpable(struct task_struct *task);
53317 +
53318 static ssize_t mem_read(struct file * file, char __user * buf,
53319 size_t count, loff_t *ppos)
53320 {
53321 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53322 int ret = -ESRCH;
53323 struct mm_struct *mm;
53324
53325 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53326 + if (file->f_version != current->exec_id) {
53327 + gr_log_badprocpid("mem");
53328 + return 0;
53329 + }
53330 +#endif
53331 +
53332 if (!task)
53333 goto out_no_task;
53334
53335 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53336 if (!task)
53337 goto out_no_task;
53338
53339 + if (gr_acl_handle_procpidmem(task))
53340 + goto out;
53341 +
53342 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53343 goto out;
53344
53345 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53346 path_put(&nd->path);
53347
53348 /* Are we allowed to snoop on the tasks file descriptors? */
53349 - if (!proc_fd_access_allowed(inode))
53350 + if (!proc_fd_access_allowed(inode,0))
53351 goto out;
53352
53353 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53354 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53355 struct path path;
53356
53357 /* Are we allowed to snoop on the tasks file descriptors? */
53358 - if (!proc_fd_access_allowed(inode))
53359 - goto out;
53360 + /* logging this is needed for learning on chromium to work properly,
53361 + but we don't want to flood the logs from 'ps' which does a readlink
53362 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53363 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
53364 + */
53365 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53366 + if (!proc_fd_access_allowed(inode,0))
53367 + goto out;
53368 + } else {
53369 + if (!proc_fd_access_allowed(inode,1))
53370 + goto out;
53371 + }
53372
53373 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53374 if (error)
53375 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53376 rcu_read_lock();
53377 cred = __task_cred(task);
53378 inode->i_uid = cred->euid;
53379 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53380 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53381 +#else
53382 inode->i_gid = cred->egid;
53383 +#endif
53384 rcu_read_unlock();
53385 }
53386 security_task_to_inode(task, inode);
53387 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53388 struct inode *inode = dentry->d_inode;
53389 struct task_struct *task;
53390 const struct cred *cred;
53391 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53392 + const struct cred *tmpcred = current_cred();
53393 +#endif
53394
53395 generic_fillattr(inode, stat);
53396
53397 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53398 stat->uid = 0;
53399 stat->gid = 0;
53400 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53401 +
53402 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53403 + rcu_read_unlock();
53404 + return -ENOENT;
53405 + }
53406 +
53407 if (task) {
53408 + cred = __task_cred(task);
53409 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53410 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53411 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53412 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53413 +#endif
53414 + ) {
53415 +#endif
53416 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53417 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53418 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53419 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53420 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53421 +#endif
53422 task_dumpable(task)) {
53423 - cred = __task_cred(task);
53424 stat->uid = cred->euid;
53425 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53426 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53427 +#else
53428 stat->gid = cred->egid;
53429 +#endif
53430 }
53431 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53432 + } else {
53433 + rcu_read_unlock();
53434 + return -ENOENT;
53435 + }
53436 +#endif
53437 }
53438 rcu_read_unlock();
53439 return 0;
53440 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53441
53442 if (task) {
53443 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53444 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53445 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53446 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53447 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53448 +#endif
53449 task_dumpable(task)) {
53450 rcu_read_lock();
53451 cred = __task_cred(task);
53452 inode->i_uid = cred->euid;
53453 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53454 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53455 +#else
53456 inode->i_gid = cred->egid;
53457 +#endif
53458 rcu_read_unlock();
53459 } else {
53460 inode->i_uid = 0;
53461 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53462 int fd = proc_fd(inode);
53463
53464 if (task) {
53465 - files = get_files_struct(task);
53466 + if (!gr_acl_handle_procpidmem(task))
53467 + files = get_files_struct(task);
53468 put_task_struct(task);
53469 }
53470 if (files) {
53471 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53472 static int proc_fd_permission(struct inode *inode, int mask)
53473 {
53474 int rv;
53475 + struct task_struct *task;
53476
53477 rv = generic_permission(inode, mask, NULL);
53478 - if (rv == 0)
53479 - return 0;
53480 +
53481 if (task_pid(current) == proc_pid(inode))
53482 rv = 0;
53483 +
53484 + task = get_proc_task(inode);
53485 + if (task == NULL)
53486 + return rv;
53487 +
53488 + if (gr_acl_handle_procpidmem(task))
53489 + rv = -EACCES;
53490 +
53491 + put_task_struct(task);
53492 +
53493 return rv;
53494 }
53495
53496 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53497 if (!task)
53498 goto out_no_task;
53499
53500 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53501 + goto out;
53502 +
53503 /*
53504 * Yes, it does not scale. And it should not. Don't add
53505 * new entries into /proc/<tgid>/ without very good reasons.
53506 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53507 if (!task)
53508 goto out_no_task;
53509
53510 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53511 + goto out;
53512 +
53513 ret = 0;
53514 i = filp->f_pos;
53515 switch (i) {
53516 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53517 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53518 void *cookie)
53519 {
53520 - char *s = nd_get_link(nd);
53521 + const char *s = nd_get_link(nd);
53522 if (!IS_ERR(s))
53523 __putname(s);
53524 }
53525 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53526 #ifdef CONFIG_SCHED_DEBUG
53527 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53528 #endif
53529 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53530 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53531 INF("syscall", S_IRUGO, proc_pid_syscall),
53532 #endif
53533 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53534 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53535 #ifdef CONFIG_SECURITY
53536 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53537 #endif
53538 -#ifdef CONFIG_KALLSYMS
53539 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53540 INF("wchan", S_IRUGO, proc_pid_wchan),
53541 #endif
53542 -#ifdef CONFIG_STACKTRACE
53543 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53544 ONE("stack", S_IRUGO, proc_pid_stack),
53545 #endif
53546 #ifdef CONFIG_SCHEDSTATS
53547 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53548 #ifdef CONFIG_TASK_IO_ACCOUNTING
53549 INF("io", S_IRUSR, proc_tgid_io_accounting),
53550 #endif
53551 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53552 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53553 +#endif
53554 };
53555
53556 static int proc_tgid_base_readdir(struct file * filp,
53557 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53558 if (!inode)
53559 goto out;
53560
53561 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53562 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53563 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53564 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53565 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53566 +#else
53567 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53568 +#endif
53569 inode->i_op = &proc_tgid_base_inode_operations;
53570 inode->i_fop = &proc_tgid_base_operations;
53571 inode->i_flags|=S_IMMUTABLE;
53572 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53573 if (!task)
53574 goto out;
53575
53576 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53577 + goto out_put_task;
53578 +
53579 result = proc_pid_instantiate(dir, dentry, task, NULL);
53580 +out_put_task:
53581 put_task_struct(task);
53582 out:
53583 return result;
53584 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53585 {
53586 unsigned int nr;
53587 struct task_struct *reaper;
53588 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53589 + const struct cred *tmpcred = current_cred();
53590 + const struct cred *itercred;
53591 +#endif
53592 + filldir_t __filldir = filldir;
53593 struct tgid_iter iter;
53594 struct pid_namespace *ns;
53595
53596 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53597 for (iter = next_tgid(ns, iter);
53598 iter.task;
53599 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53600 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53601 + rcu_read_lock();
53602 + itercred = __task_cred(iter.task);
53603 +#endif
53604 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53605 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53606 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53607 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53608 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53609 +#endif
53610 + )
53611 +#endif
53612 + )
53613 + __filldir = &gr_fake_filldir;
53614 + else
53615 + __filldir = filldir;
53616 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53617 + rcu_read_unlock();
53618 +#endif
53619 filp->f_pos = iter.tgid + TGID_OFFSET;
53620 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53621 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53622 put_task_struct(iter.task);
53623 goto out;
53624 }
53625 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53626 #ifdef CONFIG_SCHED_DEBUG
53627 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53628 #endif
53629 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53630 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53631 INF("syscall", S_IRUGO, proc_pid_syscall),
53632 #endif
53633 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53634 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53635 #ifdef CONFIG_SECURITY
53636 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53637 #endif
53638 -#ifdef CONFIG_KALLSYMS
53639 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53640 INF("wchan", S_IRUGO, proc_pid_wchan),
53641 #endif
53642 -#ifdef CONFIG_STACKTRACE
53643 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53644 ONE("stack", S_IRUGO, proc_pid_stack),
53645 #endif
53646 #ifdef CONFIG_SCHEDSTATS
53647 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53648 index 82676e3..5f8518a 100644
53649 --- a/fs/proc/cmdline.c
53650 +++ b/fs/proc/cmdline.c
53651 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53652
53653 static int __init proc_cmdline_init(void)
53654 {
53655 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53656 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53657 +#else
53658 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53659 +#endif
53660 return 0;
53661 }
53662 module_init(proc_cmdline_init);
53663 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53664 index 59ee7da..469b4b6 100644
53665 --- a/fs/proc/devices.c
53666 +++ b/fs/proc/devices.c
53667 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53668
53669 static int __init proc_devices_init(void)
53670 {
53671 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53672 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53673 +#else
53674 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53675 +#endif
53676 return 0;
53677 }
53678 module_init(proc_devices_init);
53679 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53680 index d78ade3..81767f9 100644
53681 --- a/fs/proc/inode.c
53682 +++ b/fs/proc/inode.c
53683 @@ -18,12 +18,19 @@
53684 #include <linux/module.h>
53685 #include <linux/smp_lock.h>
53686 #include <linux/sysctl.h>
53687 +#include <linux/grsecurity.h>
53688
53689 #include <asm/system.h>
53690 #include <asm/uaccess.h>
53691
53692 #include "internal.h"
53693
53694 +#ifdef CONFIG_PROC_SYSCTL
53695 +extern const struct inode_operations proc_sys_inode_operations;
53696 +extern const struct inode_operations proc_sys_dir_operations;
53697 +#endif
53698 +
53699 +
53700 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53701 {
53702 atomic_inc(&de->count);
53703 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53704 de_put(de);
53705 if (PROC_I(inode)->sysctl)
53706 sysctl_head_put(PROC_I(inode)->sysctl);
53707 +
53708 +#ifdef CONFIG_PROC_SYSCTL
53709 + if (inode->i_op == &proc_sys_inode_operations ||
53710 + inode->i_op == &proc_sys_dir_operations)
53711 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53712 +#endif
53713 +
53714 clear_inode(inode);
53715 }
53716
53717 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53718 if (de->mode) {
53719 inode->i_mode = de->mode;
53720 inode->i_uid = de->uid;
53721 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53722 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53723 +#else
53724 inode->i_gid = de->gid;
53725 +#endif
53726 }
53727 if (de->size)
53728 inode->i_size = de->size;
53729 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53730 index 753ca37..26bcf3b 100644
53731 --- a/fs/proc/internal.h
53732 +++ b/fs/proc/internal.h
53733 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53734 struct pid *pid, struct task_struct *task);
53735 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53736 struct pid *pid, struct task_struct *task);
53737 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53738 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53739 +#endif
53740 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53741
53742 extern const struct file_operations proc_maps_operations;
53743 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53744 index b442dac..aab29cb 100644
53745 --- a/fs/proc/kcore.c
53746 +++ b/fs/proc/kcore.c
53747 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53748 off_t offset = 0;
53749 struct kcore_list *m;
53750
53751 + pax_track_stack();
53752 +
53753 /* setup ELF header */
53754 elf = (struct elfhdr *) bufp;
53755 bufp += sizeof(struct elfhdr);
53756 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53757 * the addresses in the elf_phdr on our list.
53758 */
53759 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53760 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53761 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53762 + if (tsz > buflen)
53763 tsz = buflen;
53764 -
53765 +
53766 while (buflen) {
53767 struct kcore_list *m;
53768
53769 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53770 kfree(elf_buf);
53771 } else {
53772 if (kern_addr_valid(start)) {
53773 - unsigned long n;
53774 + char *elf_buf;
53775 + mm_segment_t oldfs;
53776
53777 - n = copy_to_user(buffer, (char *)start, tsz);
53778 - /*
53779 - * We cannot distingush between fault on source
53780 - * and fault on destination. When this happens
53781 - * we clear too and hope it will trigger the
53782 - * EFAULT again.
53783 - */
53784 - if (n) {
53785 - if (clear_user(buffer + tsz - n,
53786 - n))
53787 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53788 + if (!elf_buf)
53789 + return -ENOMEM;
53790 + oldfs = get_fs();
53791 + set_fs(KERNEL_DS);
53792 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53793 + set_fs(oldfs);
53794 + if (copy_to_user(buffer, elf_buf, tsz)) {
53795 + kfree(elf_buf);
53796 return -EFAULT;
53797 + }
53798 }
53799 + set_fs(oldfs);
53800 + kfree(elf_buf);
53801 } else {
53802 if (clear_user(buffer, tsz))
53803 return -EFAULT;
53804 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53805
53806 static int open_kcore(struct inode *inode, struct file *filp)
53807 {
53808 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53809 + return -EPERM;
53810 +#endif
53811 if (!capable(CAP_SYS_RAWIO))
53812 return -EPERM;
53813 if (kcore_need_update)
53814 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53815 index 7ca7834..cfe90a4 100644
53816 --- a/fs/proc/kmsg.c
53817 +++ b/fs/proc/kmsg.c
53818 @@ -12,37 +12,37 @@
53819 #include <linux/poll.h>
53820 #include <linux/proc_fs.h>
53821 #include <linux/fs.h>
53822 +#include <linux/syslog.h>
53823
53824 #include <asm/uaccess.h>
53825 #include <asm/io.h>
53826
53827 extern wait_queue_head_t log_wait;
53828
53829 -extern int do_syslog(int type, char __user *bug, int count);
53830 -
53831 static int kmsg_open(struct inode * inode, struct file * file)
53832 {
53833 - return do_syslog(1,NULL,0);
53834 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53835 }
53836
53837 static int kmsg_release(struct inode * inode, struct file * file)
53838 {
53839 - (void) do_syslog(0,NULL,0);
53840 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53841 return 0;
53842 }
53843
53844 static ssize_t kmsg_read(struct file *file, char __user *buf,
53845 size_t count, loff_t *ppos)
53846 {
53847 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53848 + if ((file->f_flags & O_NONBLOCK) &&
53849 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53850 return -EAGAIN;
53851 - return do_syslog(2, buf, count);
53852 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53853 }
53854
53855 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53856 {
53857 poll_wait(file, &log_wait, wait);
53858 - if (do_syslog(9, NULL, 0))
53859 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53860 return POLLIN | POLLRDNORM;
53861 return 0;
53862 }
53863 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53864 index a65239c..ad1182a 100644
53865 --- a/fs/proc/meminfo.c
53866 +++ b/fs/proc/meminfo.c
53867 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53868 unsigned long pages[NR_LRU_LISTS];
53869 int lru;
53870
53871 + pax_track_stack();
53872 +
53873 /*
53874 * display in kilobytes.
53875 */
53876 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53877 vmi.used >> 10,
53878 vmi.largest_chunk >> 10
53879 #ifdef CONFIG_MEMORY_FAILURE
53880 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53881 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53882 #endif
53883 );
53884
53885 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53886 index 9fe7d7e..cdb62c9 100644
53887 --- a/fs/proc/nommu.c
53888 +++ b/fs/proc/nommu.c
53889 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53890 if (len < 1)
53891 len = 1;
53892 seq_printf(m, "%*c", len, ' ');
53893 - seq_path(m, &file->f_path, "");
53894 + seq_path(m, &file->f_path, "\n\\");
53895 }
53896
53897 seq_putc(m, '\n');
53898 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53899 index 04d1270..25e1173 100644
53900 --- a/fs/proc/proc_net.c
53901 +++ b/fs/proc/proc_net.c
53902 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53903 struct task_struct *task;
53904 struct nsproxy *ns;
53905 struct net *net = NULL;
53906 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53907 + const struct cred *cred = current_cred();
53908 +#endif
53909 +
53910 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53911 + if (cred->fsuid)
53912 + return net;
53913 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53914 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53915 + return net;
53916 +#endif
53917
53918 rcu_read_lock();
53919 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53920 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53921 index f667e8a..55f4d96 100644
53922 --- a/fs/proc/proc_sysctl.c
53923 +++ b/fs/proc/proc_sysctl.c
53924 @@ -7,11 +7,13 @@
53925 #include <linux/security.h>
53926 #include "internal.h"
53927
53928 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53929 +
53930 static const struct dentry_operations proc_sys_dentry_operations;
53931 static const struct file_operations proc_sys_file_operations;
53932 -static const struct inode_operations proc_sys_inode_operations;
53933 +const struct inode_operations proc_sys_inode_operations;
53934 static const struct file_operations proc_sys_dir_file_operations;
53935 -static const struct inode_operations proc_sys_dir_operations;
53936 +const struct inode_operations proc_sys_dir_operations;
53937
53938 static struct inode *proc_sys_make_inode(struct super_block *sb,
53939 struct ctl_table_header *head, struct ctl_table *table)
53940 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53941 if (!p)
53942 goto out;
53943
53944 + if (gr_handle_sysctl(p, MAY_EXEC))
53945 + goto out;
53946 +
53947 err = ERR_PTR(-ENOMEM);
53948 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53949 if (h)
53950 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53951
53952 err = NULL;
53953 dentry->d_op = &proc_sys_dentry_operations;
53954 +
53955 + gr_handle_proc_create(dentry, inode);
53956 +
53957 d_add(dentry, inode);
53958
53959 out:
53960 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53961 return -ENOMEM;
53962 } else {
53963 child->d_op = &proc_sys_dentry_operations;
53964 +
53965 + gr_handle_proc_create(child, inode);
53966 +
53967 d_add(child, inode);
53968 }
53969 } else {
53970 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53971 if (*pos < file->f_pos)
53972 continue;
53973
53974 + if (gr_handle_sysctl(table, 0))
53975 + continue;
53976 +
53977 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53978 if (res)
53979 return res;
53980 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53981 if (IS_ERR(head))
53982 return PTR_ERR(head);
53983
53984 + if (table && gr_handle_sysctl(table, MAY_EXEC))
53985 + return -ENOENT;
53986 +
53987 generic_fillattr(inode, stat);
53988 if (table)
53989 stat->mode = (stat->mode & S_IFMT) | table->mode;
53990 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53991 };
53992
53993 static const struct file_operations proc_sys_dir_file_operations = {
53994 + .read = generic_read_dir,
53995 .readdir = proc_sys_readdir,
53996 .llseek = generic_file_llseek,
53997 };
53998
53999 -static const struct inode_operations proc_sys_inode_operations = {
54000 +const struct inode_operations proc_sys_inode_operations = {
54001 .permission = proc_sys_permission,
54002 .setattr = proc_sys_setattr,
54003 .getattr = proc_sys_getattr,
54004 };
54005
54006 -static const struct inode_operations proc_sys_dir_operations = {
54007 +const struct inode_operations proc_sys_dir_operations = {
54008 .lookup = proc_sys_lookup,
54009 .permission = proc_sys_permission,
54010 .setattr = proc_sys_setattr,
54011 diff --git a/fs/proc/root.c b/fs/proc/root.c
54012 index b080b79..d957e63 100644
54013 --- a/fs/proc/root.c
54014 +++ b/fs/proc/root.c
54015 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
54016 #ifdef CONFIG_PROC_DEVICETREE
54017 proc_device_tree_init();
54018 #endif
54019 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54020 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54021 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54022 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54023 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54024 +#endif
54025 +#else
54026 proc_mkdir("bus", NULL);
54027 +#endif
54028 proc_sys_init();
54029 }
54030
54031 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54032 index 3b7b82a..4b420b0 100644
54033 --- a/fs/proc/task_mmu.c
54034 +++ b/fs/proc/task_mmu.c
54035 @@ -8,6 +8,7 @@
54036 #include <linux/mempolicy.h>
54037 #include <linux/swap.h>
54038 #include <linux/swapops.h>
54039 +#include <linux/grsecurity.h>
54040
54041 #include <asm/elf.h>
54042 #include <asm/uaccess.h>
54043 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54044 "VmStk:\t%8lu kB\n"
54045 "VmExe:\t%8lu kB\n"
54046 "VmLib:\t%8lu kB\n"
54047 - "VmPTE:\t%8lu kB\n",
54048 - hiwater_vm << (PAGE_SHIFT-10),
54049 + "VmPTE:\t%8lu kB\n"
54050 +
54051 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54052 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54053 +#endif
54054 +
54055 + ,hiwater_vm << (PAGE_SHIFT-10),
54056 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54057 mm->locked_vm << (PAGE_SHIFT-10),
54058 hiwater_rss << (PAGE_SHIFT-10),
54059 total_rss << (PAGE_SHIFT-10),
54060 data << (PAGE_SHIFT-10),
54061 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54062 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54063 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54064 +
54065 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54066 + , mm->context.user_cs_base, mm->context.user_cs_limit
54067 +#endif
54068 +
54069 + );
54070 }
54071
54072 unsigned long task_vsize(struct mm_struct *mm)
54073 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54074 struct proc_maps_private *priv = m->private;
54075 struct vm_area_struct *vma = v;
54076
54077 - vma_stop(priv, vma);
54078 + if (!IS_ERR(vma))
54079 + vma_stop(priv, vma);
54080 if (priv->task)
54081 put_task_struct(priv->task);
54082 }
54083 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54084 return ret;
54085 }
54086
54087 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54088 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54089 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54090 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54091 +#endif
54092 +
54093 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54094 {
54095 struct mm_struct *mm = vma->vm_mm;
54096 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54097 int flags = vma->vm_flags;
54098 unsigned long ino = 0;
54099 unsigned long long pgoff = 0;
54100 - unsigned long start;
54101 dev_t dev = 0;
54102 int len;
54103
54104 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54105 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54106 }
54107
54108 - /* We don't show the stack guard page in /proc/maps */
54109 - start = vma->vm_start;
54110 - if (vma->vm_flags & VM_GROWSDOWN)
54111 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54112 - start += PAGE_SIZE;
54113 -
54114 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54115 - start,
54116 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54117 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54118 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54119 +#else
54120 + vma->vm_start,
54121 vma->vm_end,
54122 +#endif
54123 flags & VM_READ ? 'r' : '-',
54124 flags & VM_WRITE ? 'w' : '-',
54125 flags & VM_EXEC ? 'x' : '-',
54126 flags & VM_MAYSHARE ? 's' : 'p',
54127 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54128 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54129 +#else
54130 pgoff,
54131 +#endif
54132 MAJOR(dev), MINOR(dev), ino, &len);
54133
54134 /*
54135 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54136 */
54137 if (file) {
54138 pad_len_spaces(m, len);
54139 - seq_path(m, &file->f_path, "\n");
54140 + seq_path(m, &file->f_path, "\n\\");
54141 } else {
54142 const char *name = arch_vma_name(vma);
54143 if (!name) {
54144 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54145 if (vma->vm_start <= mm->brk &&
54146 vma->vm_end >= mm->start_brk) {
54147 name = "[heap]";
54148 - } else if (vma->vm_start <= mm->start_stack &&
54149 - vma->vm_end >= mm->start_stack) {
54150 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54151 + (vma->vm_start <= mm->start_stack &&
54152 + vma->vm_end >= mm->start_stack)) {
54153 name = "[stack]";
54154 }
54155 } else {
54156 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54157 struct proc_maps_private *priv = m->private;
54158 struct task_struct *task = priv->task;
54159
54160 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54161 + if (current->exec_id != m->exec_id) {
54162 + gr_log_badprocpid("maps");
54163 + return 0;
54164 + }
54165 +#endif
54166 +
54167 show_map_vma(m, vma);
54168
54169 if (m->count < m->size) /* vma is copied successfully */
54170 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54171 .private = &mss,
54172 };
54173
54174 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54175 + if (current->exec_id != m->exec_id) {
54176 + gr_log_badprocpid("smaps");
54177 + return 0;
54178 + }
54179 +#endif
54180 memset(&mss, 0, sizeof mss);
54181 - mss.vma = vma;
54182 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54183 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54184 +
54185 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54186 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54187 +#endif
54188 + mss.vma = vma;
54189 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54190 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54191 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54192 + }
54193 +#endif
54194
54195 show_map_vma(m, vma);
54196
54197 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54198 "Swap: %8lu kB\n"
54199 "KernelPageSize: %8lu kB\n"
54200 "MMUPageSize: %8lu kB\n",
54201 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54202 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54203 +#else
54204 (vma->vm_end - vma->vm_start) >> 10,
54205 +#endif
54206 mss.resident >> 10,
54207 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54208 mss.shared_clean >> 10,
54209 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54210 index 8f5c05d..c99c76d 100644
54211 --- a/fs/proc/task_nommu.c
54212 +++ b/fs/proc/task_nommu.c
54213 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54214 else
54215 bytes += kobjsize(mm);
54216
54217 - if (current->fs && current->fs->users > 1)
54218 + if (current->fs && atomic_read(&current->fs->users) > 1)
54219 sbytes += kobjsize(current->fs);
54220 else
54221 bytes += kobjsize(current->fs);
54222 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54223 if (len < 1)
54224 len = 1;
54225 seq_printf(m, "%*c", len, ' ');
54226 - seq_path(m, &file->f_path, "");
54227 + seq_path(m, &file->f_path, "\n\\");
54228 }
54229
54230 seq_putc(m, '\n');
54231 diff --git a/fs/readdir.c b/fs/readdir.c
54232 index 7723401..30059a6 100644
54233 --- a/fs/readdir.c
54234 +++ b/fs/readdir.c
54235 @@ -16,6 +16,7 @@
54236 #include <linux/security.h>
54237 #include <linux/syscalls.h>
54238 #include <linux/unistd.h>
54239 +#include <linux/namei.h>
54240
54241 #include <asm/uaccess.h>
54242
54243 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54244
54245 struct readdir_callback {
54246 struct old_linux_dirent __user * dirent;
54247 + struct file * file;
54248 int result;
54249 };
54250
54251 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54252 buf->result = -EOVERFLOW;
54253 return -EOVERFLOW;
54254 }
54255 +
54256 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54257 + return 0;
54258 +
54259 buf->result++;
54260 dirent = buf->dirent;
54261 if (!access_ok(VERIFY_WRITE, dirent,
54262 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54263
54264 buf.result = 0;
54265 buf.dirent = dirent;
54266 + buf.file = file;
54267
54268 error = vfs_readdir(file, fillonedir, &buf);
54269 if (buf.result)
54270 @@ -142,6 +149,7 @@ struct linux_dirent {
54271 struct getdents_callback {
54272 struct linux_dirent __user * current_dir;
54273 struct linux_dirent __user * previous;
54274 + struct file * file;
54275 int count;
54276 int error;
54277 };
54278 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54279 buf->error = -EOVERFLOW;
54280 return -EOVERFLOW;
54281 }
54282 +
54283 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54284 + return 0;
54285 +
54286 dirent = buf->previous;
54287 if (dirent) {
54288 if (__put_user(offset, &dirent->d_off))
54289 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54290 buf.previous = NULL;
54291 buf.count = count;
54292 buf.error = 0;
54293 + buf.file = file;
54294
54295 error = vfs_readdir(file, filldir, &buf);
54296 if (error >= 0)
54297 @@ -228,6 +241,7 @@ out:
54298 struct getdents_callback64 {
54299 struct linux_dirent64 __user * current_dir;
54300 struct linux_dirent64 __user * previous;
54301 + struct file *file;
54302 int count;
54303 int error;
54304 };
54305 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54306 buf->error = -EINVAL; /* only used if we fail.. */
54307 if (reclen > buf->count)
54308 return -EINVAL;
54309 +
54310 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54311 + return 0;
54312 +
54313 dirent = buf->previous;
54314 if (dirent) {
54315 if (__put_user(offset, &dirent->d_off))
54316 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54317
54318 buf.current_dir = dirent;
54319 buf.previous = NULL;
54320 + buf.file = file;
54321 buf.count = count;
54322 buf.error = 0;
54323
54324 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54325 error = buf.error;
54326 lastdirent = buf.previous;
54327 if (lastdirent) {
54328 - typeof(lastdirent->d_off) d_off = file->f_pos;
54329 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54330 if (__put_user(d_off, &lastdirent->d_off))
54331 error = -EFAULT;
54332 else
54333 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54334 index d42c30c..4fd8718 100644
54335 --- a/fs/reiserfs/dir.c
54336 +++ b/fs/reiserfs/dir.c
54337 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54338 struct reiserfs_dir_entry de;
54339 int ret = 0;
54340
54341 + pax_track_stack();
54342 +
54343 reiserfs_write_lock(inode->i_sb);
54344
54345 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54346 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54347 index 128d3f7..8840d44 100644
54348 --- a/fs/reiserfs/do_balan.c
54349 +++ b/fs/reiserfs/do_balan.c
54350 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54351 return;
54352 }
54353
54354 - atomic_inc(&(fs_generation(tb->tb_sb)));
54355 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54356 do_balance_starts(tb);
54357
54358 /* balance leaf returns 0 except if combining L R and S into
54359 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54360 index 72cb1cc..d0e3181 100644
54361 --- a/fs/reiserfs/item_ops.c
54362 +++ b/fs/reiserfs/item_ops.c
54363 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54364 vi->vi_index, vi->vi_type, vi->vi_ih);
54365 }
54366
54367 -static struct item_operations stat_data_ops = {
54368 +static const struct item_operations stat_data_ops = {
54369 .bytes_number = sd_bytes_number,
54370 .decrement_key = sd_decrement_key,
54371 .is_left_mergeable = sd_is_left_mergeable,
54372 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54373 vi->vi_index, vi->vi_type, vi->vi_ih);
54374 }
54375
54376 -static struct item_operations direct_ops = {
54377 +static const struct item_operations direct_ops = {
54378 .bytes_number = direct_bytes_number,
54379 .decrement_key = direct_decrement_key,
54380 .is_left_mergeable = direct_is_left_mergeable,
54381 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54382 vi->vi_index, vi->vi_type, vi->vi_ih);
54383 }
54384
54385 -static struct item_operations indirect_ops = {
54386 +static const struct item_operations indirect_ops = {
54387 .bytes_number = indirect_bytes_number,
54388 .decrement_key = indirect_decrement_key,
54389 .is_left_mergeable = indirect_is_left_mergeable,
54390 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54391 printk("\n");
54392 }
54393
54394 -static struct item_operations direntry_ops = {
54395 +static const struct item_operations direntry_ops = {
54396 .bytes_number = direntry_bytes_number,
54397 .decrement_key = direntry_decrement_key,
54398 .is_left_mergeable = direntry_is_left_mergeable,
54399 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54400 "Invalid item type observed, run fsck ASAP");
54401 }
54402
54403 -static struct item_operations errcatch_ops = {
54404 +static const struct item_operations errcatch_ops = {
54405 errcatch_bytes_number,
54406 errcatch_decrement_key,
54407 errcatch_is_left_mergeable,
54408 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54409 #error Item types must use disk-format assigned values.
54410 #endif
54411
54412 -struct item_operations *item_ops[TYPE_ANY + 1] = {
54413 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54414 &stat_data_ops,
54415 &indirect_ops,
54416 &direct_ops,
54417 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54418 index b5fe0aa..e0e25c4 100644
54419 --- a/fs/reiserfs/journal.c
54420 +++ b/fs/reiserfs/journal.c
54421 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54422 struct buffer_head *bh;
54423 int i, j;
54424
54425 + pax_track_stack();
54426 +
54427 bh = __getblk(dev, block, bufsize);
54428 if (buffer_uptodate(bh))
54429 return (bh);
54430 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54431 index 2715791..b8996db 100644
54432 --- a/fs/reiserfs/namei.c
54433 +++ b/fs/reiserfs/namei.c
54434 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54435 unsigned long savelink = 1;
54436 struct timespec ctime;
54437
54438 + pax_track_stack();
54439 +
54440 /* three balancings: (1) old name removal, (2) new name insertion
54441 and (3) maybe "save" link insertion
54442 stat data updates: (1) old directory,
54443 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54444 index 9229e55..3d2e3b7 100644
54445 --- a/fs/reiserfs/procfs.c
54446 +++ b/fs/reiserfs/procfs.c
54447 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54448 "SMALL_TAILS " : "NO_TAILS ",
54449 replay_only(sb) ? "REPLAY_ONLY " : "",
54450 convert_reiserfs(sb) ? "CONV " : "",
54451 - atomic_read(&r->s_generation_counter),
54452 + atomic_read_unchecked(&r->s_generation_counter),
54453 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54454 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54455 SF(s_good_search_by_key_reada), SF(s_bmaps),
54456 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54457 struct journal_params *jp = &rs->s_v1.s_journal;
54458 char b[BDEVNAME_SIZE];
54459
54460 + pax_track_stack();
54461 +
54462 seq_printf(m, /* on-disk fields */
54463 "jp_journal_1st_block: \t%i\n"
54464 "jp_journal_dev: \t%s[%x]\n"
54465 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54466 index d036ee5..4c7dca1 100644
54467 --- a/fs/reiserfs/stree.c
54468 +++ b/fs/reiserfs/stree.c
54469 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54470 int iter = 0;
54471 #endif
54472
54473 + pax_track_stack();
54474 +
54475 BUG_ON(!th->t_trans_id);
54476
54477 init_tb_struct(th, &s_del_balance, sb, path,
54478 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54479 int retval;
54480 int quota_cut_bytes = 0;
54481
54482 + pax_track_stack();
54483 +
54484 BUG_ON(!th->t_trans_id);
54485
54486 le_key2cpu_key(&cpu_key, key);
54487 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54488 int quota_cut_bytes;
54489 loff_t tail_pos = 0;
54490
54491 + pax_track_stack();
54492 +
54493 BUG_ON(!th->t_trans_id);
54494
54495 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54496 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54497 int retval;
54498 int fs_gen;
54499
54500 + pax_track_stack();
54501 +
54502 BUG_ON(!th->t_trans_id);
54503
54504 fs_gen = get_generation(inode->i_sb);
54505 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54506 int fs_gen = 0;
54507 int quota_bytes = 0;
54508
54509 + pax_track_stack();
54510 +
54511 BUG_ON(!th->t_trans_id);
54512
54513 if (inode) { /* Do we count quotas for item? */
54514 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54515 index 7cb1285..c726cd0 100644
54516 --- a/fs/reiserfs/super.c
54517 +++ b/fs/reiserfs/super.c
54518 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54519 {.option_name = NULL}
54520 };
54521
54522 + pax_track_stack();
54523 +
54524 *blocks = 0;
54525 if (!options || !*options)
54526 /* use default configuration: create tails, journaling on, no
54527 diff --git a/fs/select.c b/fs/select.c
54528 index fd38ce2..f5381b8 100644
54529 --- a/fs/select.c
54530 +++ b/fs/select.c
54531 @@ -20,6 +20,7 @@
54532 #include <linux/module.h>
54533 #include <linux/slab.h>
54534 #include <linux/poll.h>
54535 +#include <linux/security.h>
54536 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54537 #include <linux/file.h>
54538 #include <linux/fdtable.h>
54539 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54540 int retval, i, timed_out = 0;
54541 unsigned long slack = 0;
54542
54543 + pax_track_stack();
54544 +
54545 rcu_read_lock();
54546 retval = max_select_fd(n, fds);
54547 rcu_read_unlock();
54548 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54549 /* Allocate small arguments on the stack to save memory and be faster */
54550 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54551
54552 + pax_track_stack();
54553 +
54554 ret = -EINVAL;
54555 if (n < 0)
54556 goto out_nofds;
54557 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54558 struct poll_list *walk = head;
54559 unsigned long todo = nfds;
54560
54561 + pax_track_stack();
54562 +
54563 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54564 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54565 return -EINVAL;
54566
54567 diff --git a/fs/seq_file.c b/fs/seq_file.c
54568 index eae7d9d..4ddabe2 100644
54569 --- a/fs/seq_file.c
54570 +++ b/fs/seq_file.c
54571 @@ -9,6 +9,7 @@
54572 #include <linux/module.h>
54573 #include <linux/seq_file.h>
54574 #include <linux/slab.h>
54575 +#include <linux/sched.h>
54576
54577 #include <asm/uaccess.h>
54578 #include <asm/page.h>
54579 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54580 memset(p, 0, sizeof(*p));
54581 mutex_init(&p->lock);
54582 p->op = op;
54583 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54584 + p->exec_id = current->exec_id;
54585 +#endif
54586
54587 /*
54588 * Wrappers around seq_open(e.g. swaps_open) need to be
54589 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54590 return 0;
54591 }
54592 if (!m->buf) {
54593 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54594 + m->size = PAGE_SIZE;
54595 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54596 if (!m->buf)
54597 return -ENOMEM;
54598 }
54599 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54600 Eoverflow:
54601 m->op->stop(m, p);
54602 kfree(m->buf);
54603 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54604 + m->size <<= 1;
54605 + m->buf = kmalloc(m->size, GFP_KERNEL);
54606 return !m->buf ? -ENOMEM : -EAGAIN;
54607 }
54608
54609 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54610 m->version = file->f_version;
54611 /* grab buffer if we didn't have one */
54612 if (!m->buf) {
54613 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54614 + m->size = PAGE_SIZE;
54615 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54616 if (!m->buf)
54617 goto Enomem;
54618 }
54619 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54620 goto Fill;
54621 m->op->stop(m, p);
54622 kfree(m->buf);
54623 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54624 + m->size <<= 1;
54625 + m->buf = kmalloc(m->size, GFP_KERNEL);
54626 if (!m->buf)
54627 goto Enomem;
54628 m->count = 0;
54629 @@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54630 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54631 void *data)
54632 {
54633 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54634 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54635 int res = -ENOMEM;
54636
54637 if (op) {
54638 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54639 index 71c29b6..54694dd 100644
54640 --- a/fs/smbfs/proc.c
54641 +++ b/fs/smbfs/proc.c
54642 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54643
54644 out:
54645 if (server->local_nls != NULL && server->remote_nls != NULL)
54646 - server->ops->convert = convert_cp;
54647 + *(void **)&server->ops->convert = convert_cp;
54648 else
54649 - server->ops->convert = convert_memcpy;
54650 + *(void **)&server->ops->convert = convert_memcpy;
54651
54652 smb_unlock_server(server);
54653 return n;
54654 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54655
54656 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54657 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54658 - server->ops->getattr = smb_proc_getattr_core;
54659 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54660 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54661 - server->ops->getattr = smb_proc_getattr_ff;
54662 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54663 }
54664
54665 /* Decode server capabilities */
54666 @@ -3439,7 +3439,7 @@ out:
54667 static void
54668 install_ops(struct smb_ops *dst, struct smb_ops *src)
54669 {
54670 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54671 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54672 }
54673
54674 /* < LANMAN2 */
54675 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54676 index 00b2909..2ace383 100644
54677 --- a/fs/smbfs/symlink.c
54678 +++ b/fs/smbfs/symlink.c
54679 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54680
54681 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54682 {
54683 - char *s = nd_get_link(nd);
54684 + const char *s = nd_get_link(nd);
54685 if (!IS_ERR(s))
54686 __putname(s);
54687 }
54688 diff --git a/fs/splice.c b/fs/splice.c
54689 index bb92b7c..5aa72b0 100644
54690 --- a/fs/splice.c
54691 +++ b/fs/splice.c
54692 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54693 pipe_lock(pipe);
54694
54695 for (;;) {
54696 - if (!pipe->readers) {
54697 + if (!atomic_read(&pipe->readers)) {
54698 send_sig(SIGPIPE, current, 0);
54699 if (!ret)
54700 ret = -EPIPE;
54701 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54702 do_wakeup = 0;
54703 }
54704
54705 - pipe->waiting_writers++;
54706 + atomic_inc(&pipe->waiting_writers);
54707 pipe_wait(pipe);
54708 - pipe->waiting_writers--;
54709 + atomic_dec(&pipe->waiting_writers);
54710 }
54711
54712 pipe_unlock(pipe);
54713 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54714 .spd_release = spd_release_page,
54715 };
54716
54717 + pax_track_stack();
54718 +
54719 index = *ppos >> PAGE_CACHE_SHIFT;
54720 loff = *ppos & ~PAGE_CACHE_MASK;
54721 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54722 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54723 old_fs = get_fs();
54724 set_fs(get_ds());
54725 /* The cast to a user pointer is valid due to the set_fs() */
54726 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54727 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54728 set_fs(old_fs);
54729
54730 return res;
54731 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54732 old_fs = get_fs();
54733 set_fs(get_ds());
54734 /* The cast to a user pointer is valid due to the set_fs() */
54735 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54736 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54737 set_fs(old_fs);
54738
54739 return res;
54740 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54741 .spd_release = spd_release_page,
54742 };
54743
54744 + pax_track_stack();
54745 +
54746 index = *ppos >> PAGE_CACHE_SHIFT;
54747 offset = *ppos & ~PAGE_CACHE_MASK;
54748 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54749 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54750 goto err;
54751
54752 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54753 - vec[i].iov_base = (void __user *) page_address(page);
54754 + vec[i].iov_base = (__force void __user *) page_address(page);
54755 vec[i].iov_len = this_len;
54756 pages[i] = page;
54757 spd.nr_pages++;
54758 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54759 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54760 {
54761 while (!pipe->nrbufs) {
54762 - if (!pipe->writers)
54763 + if (!atomic_read(&pipe->writers))
54764 return 0;
54765
54766 - if (!pipe->waiting_writers && sd->num_spliced)
54767 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54768 return 0;
54769
54770 if (sd->flags & SPLICE_F_NONBLOCK)
54771 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54772 * out of the pipe right after the splice_to_pipe(). So set
54773 * PIPE_READERS appropriately.
54774 */
54775 - pipe->readers = 1;
54776 + atomic_set(&pipe->readers, 1);
54777
54778 current->splice_pipe = pipe;
54779 }
54780 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54781 .spd_release = spd_release_page,
54782 };
54783
54784 + pax_track_stack();
54785 +
54786 pipe = pipe_info(file->f_path.dentry->d_inode);
54787 if (!pipe)
54788 return -EBADF;
54789 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54790 ret = -ERESTARTSYS;
54791 break;
54792 }
54793 - if (!pipe->writers)
54794 + if (!atomic_read(&pipe->writers))
54795 break;
54796 - if (!pipe->waiting_writers) {
54797 + if (!atomic_read(&pipe->waiting_writers)) {
54798 if (flags & SPLICE_F_NONBLOCK) {
54799 ret = -EAGAIN;
54800 break;
54801 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54802 pipe_lock(pipe);
54803
54804 while (pipe->nrbufs >= PIPE_BUFFERS) {
54805 - if (!pipe->readers) {
54806 + if (!atomic_read(&pipe->readers)) {
54807 send_sig(SIGPIPE, current, 0);
54808 ret = -EPIPE;
54809 break;
54810 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54811 ret = -ERESTARTSYS;
54812 break;
54813 }
54814 - pipe->waiting_writers++;
54815 + atomic_inc(&pipe->waiting_writers);
54816 pipe_wait(pipe);
54817 - pipe->waiting_writers--;
54818 + atomic_dec(&pipe->waiting_writers);
54819 }
54820
54821 pipe_unlock(pipe);
54822 @@ -1786,14 +1792,14 @@ retry:
54823 pipe_double_lock(ipipe, opipe);
54824
54825 do {
54826 - if (!opipe->readers) {
54827 + if (!atomic_read(&opipe->readers)) {
54828 send_sig(SIGPIPE, current, 0);
54829 if (!ret)
54830 ret = -EPIPE;
54831 break;
54832 }
54833
54834 - if (!ipipe->nrbufs && !ipipe->writers)
54835 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54836 break;
54837
54838 /*
54839 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54840 pipe_double_lock(ipipe, opipe);
54841
54842 do {
54843 - if (!opipe->readers) {
54844 + if (!atomic_read(&opipe->readers)) {
54845 send_sig(SIGPIPE, current, 0);
54846 if (!ret)
54847 ret = -EPIPE;
54848 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54849 * return EAGAIN if we have the potential of some data in the
54850 * future, otherwise just return 0
54851 */
54852 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54853 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54854 ret = -EAGAIN;
54855
54856 pipe_unlock(ipipe);
54857 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54858 index e020183..18d64b4 100644
54859 --- a/fs/sysfs/dir.c
54860 +++ b/fs/sysfs/dir.c
54861 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54862 struct sysfs_dirent *sd;
54863 int rc;
54864
54865 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54866 + const char *parent_name = parent_sd->s_name;
54867 +
54868 + mode = S_IFDIR | S_IRWXU;
54869 +
54870 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54871 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54872 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54873 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54874 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54875 +#endif
54876 +
54877 /* allocate */
54878 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54879 if (!sd)
54880 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54881 index 7118a38..70af853 100644
54882 --- a/fs/sysfs/file.c
54883 +++ b/fs/sysfs/file.c
54884 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54885
54886 struct sysfs_open_dirent {
54887 atomic_t refcnt;
54888 - atomic_t event;
54889 + atomic_unchecked_t event;
54890 wait_queue_head_t poll;
54891 struct list_head buffers; /* goes through sysfs_buffer.list */
54892 };
54893 @@ -53,7 +53,7 @@ struct sysfs_buffer {
54894 size_t count;
54895 loff_t pos;
54896 char * page;
54897 - struct sysfs_ops * ops;
54898 + const struct sysfs_ops * ops;
54899 struct mutex mutex;
54900 int needs_read_fill;
54901 int event;
54902 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54903 {
54904 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54905 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54906 - struct sysfs_ops * ops = buffer->ops;
54907 + const struct sysfs_ops * ops = buffer->ops;
54908 int ret = 0;
54909 ssize_t count;
54910
54911 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54912 if (!sysfs_get_active_two(attr_sd))
54913 return -ENODEV;
54914
54915 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54916 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54917 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54918
54919 sysfs_put_active_two(attr_sd);
54920 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54921 {
54922 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54923 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54924 - struct sysfs_ops * ops = buffer->ops;
54925 + const struct sysfs_ops * ops = buffer->ops;
54926 int rc;
54927
54928 /* need attr_sd for attr and ops, its parent for kobj */
54929 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54930 return -ENOMEM;
54931
54932 atomic_set(&new_od->refcnt, 0);
54933 - atomic_set(&new_od->event, 1);
54934 + atomic_set_unchecked(&new_od->event, 1);
54935 init_waitqueue_head(&new_od->poll);
54936 INIT_LIST_HEAD(&new_od->buffers);
54937 goto retry;
54938 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54939 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54940 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54941 struct sysfs_buffer *buffer;
54942 - struct sysfs_ops *ops;
54943 + const struct sysfs_ops *ops;
54944 int error = -EACCES;
54945 char *p;
54946
54947 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54948
54949 sysfs_put_active_two(attr_sd);
54950
54951 - if (buffer->event != atomic_read(&od->event))
54952 + if (buffer->event != atomic_read_unchecked(&od->event))
54953 goto trigger;
54954
54955 return DEFAULT_POLLMASK;
54956 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54957
54958 od = sd->s_attr.open;
54959 if (od) {
54960 - atomic_inc(&od->event);
54961 + atomic_inc_unchecked(&od->event);
54962 wake_up_interruptible(&od->poll);
54963 }
54964
54965 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54966 index c5081ad..342ea86 100644
54967 --- a/fs/sysfs/symlink.c
54968 +++ b/fs/sysfs/symlink.c
54969 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54970
54971 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54972 {
54973 - char *page = nd_get_link(nd);
54974 + const char *page = nd_get_link(nd);
54975 if (!IS_ERR(page))
54976 free_page((unsigned long)page);
54977 }
54978 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54979 index 1e06853..b06d325 100644
54980 --- a/fs/udf/balloc.c
54981 +++ b/fs/udf/balloc.c
54982 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54983
54984 mutex_lock(&sbi->s_alloc_mutex);
54985 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54986 - if (bloc->logicalBlockNum < 0 ||
54987 - (bloc->logicalBlockNum + count) >
54988 - partmap->s_partition_len) {
54989 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54990 udf_debug("%d < %d || %d + %d > %d\n",
54991 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54992 count, partmap->s_partition_len);
54993 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54994
54995 mutex_lock(&sbi->s_alloc_mutex);
54996 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54997 - if (bloc->logicalBlockNum < 0 ||
54998 - (bloc->logicalBlockNum + count) >
54999 - partmap->s_partition_len) {
55000 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55001 udf_debug("%d < %d || %d + %d > %d\n",
55002 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55003 partmap->s_partition_len);
55004 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55005 index 6d24c2c..fff470f 100644
55006 --- a/fs/udf/inode.c
55007 +++ b/fs/udf/inode.c
55008 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55009 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55010 int lastblock = 0;
55011
55012 + pax_track_stack();
55013 +
55014 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55015 prev_epos.block = iinfo->i_location;
55016 prev_epos.bh = NULL;
55017 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55018 index 9215700..bf1f68e 100644
55019 --- a/fs/udf/misc.c
55020 +++ b/fs/udf/misc.c
55021 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55022
55023 u8 udf_tag_checksum(const struct tag *t)
55024 {
55025 - u8 *data = (u8 *)t;
55026 + const u8 *data = (const u8 *)t;
55027 u8 checksum = 0;
55028 int i;
55029 for (i = 0; i < sizeof(struct tag); ++i)
55030 diff --git a/fs/utimes.c b/fs/utimes.c
55031 index e4c75db..b4df0e0 100644
55032 --- a/fs/utimes.c
55033 +++ b/fs/utimes.c
55034 @@ -1,6 +1,7 @@
55035 #include <linux/compiler.h>
55036 #include <linux/file.h>
55037 #include <linux/fs.h>
55038 +#include <linux/security.h>
55039 #include <linux/linkage.h>
55040 #include <linux/mount.h>
55041 #include <linux/namei.h>
55042 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55043 goto mnt_drop_write_and_out;
55044 }
55045 }
55046 +
55047 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55048 + error = -EACCES;
55049 + goto mnt_drop_write_and_out;
55050 + }
55051 +
55052 mutex_lock(&inode->i_mutex);
55053 error = notify_change(path->dentry, &newattrs);
55054 mutex_unlock(&inode->i_mutex);
55055 diff --git a/fs/xattr.c b/fs/xattr.c
55056 index 6d4f6d3..cda3958 100644
55057 --- a/fs/xattr.c
55058 +++ b/fs/xattr.c
55059 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55060 * Extended attribute SET operations
55061 */
55062 static long
55063 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55064 +setxattr(struct path *path, const char __user *name, const void __user *value,
55065 size_t size, int flags)
55066 {
55067 int error;
55068 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55069 return PTR_ERR(kvalue);
55070 }
55071
55072 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55073 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55074 + error = -EACCES;
55075 + goto out;
55076 + }
55077 +
55078 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55079 +out:
55080 kfree(kvalue);
55081 return error;
55082 }
55083 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55084 return error;
55085 error = mnt_want_write(path.mnt);
55086 if (!error) {
55087 - error = setxattr(path.dentry, name, value, size, flags);
55088 + error = setxattr(&path, name, value, size, flags);
55089 mnt_drop_write(path.mnt);
55090 }
55091 path_put(&path);
55092 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55093 return error;
55094 error = mnt_want_write(path.mnt);
55095 if (!error) {
55096 - error = setxattr(path.dentry, name, value, size, flags);
55097 + error = setxattr(&path, name, value, size, flags);
55098 mnt_drop_write(path.mnt);
55099 }
55100 path_put(&path);
55101 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55102 const void __user *,value, size_t, size, int, flags)
55103 {
55104 struct file *f;
55105 - struct dentry *dentry;
55106 int error = -EBADF;
55107
55108 f = fget(fd);
55109 if (!f)
55110 return error;
55111 - dentry = f->f_path.dentry;
55112 - audit_inode(NULL, dentry);
55113 + audit_inode(NULL, f->f_path.dentry);
55114 error = mnt_want_write_file(f);
55115 if (!error) {
55116 - error = setxattr(dentry, name, value, size, flags);
55117 + error = setxattr(&f->f_path, name, value, size, flags);
55118 mnt_drop_write(f->f_path.mnt);
55119 }
55120 fput(f);
55121 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55122 index c6ad7c7..f2847a7 100644
55123 --- a/fs/xattr_acl.c
55124 +++ b/fs/xattr_acl.c
55125 @@ -17,8 +17,8 @@
55126 struct posix_acl *
55127 posix_acl_from_xattr(const void *value, size_t size)
55128 {
55129 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55130 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55131 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55132 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55133 int count;
55134 struct posix_acl *acl;
55135 struct posix_acl_entry *acl_e;
55136 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55137 index 942362f..88f96f5 100644
55138 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
55139 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55140 @@ -134,7 +134,7 @@ xfs_find_handle(
55141 }
55142
55143 error = -EFAULT;
55144 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55145 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55146 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55147 goto out_put;
55148
55149 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55150 if (IS_ERR(dentry))
55151 return PTR_ERR(dentry);
55152
55153 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55154 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55155 if (!kbuf)
55156 goto out_dput;
55157
55158 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55159 xfs_mount_t *mp,
55160 void __user *arg)
55161 {
55162 - xfs_fsop_geom_t fsgeo;
55163 + xfs_fsop_geom_t fsgeo;
55164 int error;
55165
55166 error = xfs_fs_geometry(mp, &fsgeo, 3);
55167 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55168 index bad485a..479bd32 100644
55169 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55170 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55171 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55172 xfs_fsop_geom_t fsgeo;
55173 int error;
55174
55175 + memset(&fsgeo, 0, sizeof(fsgeo));
55176 error = xfs_fs_geometry(mp, &fsgeo, 3);
55177 if (error)
55178 return -error;
55179 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55180 index 1f3b4b8..6102f6d 100644
55181 --- a/fs/xfs/linux-2.6/xfs_iops.c
55182 +++ b/fs/xfs/linux-2.6/xfs_iops.c
55183 @@ -468,7 +468,7 @@ xfs_vn_put_link(
55184 struct nameidata *nd,
55185 void *p)
55186 {
55187 - char *s = nd_get_link(nd);
55188 + const char *s = nd_get_link(nd);
55189
55190 if (!IS_ERR(s))
55191 kfree(s);
55192 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55193 index 8971fb0..5fc1eb2 100644
55194 --- a/fs/xfs/xfs_bmap.c
55195 +++ b/fs/xfs/xfs_bmap.c
55196 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55197 int nmap,
55198 int ret_nmap);
55199 #else
55200 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55201 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55202 #endif /* DEBUG */
55203
55204 #if defined(XFS_RW_TRACE)
55205 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55206 index e89734e..5e84d8d 100644
55207 --- a/fs/xfs/xfs_dir2_sf.c
55208 +++ b/fs/xfs/xfs_dir2_sf.c
55209 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55210 }
55211
55212 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55213 - if (filldir(dirent, sfep->name, sfep->namelen,
55214 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55215 + char name[sfep->namelen];
55216 + memcpy(name, sfep->name, sfep->namelen);
55217 + if (filldir(dirent, name, sfep->namelen,
55218 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
55219 + *offset = off & 0x7fffffff;
55220 + return 0;
55221 + }
55222 + } else if (filldir(dirent, sfep->name, sfep->namelen,
55223 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55224 *offset = off & 0x7fffffff;
55225 return 0;
55226 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55227 index 8f32f50..b6a41e8 100644
55228 --- a/fs/xfs/xfs_vnodeops.c
55229 +++ b/fs/xfs/xfs_vnodeops.c
55230 @@ -564,13 +564,18 @@ xfs_readlink(
55231
55232 xfs_ilock(ip, XFS_ILOCK_SHARED);
55233
55234 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55235 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55236 -
55237 pathlen = ip->i_d.di_size;
55238 if (!pathlen)
55239 goto out;
55240
55241 + if (pathlen > MAXPATHLEN) {
55242 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55243 + __func__, (unsigned long long)ip->i_ino, pathlen);
55244 + ASSERT(0);
55245 + error = XFS_ERROR(EFSCORRUPTED);
55246 + goto out;
55247 + }
55248 +
55249 if (ip->i_df.if_flags & XFS_IFINLINE) {
55250 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55251 link[pathlen] = '\0';
55252 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55253 new file mode 100644
55254 index 0000000..7026cbd
55255 --- /dev/null
55256 +++ b/grsecurity/Kconfig
55257 @@ -0,0 +1,1074 @@
55258 +#
55259 +# grecurity configuration
55260 +#
55261 +
55262 +menu "Grsecurity"
55263 +
55264 +config GRKERNSEC
55265 + bool "Grsecurity"
55266 + select CRYPTO
55267 + select CRYPTO_SHA256
55268 + help
55269 + If you say Y here, you will be able to configure many features
55270 + that will enhance the security of your system. It is highly
55271 + recommended that you say Y here and read through the help
55272 + for each option so that you fully understand the features and
55273 + can evaluate their usefulness for your machine.
55274 +
55275 +choice
55276 + prompt "Security Level"
55277 + depends on GRKERNSEC
55278 + default GRKERNSEC_CUSTOM
55279 +
55280 +config GRKERNSEC_LOW
55281 + bool "Low"
55282 + select GRKERNSEC_LINK
55283 + select GRKERNSEC_FIFO
55284 + select GRKERNSEC_RANDNET
55285 + select GRKERNSEC_DMESG
55286 + select GRKERNSEC_CHROOT
55287 + select GRKERNSEC_CHROOT_CHDIR
55288 +
55289 + help
55290 + If you choose this option, several of the grsecurity options will
55291 + be enabled that will give you greater protection against a number
55292 + of attacks, while assuring that none of your software will have any
55293 + conflicts with the additional security measures. If you run a lot
55294 + of unusual software, or you are having problems with the higher
55295 + security levels, you should say Y here. With this option, the
55296 + following features are enabled:
55297 +
55298 + - Linking restrictions
55299 + - FIFO restrictions
55300 + - Restricted dmesg
55301 + - Enforced chdir("/") on chroot
55302 + - Runtime module disabling
55303 +
55304 +config GRKERNSEC_MEDIUM
55305 + bool "Medium"
55306 + select PAX
55307 + select PAX_EI_PAX
55308 + select PAX_PT_PAX_FLAGS
55309 + select PAX_HAVE_ACL_FLAGS
55310 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55311 + select GRKERNSEC_CHROOT
55312 + select GRKERNSEC_CHROOT_SYSCTL
55313 + select GRKERNSEC_LINK
55314 + select GRKERNSEC_FIFO
55315 + select GRKERNSEC_DMESG
55316 + select GRKERNSEC_RANDNET
55317 + select GRKERNSEC_FORKFAIL
55318 + select GRKERNSEC_TIME
55319 + select GRKERNSEC_SIGNAL
55320 + select GRKERNSEC_CHROOT
55321 + select GRKERNSEC_CHROOT_UNIX
55322 + select GRKERNSEC_CHROOT_MOUNT
55323 + select GRKERNSEC_CHROOT_PIVOT
55324 + select GRKERNSEC_CHROOT_DOUBLE
55325 + select GRKERNSEC_CHROOT_CHDIR
55326 + select GRKERNSEC_CHROOT_MKNOD
55327 + select GRKERNSEC_PROC
55328 + select GRKERNSEC_PROC_USERGROUP
55329 + select PAX_RANDUSTACK
55330 + select PAX_ASLR
55331 + select PAX_RANDMMAP
55332 + select PAX_REFCOUNT if (X86 || SPARC64)
55333 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55334 +
55335 + help
55336 + If you say Y here, several features in addition to those included
55337 + in the low additional security level will be enabled. These
55338 + features provide even more security to your system, though in rare
55339 + cases they may be incompatible with very old or poorly written
55340 + software. If you enable this option, make sure that your auth
55341 + service (identd) is running as gid 1001. With this option,
55342 + the following features (in addition to those provided in the
55343 + low additional security level) will be enabled:
55344 +
55345 + - Failed fork logging
55346 + - Time change logging
55347 + - Signal logging
55348 + - Deny mounts in chroot
55349 + - Deny double chrooting
55350 + - Deny sysctl writes in chroot
55351 + - Deny mknod in chroot
55352 + - Deny access to abstract AF_UNIX sockets out of chroot
55353 + - Deny pivot_root in chroot
55354 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55355 + - /proc restrictions with special GID set to 10 (usually wheel)
55356 + - Address Space Layout Randomization (ASLR)
55357 + - Prevent exploitation of most refcount overflows
55358 + - Bounds checking of copying between the kernel and userland
55359 +
55360 +config GRKERNSEC_HIGH
55361 + bool "High"
55362 + select GRKERNSEC_LINK
55363 + select GRKERNSEC_FIFO
55364 + select GRKERNSEC_DMESG
55365 + select GRKERNSEC_FORKFAIL
55366 + select GRKERNSEC_TIME
55367 + select GRKERNSEC_SIGNAL
55368 + select GRKERNSEC_CHROOT
55369 + select GRKERNSEC_CHROOT_SHMAT
55370 + select GRKERNSEC_CHROOT_UNIX
55371 + select GRKERNSEC_CHROOT_MOUNT
55372 + select GRKERNSEC_CHROOT_FCHDIR
55373 + select GRKERNSEC_CHROOT_PIVOT
55374 + select GRKERNSEC_CHROOT_DOUBLE
55375 + select GRKERNSEC_CHROOT_CHDIR
55376 + select GRKERNSEC_CHROOT_MKNOD
55377 + select GRKERNSEC_CHROOT_CAPS
55378 + select GRKERNSEC_CHROOT_SYSCTL
55379 + select GRKERNSEC_CHROOT_FINDTASK
55380 + select GRKERNSEC_SYSFS_RESTRICT
55381 + select GRKERNSEC_PROC
55382 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55383 + select GRKERNSEC_HIDESYM
55384 + select GRKERNSEC_BRUTE
55385 + select GRKERNSEC_PROC_USERGROUP
55386 + select GRKERNSEC_KMEM
55387 + select GRKERNSEC_RESLOG
55388 + select GRKERNSEC_RANDNET
55389 + select GRKERNSEC_PROC_ADD
55390 + select GRKERNSEC_CHROOT_CHMOD
55391 + select GRKERNSEC_CHROOT_NICE
55392 + select GRKERNSEC_SETXID
55393 + select GRKERNSEC_AUDIT_MOUNT
55394 + select GRKERNSEC_MODHARDEN if (MODULES)
55395 + select GRKERNSEC_HARDEN_PTRACE
55396 + select GRKERNSEC_PTRACE_READEXEC
55397 + select GRKERNSEC_VM86 if (X86_32)
55398 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55399 + select PAX
55400 + select PAX_RANDUSTACK
55401 + select PAX_ASLR
55402 + select PAX_RANDMMAP
55403 + select PAX_NOEXEC
55404 + select PAX_MPROTECT
55405 + select PAX_EI_PAX
55406 + select PAX_PT_PAX_FLAGS
55407 + select PAX_HAVE_ACL_FLAGS
55408 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55409 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55410 + select PAX_RANDKSTACK if (X86_TSC && X86)
55411 + select PAX_SEGMEXEC if (X86_32)
55412 + select PAX_PAGEEXEC
55413 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55414 + select PAX_EMUTRAMP if (PARISC)
55415 + select PAX_EMUSIGRT if (PARISC)
55416 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55417 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55418 + select PAX_REFCOUNT if (X86 || SPARC64)
55419 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55420 + help
55421 + If you say Y here, many of the features of grsecurity will be
55422 + enabled, which will protect you against many kinds of attacks
55423 + against your system. The heightened security comes at a cost
55424 + of an increased chance of incompatibilities with rare software
55425 + on your machine. Since this security level enables PaX, you should
55426 + view <http://pax.grsecurity.net> and read about the PaX
55427 + project. While you are there, download chpax and run it on
55428 + binaries that cause problems with PaX. Also remember that
55429 + since the /proc restrictions are enabled, you must run your
55430 + identd as gid 1001. This security level enables the following
55431 + features in addition to those listed in the low and medium
55432 + security levels:
55433 +
55434 + - Additional /proc restrictions
55435 + - Chmod restrictions in chroot
55436 + - No signals, ptrace, or viewing of processes outside of chroot
55437 + - Capability restrictions in chroot
55438 + - Deny fchdir out of chroot
55439 + - Priority restrictions in chroot
55440 + - Segmentation-based implementation of PaX
55441 + - Mprotect restrictions
55442 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55443 + - Kernel stack randomization
55444 + - Mount/unmount/remount logging
55445 + - Kernel symbol hiding
55446 + - Hardening of module auto-loading
55447 + - Ptrace restrictions
55448 + - Restricted vm86 mode
55449 + - Restricted sysfs/debugfs
55450 + - Active kernel exploit response
55451 +
55452 +config GRKERNSEC_CUSTOM
55453 + bool "Custom"
55454 + help
55455 + If you say Y here, you will be able to configure every grsecurity
55456 + option, which allows you to enable many more features that aren't
55457 + covered in the basic security levels. These additional features
55458 + include TPE, socket restrictions, and the sysctl system for
55459 + grsecurity. It is advised that you read through the help for
55460 + each option to determine its usefulness in your situation.
55461 +
55462 +endchoice
55463 +
55464 +menu "Memory Protections"
55465 +depends on GRKERNSEC
55466 +
55467 +config GRKERNSEC_KMEM
55468 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55469 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55470 + help
55471 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55472 + be written to or read from to modify or leak the contents of the running
55473 + kernel. /dev/port will also not be allowed to be opened. If you have module
55474 + support disabled, enabling this will close up four ways that are
55475 + currently used to insert malicious code into the running kernel.
55476 + Even with all these features enabled, we still highly recommend that
55477 + you use the RBAC system, as it is still possible for an attacker to
55478 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55479 + If you are not using XFree86, you may be able to stop this additional
55480 + case by enabling the 'Disable privileged I/O' option. Though nothing
55481 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55482 + but only to video memory, which is the only writing we allow in this
55483 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55484 + not be allowed to mprotect it with PROT_WRITE later.
55485 + It is highly recommended that you say Y here if you meet all the
55486 + conditions above.
55487 +
55488 +config GRKERNSEC_VM86
55489 + bool "Restrict VM86 mode"
55490 + depends on X86_32
55491 +
55492 + help
55493 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55494 + make use of a special execution mode on 32bit x86 processors called
55495 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55496 + video cards and will still work with this option enabled. The purpose
55497 + of the option is to prevent exploitation of emulation errors in
55498 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55499 + Nearly all users should be able to enable this option.
55500 +
55501 +config GRKERNSEC_IO
55502 + bool "Disable privileged I/O"
55503 + depends on X86
55504 + select RTC_CLASS
55505 + select RTC_INTF_DEV
55506 + select RTC_DRV_CMOS
55507 +
55508 + help
55509 + If you say Y here, all ioperm and iopl calls will return an error.
55510 + Ioperm and iopl can be used to modify the running kernel.
55511 + Unfortunately, some programs need this access to operate properly,
55512 + the most notable of which are XFree86 and hwclock. hwclock can be
55513 + remedied by having RTC support in the kernel, so real-time
55514 + clock support is enabled if this option is enabled, to ensure
55515 + that hwclock operates correctly. XFree86 still will not
55516 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55517 + IF YOU USE XFree86. If you use XFree86 and you still want to
55518 + protect your kernel against modification, use the RBAC system.
55519 +
55520 +config GRKERNSEC_PROC_MEMMAP
55521 + bool "Harden ASLR against information leaks and entropy reduction"
55522 + default y if (PAX_NOEXEC || PAX_ASLR)
55523 + depends on PAX_NOEXEC || PAX_ASLR
55524 + help
55525 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55526 + give no information about the addresses of its mappings if
55527 + PaX features that rely on random addresses are enabled on the task.
55528 + In addition to sanitizing this information and disabling other
55529 + dangerous sources of information, this option causes reads of sensitive
55530 + /proc/<pid> entries where the file descriptor was opened in a different
55531 + task than the one performing the read. Such attempts are logged.
55532 + Finally, this option limits argv/env strings for suid/sgid binaries
55533 + to 1MB to prevent a complete exhaustion of the stack entropy provided
55534 + by ASLR.
55535 + If you use PaX it is essential that you say Y here as it closes up
55536 + several holes that make full ASLR useless for suid/sgid binaries.
55537 +
55538 +config GRKERNSEC_BRUTE
55539 + bool "Deter exploit bruteforcing"
55540 + help
55541 + If you say Y here, attempts to bruteforce exploits against forking
55542 + daemons such as apache or sshd, as well as against suid/sgid binaries
55543 + will be deterred. When a child of a forking daemon is killed by PaX
55544 + or crashes due to an illegal instruction or other suspicious signal,
55545 + the parent process will be delayed 30 seconds upon every subsequent
55546 + fork until the administrator is able to assess the situation and
55547 + restart the daemon.
55548 + In the suid/sgid case, the attempt is logged, the user has all their
55549 + processes terminated, and they are prevented from executing any further
55550 + processes for 15 minutes.
55551 + It is recommended that you also enable signal logging in the auditing
55552 + section so that logs are generated when a process triggers a suspicious
55553 + signal.
55554 + If the sysctl option is enabled, a sysctl option with name
55555 + "deter_bruteforce" is created.
55556 +
55557 +config GRKERNSEC_MODHARDEN
55558 + bool "Harden module auto-loading"
55559 + depends on MODULES
55560 + help
55561 + If you say Y here, module auto-loading in response to use of some
55562 + feature implemented by an unloaded module will be restricted to
55563 + root users. Enabling this option helps defend against attacks
55564 + by unprivileged users who abuse the auto-loading behavior to
55565 + cause a vulnerable module to load that is then exploited.
55566 +
55567 + If this option prevents a legitimate use of auto-loading for a
55568 + non-root user, the administrator can execute modprobe manually
55569 + with the exact name of the module mentioned in the alert log.
55570 + Alternatively, the administrator can add the module to the list
55571 + of modules loaded at boot by modifying init scripts.
55572 +
55573 + Modification of init scripts will most likely be needed on
55574 + Ubuntu servers with encrypted home directory support enabled,
55575 + as the first non-root user logging in will cause the ecb(aes),
55576 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55577 +
55578 +config GRKERNSEC_HIDESYM
55579 + bool "Hide kernel symbols"
55580 + help
55581 + If you say Y here, getting information on loaded modules, and
55582 + displaying all kernel symbols through a syscall will be restricted
55583 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55584 + /proc/kallsyms will be restricted to the root user. The RBAC
55585 + system can hide that entry even from root.
55586 +
55587 + This option also prevents leaking of kernel addresses through
55588 + several /proc entries.
55589 +
55590 + Note that this option is only effective provided the following
55591 + conditions are met:
55592 + 1) The kernel using grsecurity is not precompiled by some distribution
55593 + 2) You have also enabled GRKERNSEC_DMESG
55594 + 3) You are using the RBAC system and hiding other files such as your
55595 + kernel image and System.map. Alternatively, enabling this option
55596 + causes the permissions on /boot, /lib/modules, and the kernel
55597 + source directory to change at compile time to prevent
55598 + reading by non-root users.
55599 + If the above conditions are met, this option will aid in providing a
55600 + useful protection against local kernel exploitation of overflows
55601 + and arbitrary read/write vulnerabilities.
55602 +
55603 +config GRKERNSEC_KERN_LOCKOUT
55604 + bool "Active kernel exploit response"
55605 + depends on X86 || ARM || PPC || SPARC
55606 + help
55607 + If you say Y here, when a PaX alert is triggered due to suspicious
55608 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55609 + or an OOPs occurs due to bad memory accesses, instead of just
55610 + terminating the offending process (and potentially allowing
55611 + a subsequent exploit from the same user), we will take one of two
55612 + actions:
55613 + If the user was root, we will panic the system
55614 + If the user was non-root, we will log the attempt, terminate
55615 + all processes owned by the user, then prevent them from creating
55616 + any new processes until the system is restarted
55617 + This deters repeated kernel exploitation/bruteforcing attempts
55618 + and is useful for later forensics.
55619 +
55620 +endmenu
55621 +menu "Role Based Access Control Options"
55622 +depends on GRKERNSEC
55623 +
55624 +config GRKERNSEC_RBAC_DEBUG
55625 + bool
55626 +
55627 +config GRKERNSEC_NO_RBAC
55628 + bool "Disable RBAC system"
55629 + help
55630 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55631 + preventing the RBAC system from being enabled. You should only say Y
55632 + here if you have no intention of using the RBAC system, so as to prevent
55633 + an attacker with root access from misusing the RBAC system to hide files
55634 + and processes when loadable module support and /dev/[k]mem have been
55635 + locked down.
55636 +
55637 +config GRKERNSEC_ACL_HIDEKERN
55638 + bool "Hide kernel processes"
55639 + help
55640 + If you say Y here, all kernel threads will be hidden to all
55641 + processes but those whose subject has the "view hidden processes"
55642 + flag.
55643 +
55644 +config GRKERNSEC_ACL_MAXTRIES
55645 + int "Maximum tries before password lockout"
55646 + default 3
55647 + help
55648 + This option enforces the maximum number of times a user can attempt
55649 + to authorize themselves with the grsecurity RBAC system before being
55650 + denied the ability to attempt authorization again for a specified time.
55651 + The lower the number, the harder it will be to brute-force a password.
55652 +
55653 +config GRKERNSEC_ACL_TIMEOUT
55654 + int "Time to wait after max password tries, in seconds"
55655 + default 30
55656 + help
55657 + This option specifies the time the user must wait after attempting to
55658 + authorize to the RBAC system with the maximum number of invalid
55659 + passwords. The higher the number, the harder it will be to brute-force
55660 + a password.
55661 +
55662 +endmenu
55663 +menu "Filesystem Protections"
55664 +depends on GRKERNSEC
55665 +
55666 +config GRKERNSEC_PROC
55667 + bool "Proc restrictions"
55668 + help
55669 + If you say Y here, the permissions of the /proc filesystem
55670 + will be altered to enhance system security and privacy. You MUST
55671 + choose either a user only restriction or a user and group restriction.
55672 + Depending upon the option you choose, you can either restrict users to
55673 + see only the processes they themselves run, or choose a group that can
55674 + view all processes and files normally restricted to root if you choose
55675 + the "restrict to user only" option. NOTE: If you're running identd as
55676 + a non-root user, you will have to run it as the group you specify here.
55677 +
55678 +config GRKERNSEC_PROC_USER
55679 + bool "Restrict /proc to user only"
55680 + depends on GRKERNSEC_PROC
55681 + help
55682 + If you say Y here, non-root users will only be able to view their own
55683 + processes, and restricts them from viewing network-related information,
55684 + and viewing kernel symbol and module information.
55685 +
55686 +config GRKERNSEC_PROC_USERGROUP
55687 + bool "Allow special group"
55688 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55689 + help
55690 + If you say Y here, you will be able to select a group that will be
55691 + able to view all processes and network-related information. If you've
55692 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55693 + remain hidden. This option is useful if you want to run identd as
55694 + a non-root user.
55695 +
55696 +config GRKERNSEC_PROC_GID
55697 + int "GID for special group"
55698 + depends on GRKERNSEC_PROC_USERGROUP
55699 + default 1001
55700 +
55701 +config GRKERNSEC_PROC_ADD
55702 + bool "Additional restrictions"
55703 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55704 + help
55705 + If you say Y here, additional restrictions will be placed on
55706 + /proc that keep normal users from viewing device information and
55707 + slabinfo information that could be useful for exploits.
55708 +
55709 +config GRKERNSEC_LINK
55710 + bool "Linking restrictions"
55711 + help
55712 + If you say Y here, /tmp race exploits will be prevented, since users
55713 + will no longer be able to follow symlinks owned by other users in
55714 + world-writable +t directories (e.g. /tmp), unless the owner of the
55715 + symlink is the owner of the directory. Users will also not be
55716 + able to hardlink to files they do not own. If the sysctl option is
55717 + enabled, a sysctl option with name "linking_restrictions" is created.
55718 +
55719 +config GRKERNSEC_FIFO
55720 + bool "FIFO restrictions"
55721 + help
55722 + If you say Y here, users will not be able to write to FIFOs they don't
55723 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55724 + the FIFO is the same owner of the directory it's held in. If the sysctl
55725 + option is enabled, a sysctl option with name "fifo_restrictions" is
55726 + created.
55727 +
55728 +config GRKERNSEC_SYSFS_RESTRICT
55729 + bool "Sysfs/debugfs restriction"
55730 + depends on SYSFS
55731 + help
55732 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55733 + any filesystem normally mounted under it (e.g. debugfs) will be
55734 + mostly accessible only by root. These filesystems generally provide access
55735 + to hardware and debug information that isn't appropriate for unprivileged
55736 + users of the system. Sysfs and debugfs have also become a large source
55737 + of new vulnerabilities, ranging from infoleaks to local compromise.
55738 + There has been very little oversight with an eye toward security involved
55739 + in adding new exporters of information to these filesystems, so their
55740 + use is discouraged.
55741 + For reasons of compatibility, a few directories have been whitelisted
55742 + for access by non-root users:
55743 + /sys/fs/selinux
55744 + /sys/fs/fuse
55745 + /sys/devices/system/cpu
55746 +
55747 +config GRKERNSEC_ROFS
55748 + bool "Runtime read-only mount protection"
55749 + help
55750 + If you say Y here, a sysctl option with name "romount_protect" will
55751 + be created. By setting this option to 1 at runtime, filesystems
55752 + will be protected in the following ways:
55753 + * No new writable mounts will be allowed
55754 + * Existing read-only mounts won't be able to be remounted read/write
55755 + * Write operations will be denied on all block devices
55756 + This option acts independently of grsec_lock: once it is set to 1,
55757 + it cannot be turned off. Therefore, please be mindful of the resulting
55758 + behavior if this option is enabled in an init script on a read-only
55759 + filesystem. This feature is mainly intended for secure embedded systems.
55760 +
55761 +config GRKERNSEC_CHROOT
55762 + bool "Chroot jail restrictions"
55763 + help
55764 + If you say Y here, you will be able to choose several options that will
55765 + make breaking out of a chrooted jail much more difficult. If you
55766 + encounter no software incompatibilities with the following options, it
55767 + is recommended that you enable each one.
55768 +
55769 +config GRKERNSEC_CHROOT_MOUNT
55770 + bool "Deny mounts"
55771 + depends on GRKERNSEC_CHROOT
55772 + help
55773 + If you say Y here, processes inside a chroot will not be able to
55774 + mount or remount filesystems. If the sysctl option is enabled, a
55775 + sysctl option with name "chroot_deny_mount" is created.
55776 +
55777 +config GRKERNSEC_CHROOT_DOUBLE
55778 + bool "Deny double-chroots"
55779 + depends on GRKERNSEC_CHROOT
55780 + help
55781 + If you say Y here, processes inside a chroot will not be able to chroot
55782 + again outside the chroot. This is a widely used method of breaking
55783 + out of a chroot jail and should not be allowed. If the sysctl
55784 + option is enabled, a sysctl option with name
55785 + "chroot_deny_chroot" is created.
55786 +
55787 +config GRKERNSEC_CHROOT_PIVOT
55788 + bool "Deny pivot_root in chroot"
55789 + depends on GRKERNSEC_CHROOT
55790 + help
55791 + If you say Y here, processes inside a chroot will not be able to use
55792 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55793 + works similar to chroot in that it changes the root filesystem. This
55794 + function could be misused in a chrooted process to attempt to break out
55795 + of the chroot, and therefore should not be allowed. If the sysctl
55796 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55797 + created.
55798 +
55799 +config GRKERNSEC_CHROOT_CHDIR
55800 + bool "Enforce chdir(\"/\") on all chroots"
55801 + depends on GRKERNSEC_CHROOT
55802 + help
55803 + If you say Y here, the current working directory of all newly-chrooted
55804 + applications will be set to the root directory of the chroot.
55805 + The man page on chroot(2) states:
55806 + Note that this call does not change the current working
55807 + directory, so that `.' can be outside the tree rooted at
55808 + `/'. In particular, the super-user can escape from a
55809 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55810 +
55811 + It is recommended that you say Y here, since it's not known to break
55812 + any software. If the sysctl option is enabled, a sysctl option with
55813 + name "chroot_enforce_chdir" is created.
55814 +
55815 +config GRKERNSEC_CHROOT_CHMOD
55816 + bool "Deny (f)chmod +s"
55817 + depends on GRKERNSEC_CHROOT
55818 + help
55819 + If you say Y here, processes inside a chroot will not be able to chmod
55820 + or fchmod files to make them have suid or sgid bits. This protects
55821 + against another published method of breaking a chroot. If the sysctl
55822 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55823 + created.
55824 +
55825 +config GRKERNSEC_CHROOT_FCHDIR
55826 + bool "Deny fchdir out of chroot"
55827 + depends on GRKERNSEC_CHROOT
55828 + help
55829 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55830 + to a file descriptor of the chrooting process that points to a directory
55831 + outside the filesystem will be stopped. If the sysctl option
55832 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55833 +
55834 +config GRKERNSEC_CHROOT_MKNOD
55835 + bool "Deny mknod"
55836 + depends on GRKERNSEC_CHROOT
55837 + help
55838 + If you say Y here, processes inside a chroot will not be allowed to
55839 + mknod. The problem with using mknod inside a chroot is that it
55840 + would allow an attacker to create a device entry that is the same
55841 + as one on the physical root of your system, which could range from
55842 + anything from the console device to a device for your harddrive (which
55843 + they could then use to wipe the drive or steal data). It is recommended
55844 + that you say Y here, unless you run into software incompatibilities.
55845 + If the sysctl option is enabled, a sysctl option with name
55846 + "chroot_deny_mknod" is created.
55847 +
55848 +config GRKERNSEC_CHROOT_SHMAT
55849 + bool "Deny shmat() out of chroot"
55850 + depends on GRKERNSEC_CHROOT
55851 + help
55852 + If you say Y here, processes inside a chroot will not be able to attach
55853 + to shared memory segments that were created outside of the chroot jail.
55854 + It is recommended that you say Y here. If the sysctl option is enabled,
55855 + a sysctl option with name "chroot_deny_shmat" is created.
55856 +
55857 +config GRKERNSEC_CHROOT_UNIX
55858 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55859 + depends on GRKERNSEC_CHROOT
55860 + help
55861 + If you say Y here, processes inside a chroot will not be able to
55862 + connect to abstract (meaning not belonging to a filesystem) Unix
55863 + domain sockets that were bound outside of a chroot. It is recommended
55864 + that you say Y here. If the sysctl option is enabled, a sysctl option
55865 + with name "chroot_deny_unix" is created.
55866 +
55867 +config GRKERNSEC_CHROOT_FINDTASK
55868 + bool "Protect outside processes"
55869 + depends on GRKERNSEC_CHROOT
55870 + help
55871 + If you say Y here, processes inside a chroot will not be able to
55872 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55873 + getsid, or view any process outside of the chroot. If the sysctl
55874 + option is enabled, a sysctl option with name "chroot_findtask" is
55875 + created.
55876 +
55877 +config GRKERNSEC_CHROOT_NICE
55878 + bool "Restrict priority changes"
55879 + depends on GRKERNSEC_CHROOT
55880 + help
55881 + If you say Y here, processes inside a chroot will not be able to raise
55882 + the priority of processes in the chroot, or alter the priority of
55883 + processes outside the chroot. This provides more security than simply
55884 + removing CAP_SYS_NICE from the process' capability set. If the
55885 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55886 + is created.
55887 +
55888 +config GRKERNSEC_CHROOT_SYSCTL
55889 + bool "Deny sysctl writes"
55890 + depends on GRKERNSEC_CHROOT
55891 + help
55892 + If you say Y here, an attacker in a chroot will not be able to
55893 + write to sysctl entries, either by sysctl(2) or through a /proc
55894 + interface. It is strongly recommended that you say Y here. If the
55895 + sysctl option is enabled, a sysctl option with name
55896 + "chroot_deny_sysctl" is created.
55897 +
55898 +config GRKERNSEC_CHROOT_CAPS
55899 + bool "Capability restrictions"
55900 + depends on GRKERNSEC_CHROOT
55901 + help
55902 + If you say Y here, the capabilities on all processes within a
55903 + chroot jail will be lowered to stop module insertion, raw i/o,
55904 + system and net admin tasks, rebooting the system, modifying immutable
55905 + files, modifying IPC owned by another, and changing the system time.
55906 + This is left an option because it can break some apps. Disable this
55907 + if your chrooted apps are having problems performing those kinds of
55908 + tasks. If the sysctl option is enabled, a sysctl option with
55909 + name "chroot_caps" is created.
55910 +
55911 +endmenu
55912 +menu "Kernel Auditing"
55913 +depends on GRKERNSEC
55914 +
55915 +config GRKERNSEC_AUDIT_GROUP
55916 + bool "Single group for auditing"
55917 + help
55918 + If you say Y here, the exec, chdir, and (un)mount logging features
55919 + will only operate on a group you specify. This option is recommended
55920 + if you only want to watch certain users instead of having a large
55921 + amount of logs from the entire system. If the sysctl option is enabled,
55922 + a sysctl option with name "audit_group" is created.
55923 +
55924 +config GRKERNSEC_AUDIT_GID
55925 + int "GID for auditing"
55926 + depends on GRKERNSEC_AUDIT_GROUP
55927 + default 1007
55928 +
55929 +config GRKERNSEC_EXECLOG
55930 + bool "Exec logging"
55931 + help
55932 + If you say Y here, all execve() calls will be logged (since the
55933 + other exec*() calls are frontends to execve(), all execution
55934 + will be logged). Useful for shell-servers that like to keep track
55935 + of their users. If the sysctl option is enabled, a sysctl option with
55936 + name "exec_logging" is created.
55937 + WARNING: This option when enabled will produce a LOT of logs, especially
55938 + on an active system.
55939 +
55940 +config GRKERNSEC_RESLOG
55941 + bool "Resource logging"
55942 + help
55943 + If you say Y here, all attempts to overstep resource limits will
55944 + be logged with the resource name, the requested size, and the current
55945 + limit. It is highly recommended that you say Y here. If the sysctl
55946 + option is enabled, a sysctl option with name "resource_logging" is
55947 + created. If the RBAC system is enabled, the sysctl value is ignored.
55948 +
55949 +config GRKERNSEC_CHROOT_EXECLOG
55950 + bool "Log execs within chroot"
55951 + help
55952 + If you say Y here, all executions inside a chroot jail will be logged
55953 + to syslog. This can cause a large amount of logs if certain
55954 + applications (eg. djb's daemontools) are installed on the system, and
55955 + is therefore left as an option. If the sysctl option is enabled, a
55956 + sysctl option with name "chroot_execlog" is created.
55957 +
55958 +config GRKERNSEC_AUDIT_PTRACE
55959 + bool "Ptrace logging"
55960 + help
55961 + If you say Y here, all attempts to attach to a process via ptrace
55962 + will be logged. If the sysctl option is enabled, a sysctl option
55963 + with name "audit_ptrace" is created.
55964 +
55965 +config GRKERNSEC_AUDIT_CHDIR
55966 + bool "Chdir logging"
55967 + help
55968 + If you say Y here, all chdir() calls will be logged. If the sysctl
55969 + option is enabled, a sysctl option with name "audit_chdir" is created.
55970 +
55971 +config GRKERNSEC_AUDIT_MOUNT
55972 + bool "(Un)Mount logging"
55973 + help
55974 + If you say Y here, all mounts and unmounts will be logged. If the
55975 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55976 + created.
55977 +
55978 +config GRKERNSEC_SIGNAL
55979 + bool "Signal logging"
55980 + help
55981 + If you say Y here, certain important signals will be logged, such as
55982 + SIGSEGV, which will as a result inform you when an error in a program
55983 + occurred, which in some cases could mean a possible exploit attempt.
55984 + If the sysctl option is enabled, a sysctl option with name
55985 + "signal_logging" is created.
55986 +
55987 +config GRKERNSEC_FORKFAIL
55988 + bool "Fork failure logging"
55989 + help
55990 + If you say Y here, all failed fork() attempts will be logged.
55991 + This could suggest a fork bomb, or someone attempting to overstep
55992 + their process limit. If the sysctl option is enabled, a sysctl option
55993 + with name "forkfail_logging" is created.
55994 +
55995 +config GRKERNSEC_TIME
55996 + bool "Time change logging"
55997 + help
55998 + If you say Y here, any changes of the system clock will be logged.
55999 + If the sysctl option is enabled, a sysctl option with name
56000 + "timechange_logging" is created.
56001 +
56002 +config GRKERNSEC_PROC_IPADDR
56003 + bool "/proc/<pid>/ipaddr support"
56004 + help
56005 + If you say Y here, a new entry will be added to each /proc/<pid>
56006 + directory that contains the IP address of the person using the task.
56007 + The IP is carried across local TCP and AF_UNIX stream sockets.
56008 + This information can be useful for IDS/IPSes to perform remote response
56009 + to a local attack. The entry is readable by only the owner of the
56010 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56011 + the RBAC system), and thus does not create privacy concerns.
56012 +
56013 +config GRKERNSEC_RWXMAP_LOG
56014 + bool 'Denied RWX mmap/mprotect logging'
56015 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56016 + help
56017 + If you say Y here, calls to mmap() and mprotect() with explicit
56018 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56019 + denied by the PAX_MPROTECT feature. If the sysctl option is
56020 + enabled, a sysctl option with name "rwxmap_logging" is created.
56021 +
56022 +config GRKERNSEC_AUDIT_TEXTREL
56023 + bool 'ELF text relocations logging (READ HELP)'
56024 + depends on PAX_MPROTECT
56025 + help
56026 + If you say Y here, text relocations will be logged with the filename
56027 + of the offending library or binary. The purpose of the feature is
56028 + to help Linux distribution developers get rid of libraries and
56029 + binaries that need text relocations which hinder the future progress
56030 + of PaX. Only Linux distribution developers should say Y here, and
56031 + never on a production machine, as this option creates an information
56032 + leak that could aid an attacker in defeating the randomization of
56033 + a single memory region. If the sysctl option is enabled, a sysctl
56034 + option with name "audit_textrel" is created.
56035 +
56036 +endmenu
56037 +
56038 +menu "Executable Protections"
56039 +depends on GRKERNSEC
56040 +
56041 +config GRKERNSEC_DMESG
56042 + bool "Dmesg(8) restriction"
56043 + help
56044 + If you say Y here, non-root users will not be able to use dmesg(8)
56045 + to view up to the last 4kb of messages in the kernel's log buffer.
56046 + The kernel's log buffer often contains kernel addresses and other
56047 + identifying information useful to an attacker in fingerprinting a
56048 + system for a targeted exploit.
56049 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56050 + created.
56051 +
56052 +config GRKERNSEC_HARDEN_PTRACE
56053 + bool "Deter ptrace-based process snooping"
56054 + help
56055 + If you say Y here, TTY sniffers and other malicious monitoring
56056 + programs implemented through ptrace will be defeated. If you
56057 + have been using the RBAC system, this option has already been
56058 + enabled for several years for all users, with the ability to make
56059 + fine-grained exceptions.
56060 +
56061 + This option only affects the ability of non-root users to ptrace
56062 + processes that are not a descendent of the ptracing process.
56063 + This means that strace ./binary and gdb ./binary will still work,
56064 + but attaching to arbitrary processes will not. If the sysctl
56065 + option is enabled, a sysctl option with name "harden_ptrace" is
56066 + created.
56067 +
56068 +config GRKERNSEC_PTRACE_READEXEC
56069 + bool "Require read access to ptrace sensitive binaries"
56070 + help
56071 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56072 + binaries. This option is useful in environments that
56073 + remove the read bits (e.g. file mode 4711) from suid binaries to
56074 + prevent infoleaking of their contents. This option adds
56075 + consistency to the use of that file mode, as the binary could normally
56076 + be read out when run without privileges while ptracing.
56077 +
56078 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56079 + is created.
56080 +
56081 +config GRKERNSEC_SETXID
56082 + bool "Enforce consistent multithreaded privileges"
56083 + help
56084 + If you say Y here, a change from a root uid to a non-root uid
56085 + in a multithreaded application will cause the resulting uids,
56086 + gids, supplementary groups, and capabilities in that thread
56087 + to be propagated to the other threads of the process. In most
56088 + cases this is unnecessary, as glibc will emulate this behavior
56089 + on behalf of the application. Other libcs do not act in the
56090 + same way, allowing the other threads of the process to continue
56091 + running with root privileges. If the sysctl option is enabled,
56092 + a sysctl option with name "consistent_setxid" is created.
56093 +
56094 +config GRKERNSEC_TPE
56095 + bool "Trusted Path Execution (TPE)"
56096 + help
56097 + If you say Y here, you will be able to choose a gid to add to the
56098 + supplementary groups of users you want to mark as "untrusted."
56099 + These users will not be able to execute any files that are not in
56100 + root-owned directories writable only by root. If the sysctl option
56101 + is enabled, a sysctl option with name "tpe" is created.
56102 +
56103 +config GRKERNSEC_TPE_ALL
56104 + bool "Partially restrict all non-root users"
56105 + depends on GRKERNSEC_TPE
56106 + help
56107 + If you say Y here, all non-root users will be covered under
56108 + a weaker TPE restriction. This is separate from, and in addition to,
56109 + the main TPE options that you have selected elsewhere. Thus, if a
56110 + "trusted" GID is chosen, this restriction applies to even that GID.
56111 + Under this restriction, all non-root users will only be allowed to
56112 + execute files in directories they own that are not group or
56113 + world-writable, or in directories owned by root and writable only by
56114 + root. If the sysctl option is enabled, a sysctl option with name
56115 + "tpe_restrict_all" is created.
56116 +
56117 +config GRKERNSEC_TPE_INVERT
56118 + bool "Invert GID option"
56119 + depends on GRKERNSEC_TPE
56120 + help
56121 + If you say Y here, the group you specify in the TPE configuration will
56122 + decide what group TPE restrictions will be *disabled* for. This
56123 + option is useful if you want TPE restrictions to be applied to most
56124 + users on the system. If the sysctl option is enabled, a sysctl option
56125 + with name "tpe_invert" is created. Unlike other sysctl options, this
56126 + entry will default to on for backward-compatibility.
56127 +
56128 +config GRKERNSEC_TPE_GID
56129 + int "GID for untrusted users"
56130 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56131 + default 1005
56132 + help
56133 + Setting this GID determines what group TPE restrictions will be
56134 + *enabled* for. If the sysctl option is enabled, a sysctl option
56135 + with name "tpe_gid" is created.
56136 +
56137 +config GRKERNSEC_TPE_GID
56138 + int "GID for trusted users"
56139 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56140 + default 1005
56141 + help
56142 + Setting this GID determines what group TPE restrictions will be
56143 + *disabled* for. If the sysctl option is enabled, a sysctl option
56144 + with name "tpe_gid" is created.
56145 +
56146 +endmenu
56147 +menu "Network Protections"
56148 +depends on GRKERNSEC
56149 +
56150 +config GRKERNSEC_RANDNET
56151 + bool "Larger entropy pools"
56152 + help
56153 + If you say Y here, the entropy pools used for many features of Linux
56154 + and grsecurity will be doubled in size. Since several grsecurity
56155 + features use additional randomness, it is recommended that you say Y
56156 + here. Saying Y here has a similar effect as modifying
56157 + /proc/sys/kernel/random/poolsize.
56158 +
56159 +config GRKERNSEC_BLACKHOLE
56160 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56161 + depends on NET
56162 + help
56163 + If you say Y here, neither TCP resets nor ICMP
56164 + destination-unreachable packets will be sent in response to packets
56165 + sent to ports for which no associated listening process exists.
56166 + This feature supports both IPV4 and IPV6 and exempts the
56167 + loopback interface from blackholing. Enabling this feature
56168 + makes a host more resilient to DoS attacks and reduces network
56169 + visibility against scanners.
56170 +
56171 + The blackhole feature as-implemented is equivalent to the FreeBSD
56172 + blackhole feature, as it prevents RST responses to all packets, not
56173 + just SYNs. Under most application behavior this causes no
56174 + problems, but applications (like haproxy) may not close certain
56175 + connections in a way that cleanly terminates them on the remote
56176 + end, leaving the remote host in LAST_ACK state. Because of this
56177 + side-effect and to prevent intentional LAST_ACK DoSes, this
56178 + feature also adds automatic mitigation against such attacks.
56179 + The mitigation drastically reduces the amount of time a socket
56180 + can spend in LAST_ACK state. If you're using haproxy and not
56181 + all servers it connects to have this option enabled, consider
56182 + disabling this feature on the haproxy host.
56183 +
56184 + If the sysctl option is enabled, two sysctl options with names
56185 + "ip_blackhole" and "lastack_retries" will be created.
56186 + While "ip_blackhole" takes the standard zero/non-zero on/off
56187 + toggle, "lastack_retries" uses the same kinds of values as
56188 + "tcp_retries1" and "tcp_retries2". The default value of 4
56189 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56190 + state.
56191 +
56192 +config GRKERNSEC_SOCKET
56193 + bool "Socket restrictions"
56194 + depends on NET
56195 + help
56196 + If you say Y here, you will be able to choose from several options.
56197 + If you assign a GID on your system and add it to the supplementary
56198 + groups of users you want to restrict socket access to, this patch
56199 + will perform up to three things, based on the option(s) you choose.
56200 +
56201 +config GRKERNSEC_SOCKET_ALL
56202 + bool "Deny any sockets to group"
56203 + depends on GRKERNSEC_SOCKET
56204 + help
56205 + If you say Y here, you will be able to choose a GID of whose users will
56206 + be unable to connect to other hosts from your machine or run server
56207 + applications from your machine. If the sysctl option is enabled, a
56208 + sysctl option with name "socket_all" is created.
56209 +
56210 +config GRKERNSEC_SOCKET_ALL_GID
56211 + int "GID to deny all sockets for"
56212 + depends on GRKERNSEC_SOCKET_ALL
56213 + default 1004
56214 + help
56215 + Here you can choose the GID to disable socket access for. Remember to
56216 + add the users you want socket access disabled for to the GID
56217 + specified here. If the sysctl option is enabled, a sysctl option
56218 + with name "socket_all_gid" is created.
56219 +
56220 +config GRKERNSEC_SOCKET_CLIENT
56221 + bool "Deny client sockets to group"
56222 + depends on GRKERNSEC_SOCKET
56223 + help
56224 + If you say Y here, you will be able to choose a GID of whose users will
56225 + be unable to connect to other hosts from your machine, but will be
56226 + able to run servers. If this option is enabled, all users in the group
56227 + you specify will have to use passive mode when initiating ftp transfers
56228 + from the shell on your machine. If the sysctl option is enabled, a
56229 + sysctl option with name "socket_client" is created.
56230 +
56231 +config GRKERNSEC_SOCKET_CLIENT_GID
56232 + int "GID to deny client sockets for"
56233 + depends on GRKERNSEC_SOCKET_CLIENT
56234 + default 1003
56235 + help
56236 + Here you can choose the GID to disable client socket access for.
56237 + Remember to add the users you want client socket access disabled for to
56238 + the GID specified here. If the sysctl option is enabled, a sysctl
56239 + option with name "socket_client_gid" is created.
56240 +
56241 +config GRKERNSEC_SOCKET_SERVER
56242 + bool "Deny server sockets to group"
56243 + depends on GRKERNSEC_SOCKET
56244 + help
56245 + If you say Y here, you will be able to choose a GID of whose users will
56246 + be unable to run server applications from your machine. If the sysctl
56247 + option is enabled, a sysctl option with name "socket_server" is created.
56248 +
56249 +config GRKERNSEC_SOCKET_SERVER_GID
56250 + int "GID to deny server sockets for"
56251 + depends on GRKERNSEC_SOCKET_SERVER
56252 + default 1002
56253 + help
56254 + Here you can choose the GID to disable server socket access for.
56255 + Remember to add the users you want server socket access disabled for to
56256 + the GID specified here. If the sysctl option is enabled, a sysctl
56257 + option with name "socket_server_gid" is created.
56258 +
56259 +endmenu
56260 +menu "Sysctl support"
56261 +depends on GRKERNSEC && SYSCTL
56262 +
56263 +config GRKERNSEC_SYSCTL
56264 + bool "Sysctl support"
56265 + help
56266 + If you say Y here, you will be able to change the options that
56267 + grsecurity runs with at bootup, without having to recompile your
56268 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56269 + to enable (1) or disable (0) various features. All the sysctl entries
56270 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56271 + All features enabled in the kernel configuration are disabled at boot
56272 + if you do not say Y to the "Turn on features by default" option.
56273 + All options should be set at startup, and the grsec_lock entry should
56274 + be set to a non-zero value after all the options are set.
56275 + *THIS IS EXTREMELY IMPORTANT*
56276 +
56277 +config GRKERNSEC_SYSCTL_DISTRO
56278 + bool "Extra sysctl support for distro makers (READ HELP)"
56279 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56280 + help
56281 + If you say Y here, additional sysctl options will be created
56282 + for features that affect processes running as root. Therefore,
56283 + it is critical when using this option that the grsec_lock entry be
56284 + enabled after boot. Only distros with prebuilt kernel packages
56285 + with this option enabled that can ensure grsec_lock is enabled
56286 + after boot should use this option.
56287 + *Failure to set grsec_lock after boot makes all grsec features
56288 + this option covers useless*
56289 +
56290 + Currently this option creates the following sysctl entries:
56291 + "Disable Privileged I/O": "disable_priv_io"
56292 +
56293 +config GRKERNSEC_SYSCTL_ON
56294 + bool "Turn on features by default"
56295 + depends on GRKERNSEC_SYSCTL
56296 + help
56297 + If you say Y here, instead of having all features enabled in the
56298 + kernel configuration disabled at boot time, the features will be
56299 + enabled at boot time. It is recommended you say Y here unless
56300 + there is some reason you would want all sysctl-tunable features to
56301 + be disabled by default. As mentioned elsewhere, it is important
56302 + to enable the grsec_lock entry once you have finished modifying
56303 + the sysctl entries.
56304 +
56305 +endmenu
56306 +menu "Logging Options"
56307 +depends on GRKERNSEC
56308 +
56309 +config GRKERNSEC_FLOODTIME
56310 + int "Seconds in between log messages (minimum)"
56311 + default 10
56312 + help
56313 + This option allows you to enforce the number of seconds between
56314 + grsecurity log messages. The default should be suitable for most
56315 + people, however, if you choose to change it, choose a value small enough
56316 + to allow informative logs to be produced, but large enough to
56317 + prevent flooding.
56318 +
56319 +config GRKERNSEC_FLOODBURST
56320 + int "Number of messages in a burst (maximum)"
56321 + default 6
56322 + help
56323 + This option allows you to choose the maximum number of messages allowed
56324 + within the flood time interval you chose in a separate option. The
56325 + default should be suitable for most people, however if you find that
56326 + many of your logs are being interpreted as flooding, you may want to
56327 + raise this value.
56328 +
56329 +endmenu
56330 +
56331 +endmenu
56332 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56333 new file mode 100644
56334 index 0000000..1b9afa9
56335 --- /dev/null
56336 +++ b/grsecurity/Makefile
56337 @@ -0,0 +1,38 @@
56338 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56339 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56340 +# into an RBAC system
56341 +#
56342 +# All code in this directory and various hooks inserted throughout the kernel
56343 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56344 +# under the GPL v2 or higher
56345 +
56346 +KBUILD_CFLAGS += -Werror
56347 +
56348 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56349 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56350 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56351 +
56352 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56353 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56354 + gracl_learn.o grsec_log.o
56355 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56356 +
56357 +ifdef CONFIG_NET
56358 +obj-y += grsec_sock.o
56359 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56360 +endif
56361 +
56362 +ifndef CONFIG_GRKERNSEC
56363 +obj-y += grsec_disabled.o
56364 +endif
56365 +
56366 +ifdef CONFIG_GRKERNSEC_HIDESYM
56367 +extra-y := grsec_hidesym.o
56368 +$(obj)/grsec_hidesym.o:
56369 + @-chmod -f 500 /boot
56370 + @-chmod -f 500 /lib/modules
56371 + @-chmod -f 500 /lib64/modules
56372 + @-chmod -f 500 /lib32/modules
56373 + @-chmod -f 700 .
56374 + @echo ' grsec: protected kernel image paths'
56375 +endif
56376 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56377 new file mode 100644
56378 index 0000000..b1c4f4d
56379 --- /dev/null
56380 +++ b/grsecurity/gracl.c
56381 @@ -0,0 +1,4149 @@
56382 +#include <linux/kernel.h>
56383 +#include <linux/module.h>
56384 +#include <linux/sched.h>
56385 +#include <linux/mm.h>
56386 +#include <linux/file.h>
56387 +#include <linux/fs.h>
56388 +#include <linux/namei.h>
56389 +#include <linux/mount.h>
56390 +#include <linux/tty.h>
56391 +#include <linux/proc_fs.h>
56392 +#include <linux/smp_lock.h>
56393 +#include <linux/slab.h>
56394 +#include <linux/vmalloc.h>
56395 +#include <linux/types.h>
56396 +#include <linux/sysctl.h>
56397 +#include <linux/netdevice.h>
56398 +#include <linux/ptrace.h>
56399 +#include <linux/gracl.h>
56400 +#include <linux/gralloc.h>
56401 +#include <linux/security.h>
56402 +#include <linux/grinternal.h>
56403 +#include <linux/pid_namespace.h>
56404 +#include <linux/fdtable.h>
56405 +#include <linux/percpu.h>
56406 +
56407 +#include <asm/uaccess.h>
56408 +#include <asm/errno.h>
56409 +#include <asm/mman.h>
56410 +
56411 +static struct acl_role_db acl_role_set;
56412 +static struct name_db name_set;
56413 +static struct inodev_db inodev_set;
56414 +
56415 +/* for keeping track of userspace pointers used for subjects, so we
56416 + can share references in the kernel as well
56417 +*/
56418 +
56419 +static struct dentry *real_root;
56420 +static struct vfsmount *real_root_mnt;
56421 +
56422 +static struct acl_subj_map_db subj_map_set;
56423 +
56424 +static struct acl_role_label *default_role;
56425 +
56426 +static struct acl_role_label *role_list;
56427 +
56428 +static u16 acl_sp_role_value;
56429 +
56430 +extern char *gr_shared_page[4];
56431 +static DEFINE_MUTEX(gr_dev_mutex);
56432 +DEFINE_RWLOCK(gr_inode_lock);
56433 +
56434 +struct gr_arg *gr_usermode;
56435 +
56436 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
56437 +
56438 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56439 +extern void gr_clear_learn_entries(void);
56440 +
56441 +#ifdef CONFIG_GRKERNSEC_RESLOG
56442 +extern void gr_log_resource(const struct task_struct *task,
56443 + const int res, const unsigned long wanted, const int gt);
56444 +#endif
56445 +
56446 +unsigned char *gr_system_salt;
56447 +unsigned char *gr_system_sum;
56448 +
56449 +static struct sprole_pw **acl_special_roles = NULL;
56450 +static __u16 num_sprole_pws = 0;
56451 +
56452 +static struct acl_role_label *kernel_role = NULL;
56453 +
56454 +static unsigned int gr_auth_attempts = 0;
56455 +static unsigned long gr_auth_expires = 0UL;
56456 +
56457 +#ifdef CONFIG_NET
56458 +extern struct vfsmount *sock_mnt;
56459 +#endif
56460 +extern struct vfsmount *pipe_mnt;
56461 +extern struct vfsmount *shm_mnt;
56462 +#ifdef CONFIG_HUGETLBFS
56463 +extern struct vfsmount *hugetlbfs_vfsmount;
56464 +#endif
56465 +
56466 +static struct acl_object_label *fakefs_obj_rw;
56467 +static struct acl_object_label *fakefs_obj_rwx;
56468 +
56469 +extern int gr_init_uidset(void);
56470 +extern void gr_free_uidset(void);
56471 +extern void gr_remove_uid(uid_t uid);
56472 +extern int gr_find_uid(uid_t uid);
56473 +
56474 +__inline__ int
56475 +gr_acl_is_enabled(void)
56476 +{
56477 + return (gr_status & GR_READY);
56478 +}
56479 +
56480 +#ifdef CONFIG_BTRFS_FS
56481 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56482 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56483 +#endif
56484 +
56485 +static inline dev_t __get_dev(const struct dentry *dentry)
56486 +{
56487 +#ifdef CONFIG_BTRFS_FS
56488 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56489 + return get_btrfs_dev_from_inode(dentry->d_inode);
56490 + else
56491 +#endif
56492 + return dentry->d_inode->i_sb->s_dev;
56493 +}
56494 +
56495 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56496 +{
56497 + return __get_dev(dentry);
56498 +}
56499 +
56500 +static char gr_task_roletype_to_char(struct task_struct *task)
56501 +{
56502 + switch (task->role->roletype &
56503 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56504 + GR_ROLE_SPECIAL)) {
56505 + case GR_ROLE_DEFAULT:
56506 + return 'D';
56507 + case GR_ROLE_USER:
56508 + return 'U';
56509 + case GR_ROLE_GROUP:
56510 + return 'G';
56511 + case GR_ROLE_SPECIAL:
56512 + return 'S';
56513 + }
56514 +
56515 + return 'X';
56516 +}
56517 +
56518 +char gr_roletype_to_char(void)
56519 +{
56520 + return gr_task_roletype_to_char(current);
56521 +}
56522 +
56523 +__inline__ int
56524 +gr_acl_tpe_check(void)
56525 +{
56526 + if (unlikely(!(gr_status & GR_READY)))
56527 + return 0;
56528 + if (current->role->roletype & GR_ROLE_TPE)
56529 + return 1;
56530 + else
56531 + return 0;
56532 +}
56533 +
56534 +int
56535 +gr_handle_rawio(const struct inode *inode)
56536 +{
56537 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56538 + if (inode && S_ISBLK(inode->i_mode) &&
56539 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56540 + !capable(CAP_SYS_RAWIO))
56541 + return 1;
56542 +#endif
56543 + return 0;
56544 +}
56545 +
56546 +static int
56547 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56548 +{
56549 + if (likely(lena != lenb))
56550 + return 0;
56551 +
56552 + return !memcmp(a, b, lena);
56553 +}
56554 +
56555 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56556 +{
56557 + *buflen -= namelen;
56558 + if (*buflen < 0)
56559 + return -ENAMETOOLONG;
56560 + *buffer -= namelen;
56561 + memcpy(*buffer, str, namelen);
56562 + return 0;
56563 +}
56564 +
56565 +/* this must be called with vfsmount_lock and dcache_lock held */
56566 +
56567 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56568 + struct dentry *root, struct vfsmount *rootmnt,
56569 + char *buffer, int buflen)
56570 +{
56571 + char * end = buffer+buflen;
56572 + char * retval;
56573 + int namelen;
56574 +
56575 + *--end = '\0';
56576 + buflen--;
56577 +
56578 + if (buflen < 1)
56579 + goto Elong;
56580 + /* Get '/' right */
56581 + retval = end-1;
56582 + *retval = '/';
56583 +
56584 + for (;;) {
56585 + struct dentry * parent;
56586 +
56587 + if (dentry == root && vfsmnt == rootmnt)
56588 + break;
56589 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56590 + /* Global root? */
56591 + if (vfsmnt->mnt_parent == vfsmnt)
56592 + goto global_root;
56593 + dentry = vfsmnt->mnt_mountpoint;
56594 + vfsmnt = vfsmnt->mnt_parent;
56595 + continue;
56596 + }
56597 + parent = dentry->d_parent;
56598 + prefetch(parent);
56599 + namelen = dentry->d_name.len;
56600 + buflen -= namelen + 1;
56601 + if (buflen < 0)
56602 + goto Elong;
56603 + end -= namelen;
56604 + memcpy(end, dentry->d_name.name, namelen);
56605 + *--end = '/';
56606 + retval = end;
56607 + dentry = parent;
56608 + }
56609 +
56610 +out:
56611 + return retval;
56612 +
56613 +global_root:
56614 + namelen = dentry->d_name.len;
56615 + buflen -= namelen;
56616 + if (buflen < 0)
56617 + goto Elong;
56618 + retval -= namelen-1; /* hit the slash */
56619 + memcpy(retval, dentry->d_name.name, namelen);
56620 + goto out;
56621 +Elong:
56622 + retval = ERR_PTR(-ENAMETOOLONG);
56623 + goto out;
56624 +}
56625 +
56626 +static char *
56627 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56628 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56629 +{
56630 + char *retval;
56631 +
56632 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56633 + if (unlikely(IS_ERR(retval)))
56634 + retval = strcpy(buf, "<path too long>");
56635 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56636 + retval[1] = '\0';
56637 +
56638 + return retval;
56639 +}
56640 +
56641 +static char *
56642 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56643 + char *buf, int buflen)
56644 +{
56645 + char *res;
56646 +
56647 + /* we can use real_root, real_root_mnt, because this is only called
56648 + by the RBAC system */
56649 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56650 +
56651 + return res;
56652 +}
56653 +
56654 +static char *
56655 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56656 + char *buf, int buflen)
56657 +{
56658 + char *res;
56659 + struct dentry *root;
56660 + struct vfsmount *rootmnt;
56661 + struct task_struct *reaper = &init_task;
56662 +
56663 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56664 + read_lock(&reaper->fs->lock);
56665 + root = dget(reaper->fs->root.dentry);
56666 + rootmnt = mntget(reaper->fs->root.mnt);
56667 + read_unlock(&reaper->fs->lock);
56668 +
56669 + spin_lock(&dcache_lock);
56670 + spin_lock(&vfsmount_lock);
56671 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56672 + spin_unlock(&vfsmount_lock);
56673 + spin_unlock(&dcache_lock);
56674 +
56675 + dput(root);
56676 + mntput(rootmnt);
56677 + return res;
56678 +}
56679 +
56680 +static char *
56681 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56682 +{
56683 + char *ret;
56684 + spin_lock(&dcache_lock);
56685 + spin_lock(&vfsmount_lock);
56686 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56687 + PAGE_SIZE);
56688 + spin_unlock(&vfsmount_lock);
56689 + spin_unlock(&dcache_lock);
56690 + return ret;
56691 +}
56692 +
56693 +static char *
56694 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56695 +{
56696 + char *ret;
56697 + char *buf;
56698 + int buflen;
56699 +
56700 + spin_lock(&dcache_lock);
56701 + spin_lock(&vfsmount_lock);
56702 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56703 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56704 + buflen = (int)(ret - buf);
56705 + if (buflen >= 5)
56706 + prepend(&ret, &buflen, "/proc", 5);
56707 + else
56708 + ret = strcpy(buf, "<path too long>");
56709 + spin_unlock(&vfsmount_lock);
56710 + spin_unlock(&dcache_lock);
56711 + return ret;
56712 +}
56713 +
56714 +char *
56715 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56716 +{
56717 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56718 + PAGE_SIZE);
56719 +}
56720 +
56721 +char *
56722 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56723 +{
56724 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56725 + PAGE_SIZE);
56726 +}
56727 +
56728 +char *
56729 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56730 +{
56731 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56732 + PAGE_SIZE);
56733 +}
56734 +
56735 +char *
56736 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56737 +{
56738 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56739 + PAGE_SIZE);
56740 +}
56741 +
56742 +char *
56743 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56744 +{
56745 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56746 + PAGE_SIZE);
56747 +}
56748 +
56749 +__inline__ __u32
56750 +to_gr_audit(const __u32 reqmode)
56751 +{
56752 + /* masks off auditable permission flags, then shifts them to create
56753 + auditing flags, and adds the special case of append auditing if
56754 + we're requesting write */
56755 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56756 +}
56757 +
56758 +struct acl_subject_label *
56759 +lookup_subject_map(const struct acl_subject_label *userp)
56760 +{
56761 + unsigned int index = shash(userp, subj_map_set.s_size);
56762 + struct subject_map *match;
56763 +
56764 + match = subj_map_set.s_hash[index];
56765 +
56766 + while (match && match->user != userp)
56767 + match = match->next;
56768 +
56769 + if (match != NULL)
56770 + return match->kernel;
56771 + else
56772 + return NULL;
56773 +}
56774 +
56775 +static void
56776 +insert_subj_map_entry(struct subject_map *subjmap)
56777 +{
56778 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56779 + struct subject_map **curr;
56780 +
56781 + subjmap->prev = NULL;
56782 +
56783 + curr = &subj_map_set.s_hash[index];
56784 + if (*curr != NULL)
56785 + (*curr)->prev = subjmap;
56786 +
56787 + subjmap->next = *curr;
56788 + *curr = subjmap;
56789 +
56790 + return;
56791 +}
56792 +
56793 +static struct acl_role_label *
56794 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56795 + const gid_t gid)
56796 +{
56797 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56798 + struct acl_role_label *match;
56799 + struct role_allowed_ip *ipp;
56800 + unsigned int x;
56801 + u32 curr_ip = task->signal->curr_ip;
56802 +
56803 + task->signal->saved_ip = curr_ip;
56804 +
56805 + match = acl_role_set.r_hash[index];
56806 +
56807 + while (match) {
56808 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56809 + for (x = 0; x < match->domain_child_num; x++) {
56810 + if (match->domain_children[x] == uid)
56811 + goto found;
56812 + }
56813 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56814 + break;
56815 + match = match->next;
56816 + }
56817 +found:
56818 + if (match == NULL) {
56819 + try_group:
56820 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56821 + match = acl_role_set.r_hash[index];
56822 +
56823 + while (match) {
56824 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56825 + for (x = 0; x < match->domain_child_num; x++) {
56826 + if (match->domain_children[x] == gid)
56827 + goto found2;
56828 + }
56829 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56830 + break;
56831 + match = match->next;
56832 + }
56833 +found2:
56834 + if (match == NULL)
56835 + match = default_role;
56836 + if (match->allowed_ips == NULL)
56837 + return match;
56838 + else {
56839 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56840 + if (likely
56841 + ((ntohl(curr_ip) & ipp->netmask) ==
56842 + (ntohl(ipp->addr) & ipp->netmask)))
56843 + return match;
56844 + }
56845 + match = default_role;
56846 + }
56847 + } else if (match->allowed_ips == NULL) {
56848 + return match;
56849 + } else {
56850 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56851 + if (likely
56852 + ((ntohl(curr_ip) & ipp->netmask) ==
56853 + (ntohl(ipp->addr) & ipp->netmask)))
56854 + return match;
56855 + }
56856 + goto try_group;
56857 + }
56858 +
56859 + return match;
56860 +}
56861 +
56862 +struct acl_subject_label *
56863 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56864 + const struct acl_role_label *role)
56865 +{
56866 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56867 + struct acl_subject_label *match;
56868 +
56869 + match = role->subj_hash[index];
56870 +
56871 + while (match && (match->inode != ino || match->device != dev ||
56872 + (match->mode & GR_DELETED))) {
56873 + match = match->next;
56874 + }
56875 +
56876 + if (match && !(match->mode & GR_DELETED))
56877 + return match;
56878 + else
56879 + return NULL;
56880 +}
56881 +
56882 +struct acl_subject_label *
56883 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56884 + const struct acl_role_label *role)
56885 +{
56886 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56887 + struct acl_subject_label *match;
56888 +
56889 + match = role->subj_hash[index];
56890 +
56891 + while (match && (match->inode != ino || match->device != dev ||
56892 + !(match->mode & GR_DELETED))) {
56893 + match = match->next;
56894 + }
56895 +
56896 + if (match && (match->mode & GR_DELETED))
56897 + return match;
56898 + else
56899 + return NULL;
56900 +}
56901 +
56902 +static struct acl_object_label *
56903 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56904 + const struct acl_subject_label *subj)
56905 +{
56906 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56907 + struct acl_object_label *match;
56908 +
56909 + match = subj->obj_hash[index];
56910 +
56911 + while (match && (match->inode != ino || match->device != dev ||
56912 + (match->mode & GR_DELETED))) {
56913 + match = match->next;
56914 + }
56915 +
56916 + if (match && !(match->mode & GR_DELETED))
56917 + return match;
56918 + else
56919 + return NULL;
56920 +}
56921 +
56922 +static struct acl_object_label *
56923 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56924 + const struct acl_subject_label *subj)
56925 +{
56926 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56927 + struct acl_object_label *match;
56928 +
56929 + match = subj->obj_hash[index];
56930 +
56931 + while (match && (match->inode != ino || match->device != dev ||
56932 + !(match->mode & GR_DELETED))) {
56933 + match = match->next;
56934 + }
56935 +
56936 + if (match && (match->mode & GR_DELETED))
56937 + return match;
56938 +
56939 + match = subj->obj_hash[index];
56940 +
56941 + while (match && (match->inode != ino || match->device != dev ||
56942 + (match->mode & GR_DELETED))) {
56943 + match = match->next;
56944 + }
56945 +
56946 + if (match && !(match->mode & GR_DELETED))
56947 + return match;
56948 + else
56949 + return NULL;
56950 +}
56951 +
56952 +static struct name_entry *
56953 +lookup_name_entry(const char *name)
56954 +{
56955 + unsigned int len = strlen(name);
56956 + unsigned int key = full_name_hash(name, len);
56957 + unsigned int index = key % name_set.n_size;
56958 + struct name_entry *match;
56959 +
56960 + match = name_set.n_hash[index];
56961 +
56962 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56963 + match = match->next;
56964 +
56965 + return match;
56966 +}
56967 +
56968 +static struct name_entry *
56969 +lookup_name_entry_create(const char *name)
56970 +{
56971 + unsigned int len = strlen(name);
56972 + unsigned int key = full_name_hash(name, len);
56973 + unsigned int index = key % name_set.n_size;
56974 + struct name_entry *match;
56975 +
56976 + match = name_set.n_hash[index];
56977 +
56978 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56979 + !match->deleted))
56980 + match = match->next;
56981 +
56982 + if (match && match->deleted)
56983 + return match;
56984 +
56985 + match = name_set.n_hash[index];
56986 +
56987 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56988 + match->deleted))
56989 + match = match->next;
56990 +
56991 + if (match && !match->deleted)
56992 + return match;
56993 + else
56994 + return NULL;
56995 +}
56996 +
56997 +static struct inodev_entry *
56998 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
56999 +{
57000 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
57001 + struct inodev_entry *match;
57002 +
57003 + match = inodev_set.i_hash[index];
57004 +
57005 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57006 + match = match->next;
57007 +
57008 + return match;
57009 +}
57010 +
57011 +static void
57012 +insert_inodev_entry(struct inodev_entry *entry)
57013 +{
57014 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57015 + inodev_set.i_size);
57016 + struct inodev_entry **curr;
57017 +
57018 + entry->prev = NULL;
57019 +
57020 + curr = &inodev_set.i_hash[index];
57021 + if (*curr != NULL)
57022 + (*curr)->prev = entry;
57023 +
57024 + entry->next = *curr;
57025 + *curr = entry;
57026 +
57027 + return;
57028 +}
57029 +
57030 +static void
57031 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57032 +{
57033 + unsigned int index =
57034 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57035 + struct acl_role_label **curr;
57036 + struct acl_role_label *tmp;
57037 +
57038 + curr = &acl_role_set.r_hash[index];
57039 +
57040 + /* if role was already inserted due to domains and already has
57041 + a role in the same bucket as it attached, then we need to
57042 + combine these two buckets
57043 + */
57044 + if (role->next) {
57045 + tmp = role->next;
57046 + while (tmp->next)
57047 + tmp = tmp->next;
57048 + tmp->next = *curr;
57049 + } else
57050 + role->next = *curr;
57051 + *curr = role;
57052 +
57053 + return;
57054 +}
57055 +
57056 +static void
57057 +insert_acl_role_label(struct acl_role_label *role)
57058 +{
57059 + int i;
57060 +
57061 + if (role_list == NULL) {
57062 + role_list = role;
57063 + role->prev = NULL;
57064 + } else {
57065 + role->prev = role_list;
57066 + role_list = role;
57067 + }
57068 +
57069 + /* used for hash chains */
57070 + role->next = NULL;
57071 +
57072 + if (role->roletype & GR_ROLE_DOMAIN) {
57073 + for (i = 0; i < role->domain_child_num; i++)
57074 + __insert_acl_role_label(role, role->domain_children[i]);
57075 + } else
57076 + __insert_acl_role_label(role, role->uidgid);
57077 +}
57078 +
57079 +static int
57080 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57081 +{
57082 + struct name_entry **curr, *nentry;
57083 + struct inodev_entry *ientry;
57084 + unsigned int len = strlen(name);
57085 + unsigned int key = full_name_hash(name, len);
57086 + unsigned int index = key % name_set.n_size;
57087 +
57088 + curr = &name_set.n_hash[index];
57089 +
57090 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57091 + curr = &((*curr)->next);
57092 +
57093 + if (*curr != NULL)
57094 + return 1;
57095 +
57096 + nentry = acl_alloc(sizeof (struct name_entry));
57097 + if (nentry == NULL)
57098 + return 0;
57099 + ientry = acl_alloc(sizeof (struct inodev_entry));
57100 + if (ientry == NULL)
57101 + return 0;
57102 + ientry->nentry = nentry;
57103 +
57104 + nentry->key = key;
57105 + nentry->name = name;
57106 + nentry->inode = inode;
57107 + nentry->device = device;
57108 + nentry->len = len;
57109 + nentry->deleted = deleted;
57110 +
57111 + nentry->prev = NULL;
57112 + curr = &name_set.n_hash[index];
57113 + if (*curr != NULL)
57114 + (*curr)->prev = nentry;
57115 + nentry->next = *curr;
57116 + *curr = nentry;
57117 +
57118 + /* insert us into the table searchable by inode/dev */
57119 + insert_inodev_entry(ientry);
57120 +
57121 + return 1;
57122 +}
57123 +
57124 +static void
57125 +insert_acl_obj_label(struct acl_object_label *obj,
57126 + struct acl_subject_label *subj)
57127 +{
57128 + unsigned int index =
57129 + fhash(obj->inode, obj->device, subj->obj_hash_size);
57130 + struct acl_object_label **curr;
57131 +
57132 +
57133 + obj->prev = NULL;
57134 +
57135 + curr = &subj->obj_hash[index];
57136 + if (*curr != NULL)
57137 + (*curr)->prev = obj;
57138 +
57139 + obj->next = *curr;
57140 + *curr = obj;
57141 +
57142 + return;
57143 +}
57144 +
57145 +static void
57146 +insert_acl_subj_label(struct acl_subject_label *obj,
57147 + struct acl_role_label *role)
57148 +{
57149 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57150 + struct acl_subject_label **curr;
57151 +
57152 + obj->prev = NULL;
57153 +
57154 + curr = &role->subj_hash[index];
57155 + if (*curr != NULL)
57156 + (*curr)->prev = obj;
57157 +
57158 + obj->next = *curr;
57159 + *curr = obj;
57160 +
57161 + return;
57162 +}
57163 +
57164 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57165 +
57166 +static void *
57167 +create_table(__u32 * len, int elementsize)
57168 +{
57169 + unsigned int table_sizes[] = {
57170 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57171 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57172 + 4194301, 8388593, 16777213, 33554393, 67108859
57173 + };
57174 + void *newtable = NULL;
57175 + unsigned int pwr = 0;
57176 +
57177 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57178 + table_sizes[pwr] <= *len)
57179 + pwr++;
57180 +
57181 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57182 + return newtable;
57183 +
57184 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57185 + newtable =
57186 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57187 + else
57188 + newtable = vmalloc(table_sizes[pwr] * elementsize);
57189 +
57190 + *len = table_sizes[pwr];
57191 +
57192 + return newtable;
57193 +}
57194 +
57195 +static int
57196 +init_variables(const struct gr_arg *arg)
57197 +{
57198 + struct task_struct *reaper = &init_task;
57199 + unsigned int stacksize;
57200 +
57201 + subj_map_set.s_size = arg->role_db.num_subjects;
57202 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57203 + name_set.n_size = arg->role_db.num_objects;
57204 + inodev_set.i_size = arg->role_db.num_objects;
57205 +
57206 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
57207 + !name_set.n_size || !inodev_set.i_size)
57208 + return 1;
57209 +
57210 + if (!gr_init_uidset())
57211 + return 1;
57212 +
57213 + /* set up the stack that holds allocation info */
57214 +
57215 + stacksize = arg->role_db.num_pointers + 5;
57216 +
57217 + if (!acl_alloc_stack_init(stacksize))
57218 + return 1;
57219 +
57220 + /* grab reference for the real root dentry and vfsmount */
57221 + read_lock(&reaper->fs->lock);
57222 + real_root = dget(reaper->fs->root.dentry);
57223 + real_root_mnt = mntget(reaper->fs->root.mnt);
57224 + read_unlock(&reaper->fs->lock);
57225 +
57226 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57227 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57228 +#endif
57229 +
57230 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57231 + if (fakefs_obj_rw == NULL)
57232 + return 1;
57233 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57234 +
57235 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57236 + if (fakefs_obj_rwx == NULL)
57237 + return 1;
57238 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57239 +
57240 + subj_map_set.s_hash =
57241 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57242 + acl_role_set.r_hash =
57243 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57244 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57245 + inodev_set.i_hash =
57246 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57247 +
57248 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57249 + !name_set.n_hash || !inodev_set.i_hash)
57250 + return 1;
57251 +
57252 + memset(subj_map_set.s_hash, 0,
57253 + sizeof(struct subject_map *) * subj_map_set.s_size);
57254 + memset(acl_role_set.r_hash, 0,
57255 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
57256 + memset(name_set.n_hash, 0,
57257 + sizeof (struct name_entry *) * name_set.n_size);
57258 + memset(inodev_set.i_hash, 0,
57259 + sizeof (struct inodev_entry *) * inodev_set.i_size);
57260 +
57261 + return 0;
57262 +}
57263 +
57264 +/* free information not needed after startup
57265 + currently contains user->kernel pointer mappings for subjects
57266 +*/
57267 +
57268 +static void
57269 +free_init_variables(void)
57270 +{
57271 + __u32 i;
57272 +
57273 + if (subj_map_set.s_hash) {
57274 + for (i = 0; i < subj_map_set.s_size; i++) {
57275 + if (subj_map_set.s_hash[i]) {
57276 + kfree(subj_map_set.s_hash[i]);
57277 + subj_map_set.s_hash[i] = NULL;
57278 + }
57279 + }
57280 +
57281 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57282 + PAGE_SIZE)
57283 + kfree(subj_map_set.s_hash);
57284 + else
57285 + vfree(subj_map_set.s_hash);
57286 + }
57287 +
57288 + return;
57289 +}
57290 +
57291 +static void
57292 +free_variables(void)
57293 +{
57294 + struct acl_subject_label *s;
57295 + struct acl_role_label *r;
57296 + struct task_struct *task, *task2;
57297 + unsigned int x;
57298 +
57299 + gr_clear_learn_entries();
57300 +
57301 + read_lock(&tasklist_lock);
57302 + do_each_thread(task2, task) {
57303 + task->acl_sp_role = 0;
57304 + task->acl_role_id = 0;
57305 + task->acl = NULL;
57306 + task->role = NULL;
57307 + } while_each_thread(task2, task);
57308 + read_unlock(&tasklist_lock);
57309 +
57310 + /* release the reference to the real root dentry and vfsmount */
57311 + if (real_root)
57312 + dput(real_root);
57313 + real_root = NULL;
57314 + if (real_root_mnt)
57315 + mntput(real_root_mnt);
57316 + real_root_mnt = NULL;
57317 +
57318 + /* free all object hash tables */
57319 +
57320 + FOR_EACH_ROLE_START(r)
57321 + if (r->subj_hash == NULL)
57322 + goto next_role;
57323 + FOR_EACH_SUBJECT_START(r, s, x)
57324 + if (s->obj_hash == NULL)
57325 + break;
57326 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57327 + kfree(s->obj_hash);
57328 + else
57329 + vfree(s->obj_hash);
57330 + FOR_EACH_SUBJECT_END(s, x)
57331 + FOR_EACH_NESTED_SUBJECT_START(r, s)
57332 + if (s->obj_hash == NULL)
57333 + break;
57334 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57335 + kfree(s->obj_hash);
57336 + else
57337 + vfree(s->obj_hash);
57338 + FOR_EACH_NESTED_SUBJECT_END(s)
57339 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57340 + kfree(r->subj_hash);
57341 + else
57342 + vfree(r->subj_hash);
57343 + r->subj_hash = NULL;
57344 +next_role:
57345 + FOR_EACH_ROLE_END(r)
57346 +
57347 + acl_free_all();
57348 +
57349 + if (acl_role_set.r_hash) {
57350 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57351 + PAGE_SIZE)
57352 + kfree(acl_role_set.r_hash);
57353 + else
57354 + vfree(acl_role_set.r_hash);
57355 + }
57356 + if (name_set.n_hash) {
57357 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
57358 + PAGE_SIZE)
57359 + kfree(name_set.n_hash);
57360 + else
57361 + vfree(name_set.n_hash);
57362 + }
57363 +
57364 + if (inodev_set.i_hash) {
57365 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57366 + PAGE_SIZE)
57367 + kfree(inodev_set.i_hash);
57368 + else
57369 + vfree(inodev_set.i_hash);
57370 + }
57371 +
57372 + gr_free_uidset();
57373 +
57374 + memset(&name_set, 0, sizeof (struct name_db));
57375 + memset(&inodev_set, 0, sizeof (struct inodev_db));
57376 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57377 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57378 +
57379 + default_role = NULL;
57380 + role_list = NULL;
57381 +
57382 + return;
57383 +}
57384 +
57385 +static __u32
57386 +count_user_objs(struct acl_object_label *userp)
57387 +{
57388 + struct acl_object_label o_tmp;
57389 + __u32 num = 0;
57390 +
57391 + while (userp) {
57392 + if (copy_from_user(&o_tmp, userp,
57393 + sizeof (struct acl_object_label)))
57394 + break;
57395 +
57396 + userp = o_tmp.prev;
57397 + num++;
57398 + }
57399 +
57400 + return num;
57401 +}
57402 +
57403 +static struct acl_subject_label *
57404 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57405 +
57406 +static int
57407 +copy_user_glob(struct acl_object_label *obj)
57408 +{
57409 + struct acl_object_label *g_tmp, **guser;
57410 + unsigned int len;
57411 + char *tmp;
57412 +
57413 + if (obj->globbed == NULL)
57414 + return 0;
57415 +
57416 + guser = &obj->globbed;
57417 + while (*guser) {
57418 + g_tmp = (struct acl_object_label *)
57419 + acl_alloc(sizeof (struct acl_object_label));
57420 + if (g_tmp == NULL)
57421 + return -ENOMEM;
57422 +
57423 + if (copy_from_user(g_tmp, *guser,
57424 + sizeof (struct acl_object_label)))
57425 + return -EFAULT;
57426 +
57427 + len = strnlen_user(g_tmp->filename, PATH_MAX);
57428 +
57429 + if (!len || len >= PATH_MAX)
57430 + return -EINVAL;
57431 +
57432 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57433 + return -ENOMEM;
57434 +
57435 + if (copy_from_user(tmp, g_tmp->filename, len))
57436 + return -EFAULT;
57437 + tmp[len-1] = '\0';
57438 + g_tmp->filename = tmp;
57439 +
57440 + *guser = g_tmp;
57441 + guser = &(g_tmp->next);
57442 + }
57443 +
57444 + return 0;
57445 +}
57446 +
57447 +static int
57448 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57449 + struct acl_role_label *role)
57450 +{
57451 + struct acl_object_label *o_tmp;
57452 + unsigned int len;
57453 + int ret;
57454 + char *tmp;
57455 +
57456 + while (userp) {
57457 + if ((o_tmp = (struct acl_object_label *)
57458 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
57459 + return -ENOMEM;
57460 +
57461 + if (copy_from_user(o_tmp, userp,
57462 + sizeof (struct acl_object_label)))
57463 + return -EFAULT;
57464 +
57465 + userp = o_tmp->prev;
57466 +
57467 + len = strnlen_user(o_tmp->filename, PATH_MAX);
57468 +
57469 + if (!len || len >= PATH_MAX)
57470 + return -EINVAL;
57471 +
57472 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57473 + return -ENOMEM;
57474 +
57475 + if (copy_from_user(tmp, o_tmp->filename, len))
57476 + return -EFAULT;
57477 + tmp[len-1] = '\0';
57478 + o_tmp->filename = tmp;
57479 +
57480 + insert_acl_obj_label(o_tmp, subj);
57481 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57482 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57483 + return -ENOMEM;
57484 +
57485 + ret = copy_user_glob(o_tmp);
57486 + if (ret)
57487 + return ret;
57488 +
57489 + if (o_tmp->nested) {
57490 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57491 + if (IS_ERR(o_tmp->nested))
57492 + return PTR_ERR(o_tmp->nested);
57493 +
57494 + /* insert into nested subject list */
57495 + o_tmp->nested->next = role->hash->first;
57496 + role->hash->first = o_tmp->nested;
57497 + }
57498 + }
57499 +
57500 + return 0;
57501 +}
57502 +
57503 +static __u32
57504 +count_user_subjs(struct acl_subject_label *userp)
57505 +{
57506 + struct acl_subject_label s_tmp;
57507 + __u32 num = 0;
57508 +
57509 + while (userp) {
57510 + if (copy_from_user(&s_tmp, userp,
57511 + sizeof (struct acl_subject_label)))
57512 + break;
57513 +
57514 + userp = s_tmp.prev;
57515 + /* do not count nested subjects against this count, since
57516 + they are not included in the hash table, but are
57517 + attached to objects. We have already counted
57518 + the subjects in userspace for the allocation
57519 + stack
57520 + */
57521 + if (!(s_tmp.mode & GR_NESTED))
57522 + num++;
57523 + }
57524 +
57525 + return num;
57526 +}
57527 +
57528 +static int
57529 +copy_user_allowedips(struct acl_role_label *rolep)
57530 +{
57531 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57532 +
57533 + ruserip = rolep->allowed_ips;
57534 +
57535 + while (ruserip) {
57536 + rlast = rtmp;
57537 +
57538 + if ((rtmp = (struct role_allowed_ip *)
57539 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57540 + return -ENOMEM;
57541 +
57542 + if (copy_from_user(rtmp, ruserip,
57543 + sizeof (struct role_allowed_ip)))
57544 + return -EFAULT;
57545 +
57546 + ruserip = rtmp->prev;
57547 +
57548 + if (!rlast) {
57549 + rtmp->prev = NULL;
57550 + rolep->allowed_ips = rtmp;
57551 + } else {
57552 + rlast->next = rtmp;
57553 + rtmp->prev = rlast;
57554 + }
57555 +
57556 + if (!ruserip)
57557 + rtmp->next = NULL;
57558 + }
57559 +
57560 + return 0;
57561 +}
57562 +
57563 +static int
57564 +copy_user_transitions(struct acl_role_label *rolep)
57565 +{
57566 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57567 +
57568 + unsigned int len;
57569 + char *tmp;
57570 +
57571 + rusertp = rolep->transitions;
57572 +
57573 + while (rusertp) {
57574 + rlast = rtmp;
57575 +
57576 + if ((rtmp = (struct role_transition *)
57577 + acl_alloc(sizeof (struct role_transition))) == NULL)
57578 + return -ENOMEM;
57579 +
57580 + if (copy_from_user(rtmp, rusertp,
57581 + sizeof (struct role_transition)))
57582 + return -EFAULT;
57583 +
57584 + rusertp = rtmp->prev;
57585 +
57586 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57587 +
57588 + if (!len || len >= GR_SPROLE_LEN)
57589 + return -EINVAL;
57590 +
57591 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57592 + return -ENOMEM;
57593 +
57594 + if (copy_from_user(tmp, rtmp->rolename, len))
57595 + return -EFAULT;
57596 + tmp[len-1] = '\0';
57597 + rtmp->rolename = tmp;
57598 +
57599 + if (!rlast) {
57600 + rtmp->prev = NULL;
57601 + rolep->transitions = rtmp;
57602 + } else {
57603 + rlast->next = rtmp;
57604 + rtmp->prev = rlast;
57605 + }
57606 +
57607 + if (!rusertp)
57608 + rtmp->next = NULL;
57609 + }
57610 +
57611 + return 0;
57612 +}
57613 +
57614 +static struct acl_subject_label *
57615 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57616 +{
57617 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57618 + unsigned int len;
57619 + char *tmp;
57620 + __u32 num_objs;
57621 + struct acl_ip_label **i_tmp, *i_utmp2;
57622 + struct gr_hash_struct ghash;
57623 + struct subject_map *subjmap;
57624 + unsigned int i_num;
57625 + int err;
57626 +
57627 + s_tmp = lookup_subject_map(userp);
57628 +
57629 + /* we've already copied this subject into the kernel, just return
57630 + the reference to it, and don't copy it over again
57631 + */
57632 + if (s_tmp)
57633 + return(s_tmp);
57634 +
57635 + if ((s_tmp = (struct acl_subject_label *)
57636 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57637 + return ERR_PTR(-ENOMEM);
57638 +
57639 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57640 + if (subjmap == NULL)
57641 + return ERR_PTR(-ENOMEM);
57642 +
57643 + subjmap->user = userp;
57644 + subjmap->kernel = s_tmp;
57645 + insert_subj_map_entry(subjmap);
57646 +
57647 + if (copy_from_user(s_tmp, userp,
57648 + sizeof (struct acl_subject_label)))
57649 + return ERR_PTR(-EFAULT);
57650 +
57651 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57652 +
57653 + if (!len || len >= PATH_MAX)
57654 + return ERR_PTR(-EINVAL);
57655 +
57656 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57657 + return ERR_PTR(-ENOMEM);
57658 +
57659 + if (copy_from_user(tmp, s_tmp->filename, len))
57660 + return ERR_PTR(-EFAULT);
57661 + tmp[len-1] = '\0';
57662 + s_tmp->filename = tmp;
57663 +
57664 + if (!strcmp(s_tmp->filename, "/"))
57665 + role->root_label = s_tmp;
57666 +
57667 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57668 + return ERR_PTR(-EFAULT);
57669 +
57670 + /* copy user and group transition tables */
57671 +
57672 + if (s_tmp->user_trans_num) {
57673 + uid_t *uidlist;
57674 +
57675 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57676 + if (uidlist == NULL)
57677 + return ERR_PTR(-ENOMEM);
57678 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57679 + return ERR_PTR(-EFAULT);
57680 +
57681 + s_tmp->user_transitions = uidlist;
57682 + }
57683 +
57684 + if (s_tmp->group_trans_num) {
57685 + gid_t *gidlist;
57686 +
57687 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57688 + if (gidlist == NULL)
57689 + return ERR_PTR(-ENOMEM);
57690 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57691 + return ERR_PTR(-EFAULT);
57692 +
57693 + s_tmp->group_transitions = gidlist;
57694 + }
57695 +
57696 + /* set up object hash table */
57697 + num_objs = count_user_objs(ghash.first);
57698 +
57699 + s_tmp->obj_hash_size = num_objs;
57700 + s_tmp->obj_hash =
57701 + (struct acl_object_label **)
57702 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57703 +
57704 + if (!s_tmp->obj_hash)
57705 + return ERR_PTR(-ENOMEM);
57706 +
57707 + memset(s_tmp->obj_hash, 0,
57708 + s_tmp->obj_hash_size *
57709 + sizeof (struct acl_object_label *));
57710 +
57711 + /* add in objects */
57712 + err = copy_user_objs(ghash.first, s_tmp, role);
57713 +
57714 + if (err)
57715 + return ERR_PTR(err);
57716 +
57717 + /* set pointer for parent subject */
57718 + if (s_tmp->parent_subject) {
57719 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57720 +
57721 + if (IS_ERR(s_tmp2))
57722 + return s_tmp2;
57723 +
57724 + s_tmp->parent_subject = s_tmp2;
57725 + }
57726 +
57727 + /* add in ip acls */
57728 +
57729 + if (!s_tmp->ip_num) {
57730 + s_tmp->ips = NULL;
57731 + goto insert;
57732 + }
57733 +
57734 + i_tmp =
57735 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57736 + sizeof (struct acl_ip_label *));
57737 +
57738 + if (!i_tmp)
57739 + return ERR_PTR(-ENOMEM);
57740 +
57741 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57742 + *(i_tmp + i_num) =
57743 + (struct acl_ip_label *)
57744 + acl_alloc(sizeof (struct acl_ip_label));
57745 + if (!*(i_tmp + i_num))
57746 + return ERR_PTR(-ENOMEM);
57747 +
57748 + if (copy_from_user
57749 + (&i_utmp2, s_tmp->ips + i_num,
57750 + sizeof (struct acl_ip_label *)))
57751 + return ERR_PTR(-EFAULT);
57752 +
57753 + if (copy_from_user
57754 + (*(i_tmp + i_num), i_utmp2,
57755 + sizeof (struct acl_ip_label)))
57756 + return ERR_PTR(-EFAULT);
57757 +
57758 + if ((*(i_tmp + i_num))->iface == NULL)
57759 + continue;
57760 +
57761 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57762 + if (!len || len >= IFNAMSIZ)
57763 + return ERR_PTR(-EINVAL);
57764 + tmp = acl_alloc(len);
57765 + if (tmp == NULL)
57766 + return ERR_PTR(-ENOMEM);
57767 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57768 + return ERR_PTR(-EFAULT);
57769 + (*(i_tmp + i_num))->iface = tmp;
57770 + }
57771 +
57772 + s_tmp->ips = i_tmp;
57773 +
57774 +insert:
57775 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57776 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57777 + return ERR_PTR(-ENOMEM);
57778 +
57779 + return s_tmp;
57780 +}
57781 +
57782 +static int
57783 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57784 +{
57785 + struct acl_subject_label s_pre;
57786 + struct acl_subject_label * ret;
57787 + int err;
57788 +
57789 + while (userp) {
57790 + if (copy_from_user(&s_pre, userp,
57791 + sizeof (struct acl_subject_label)))
57792 + return -EFAULT;
57793 +
57794 + /* do not add nested subjects here, add
57795 + while parsing objects
57796 + */
57797 +
57798 + if (s_pre.mode & GR_NESTED) {
57799 + userp = s_pre.prev;
57800 + continue;
57801 + }
57802 +
57803 + ret = do_copy_user_subj(userp, role);
57804 +
57805 + err = PTR_ERR(ret);
57806 + if (IS_ERR(ret))
57807 + return err;
57808 +
57809 + insert_acl_subj_label(ret, role);
57810 +
57811 + userp = s_pre.prev;
57812 + }
57813 +
57814 + return 0;
57815 +}
57816 +
57817 +static int
57818 +copy_user_acl(struct gr_arg *arg)
57819 +{
57820 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57821 + struct sprole_pw *sptmp;
57822 + struct gr_hash_struct *ghash;
57823 + uid_t *domainlist;
57824 + unsigned int r_num;
57825 + unsigned int len;
57826 + char *tmp;
57827 + int err = 0;
57828 + __u16 i;
57829 + __u32 num_subjs;
57830 +
57831 + /* we need a default and kernel role */
57832 + if (arg->role_db.num_roles < 2)
57833 + return -EINVAL;
57834 +
57835 + /* copy special role authentication info from userspace */
57836 +
57837 + num_sprole_pws = arg->num_sprole_pws;
57838 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57839 +
57840 + if (!acl_special_roles) {
57841 + err = -ENOMEM;
57842 + goto cleanup;
57843 + }
57844 +
57845 + for (i = 0; i < num_sprole_pws; i++) {
57846 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57847 + if (!sptmp) {
57848 + err = -ENOMEM;
57849 + goto cleanup;
57850 + }
57851 + if (copy_from_user(sptmp, arg->sprole_pws + i,
57852 + sizeof (struct sprole_pw))) {
57853 + err = -EFAULT;
57854 + goto cleanup;
57855 + }
57856 +
57857 + len =
57858 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57859 +
57860 + if (!len || len >= GR_SPROLE_LEN) {
57861 + err = -EINVAL;
57862 + goto cleanup;
57863 + }
57864 +
57865 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57866 + err = -ENOMEM;
57867 + goto cleanup;
57868 + }
57869 +
57870 + if (copy_from_user(tmp, sptmp->rolename, len)) {
57871 + err = -EFAULT;
57872 + goto cleanup;
57873 + }
57874 + tmp[len-1] = '\0';
57875 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57876 + printk(KERN_ALERT "Copying special role %s\n", tmp);
57877 +#endif
57878 + sptmp->rolename = tmp;
57879 + acl_special_roles[i] = sptmp;
57880 + }
57881 +
57882 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57883 +
57884 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57885 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
57886 +
57887 + if (!r_tmp) {
57888 + err = -ENOMEM;
57889 + goto cleanup;
57890 + }
57891 +
57892 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
57893 + sizeof (struct acl_role_label *))) {
57894 + err = -EFAULT;
57895 + goto cleanup;
57896 + }
57897 +
57898 + if (copy_from_user(r_tmp, r_utmp2,
57899 + sizeof (struct acl_role_label))) {
57900 + err = -EFAULT;
57901 + goto cleanup;
57902 + }
57903 +
57904 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57905 +
57906 + if (!len || len >= PATH_MAX) {
57907 + err = -EINVAL;
57908 + goto cleanup;
57909 + }
57910 +
57911 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57912 + err = -ENOMEM;
57913 + goto cleanup;
57914 + }
57915 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
57916 + err = -EFAULT;
57917 + goto cleanup;
57918 + }
57919 + tmp[len-1] = '\0';
57920 + r_tmp->rolename = tmp;
57921 +
57922 + if (!strcmp(r_tmp->rolename, "default")
57923 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57924 + default_role = r_tmp;
57925 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57926 + kernel_role = r_tmp;
57927 + }
57928 +
57929 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57930 + err = -ENOMEM;
57931 + goto cleanup;
57932 + }
57933 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57934 + err = -EFAULT;
57935 + goto cleanup;
57936 + }
57937 +
57938 + r_tmp->hash = ghash;
57939 +
57940 + num_subjs = count_user_subjs(r_tmp->hash->first);
57941 +
57942 + r_tmp->subj_hash_size = num_subjs;
57943 + r_tmp->subj_hash =
57944 + (struct acl_subject_label **)
57945 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57946 +
57947 + if (!r_tmp->subj_hash) {
57948 + err = -ENOMEM;
57949 + goto cleanup;
57950 + }
57951 +
57952 + err = copy_user_allowedips(r_tmp);
57953 + if (err)
57954 + goto cleanup;
57955 +
57956 + /* copy domain info */
57957 + if (r_tmp->domain_children != NULL) {
57958 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57959 + if (domainlist == NULL) {
57960 + err = -ENOMEM;
57961 + goto cleanup;
57962 + }
57963 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57964 + err = -EFAULT;
57965 + goto cleanup;
57966 + }
57967 + r_tmp->domain_children = domainlist;
57968 + }
57969 +
57970 + err = copy_user_transitions(r_tmp);
57971 + if (err)
57972 + goto cleanup;
57973 +
57974 + memset(r_tmp->subj_hash, 0,
57975 + r_tmp->subj_hash_size *
57976 + sizeof (struct acl_subject_label *));
57977 +
57978 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57979 +
57980 + if (err)
57981 + goto cleanup;
57982 +
57983 + /* set nested subject list to null */
57984 + r_tmp->hash->first = NULL;
57985 +
57986 + insert_acl_role_label(r_tmp);
57987 + }
57988 +
57989 + goto return_err;
57990 + cleanup:
57991 + free_variables();
57992 + return_err:
57993 + return err;
57994 +
57995 +}
57996 +
57997 +static int
57998 +gracl_init(struct gr_arg *args)
57999 +{
58000 + int error = 0;
58001 +
58002 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58003 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58004 +
58005 + if (init_variables(args)) {
58006 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58007 + error = -ENOMEM;
58008 + free_variables();
58009 + goto out;
58010 + }
58011 +
58012 + error = copy_user_acl(args);
58013 + free_init_variables();
58014 + if (error) {
58015 + free_variables();
58016 + goto out;
58017 + }
58018 +
58019 + if ((error = gr_set_acls(0))) {
58020 + free_variables();
58021 + goto out;
58022 + }
58023 +
58024 + pax_open_kernel();
58025 + gr_status |= GR_READY;
58026 + pax_close_kernel();
58027 +
58028 + out:
58029 + return error;
58030 +}
58031 +
58032 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58033 +
58034 +static int
58035 +glob_match(const char *p, const char *n)
58036 +{
58037 + char c;
58038 +
58039 + while ((c = *p++) != '\0') {
58040 + switch (c) {
58041 + case '?':
58042 + if (*n == '\0')
58043 + return 1;
58044 + else if (*n == '/')
58045 + return 1;
58046 + break;
58047 + case '\\':
58048 + if (*n != c)
58049 + return 1;
58050 + break;
58051 + case '*':
58052 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58053 + if (*n == '/')
58054 + return 1;
58055 + else if (c == '?') {
58056 + if (*n == '\0')
58057 + return 1;
58058 + else
58059 + ++n;
58060 + }
58061 + }
58062 + if (c == '\0') {
58063 + return 0;
58064 + } else {
58065 + const char *endp;
58066 +
58067 + if ((endp = strchr(n, '/')) == NULL)
58068 + endp = n + strlen(n);
58069 +
58070 + if (c == '[') {
58071 + for (--p; n < endp; ++n)
58072 + if (!glob_match(p, n))
58073 + return 0;
58074 + } else if (c == '/') {
58075 + while (*n != '\0' && *n != '/')
58076 + ++n;
58077 + if (*n == '/' && !glob_match(p, n + 1))
58078 + return 0;
58079 + } else {
58080 + for (--p; n < endp; ++n)
58081 + if (*n == c && !glob_match(p, n))
58082 + return 0;
58083 + }
58084 +
58085 + return 1;
58086 + }
58087 + case '[':
58088 + {
58089 + int not;
58090 + char cold;
58091 +
58092 + if (*n == '\0' || *n == '/')
58093 + return 1;
58094 +
58095 + not = (*p == '!' || *p == '^');
58096 + if (not)
58097 + ++p;
58098 +
58099 + c = *p++;
58100 + for (;;) {
58101 + unsigned char fn = (unsigned char)*n;
58102 +
58103 + if (c == '\0')
58104 + return 1;
58105 + else {
58106 + if (c == fn)
58107 + goto matched;
58108 + cold = c;
58109 + c = *p++;
58110 +
58111 + if (c == '-' && *p != ']') {
58112 + unsigned char cend = *p++;
58113 +
58114 + if (cend == '\0')
58115 + return 1;
58116 +
58117 + if (cold <= fn && fn <= cend)
58118 + goto matched;
58119 +
58120 + c = *p++;
58121 + }
58122 + }
58123 +
58124 + if (c == ']')
58125 + break;
58126 + }
58127 + if (!not)
58128 + return 1;
58129 + break;
58130 + matched:
58131 + while (c != ']') {
58132 + if (c == '\0')
58133 + return 1;
58134 +
58135 + c = *p++;
58136 + }
58137 + if (not)
58138 + return 1;
58139 + }
58140 + break;
58141 + default:
58142 + if (c != *n)
58143 + return 1;
58144 + }
58145 +
58146 + ++n;
58147 + }
58148 +
58149 + if (*n == '\0')
58150 + return 0;
58151 +
58152 + if (*n == '/')
58153 + return 0;
58154 +
58155 + return 1;
58156 +}
58157 +
58158 +static struct acl_object_label *
58159 +chk_glob_label(struct acl_object_label *globbed,
58160 + struct dentry *dentry, struct vfsmount *mnt, char **path)
58161 +{
58162 + struct acl_object_label *tmp;
58163 +
58164 + if (*path == NULL)
58165 + *path = gr_to_filename_nolock(dentry, mnt);
58166 +
58167 + tmp = globbed;
58168 +
58169 + while (tmp) {
58170 + if (!glob_match(tmp->filename, *path))
58171 + return tmp;
58172 + tmp = tmp->next;
58173 + }
58174 +
58175 + return NULL;
58176 +}
58177 +
58178 +static struct acl_object_label *
58179 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58180 + const ino_t curr_ino, const dev_t curr_dev,
58181 + const struct acl_subject_label *subj, char **path, const int checkglob)
58182 +{
58183 + struct acl_subject_label *tmpsubj;
58184 + struct acl_object_label *retval;
58185 + struct acl_object_label *retval2;
58186 +
58187 + tmpsubj = (struct acl_subject_label *) subj;
58188 + read_lock(&gr_inode_lock);
58189 + do {
58190 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58191 + if (retval) {
58192 + if (checkglob && retval->globbed) {
58193 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
58194 + (struct vfsmount *)orig_mnt, path);
58195 + if (retval2)
58196 + retval = retval2;
58197 + }
58198 + break;
58199 + }
58200 + } while ((tmpsubj = tmpsubj->parent_subject));
58201 + read_unlock(&gr_inode_lock);
58202 +
58203 + return retval;
58204 +}
58205 +
58206 +static __inline__ struct acl_object_label *
58207 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58208 + const struct dentry *curr_dentry,
58209 + const struct acl_subject_label *subj, char **path, const int checkglob)
58210 +{
58211 + int newglob = checkglob;
58212 +
58213 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58214 + as we don't want a / * rule to match instead of the / object
58215 + don't do this for create lookups that call this function though, since they're looking up
58216 + on the parent and thus need globbing checks on all paths
58217 + */
58218 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58219 + newglob = GR_NO_GLOB;
58220 +
58221 + return __full_lookup(orig_dentry, orig_mnt,
58222 + curr_dentry->d_inode->i_ino,
58223 + __get_dev(curr_dentry), subj, path, newglob);
58224 +}
58225 +
58226 +static struct acl_object_label *
58227 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58228 + const struct acl_subject_label *subj, char *path, const int checkglob)
58229 +{
58230 + struct dentry *dentry = (struct dentry *) l_dentry;
58231 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58232 + struct acl_object_label *retval;
58233 +
58234 + spin_lock(&dcache_lock);
58235 + spin_lock(&vfsmount_lock);
58236 +
58237 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58238 +#ifdef CONFIG_NET
58239 + mnt == sock_mnt ||
58240 +#endif
58241 +#ifdef CONFIG_HUGETLBFS
58242 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58243 +#endif
58244 + /* ignore Eric Biederman */
58245 + IS_PRIVATE(l_dentry->d_inode))) {
58246 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58247 + goto out;
58248 + }
58249 +
58250 + for (;;) {
58251 + if (dentry == real_root && mnt == real_root_mnt)
58252 + break;
58253 +
58254 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58255 + if (mnt->mnt_parent == mnt)
58256 + break;
58257 +
58258 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58259 + if (retval != NULL)
58260 + goto out;
58261 +
58262 + dentry = mnt->mnt_mountpoint;
58263 + mnt = mnt->mnt_parent;
58264 + continue;
58265 + }
58266 +
58267 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58268 + if (retval != NULL)
58269 + goto out;
58270 +
58271 + dentry = dentry->d_parent;
58272 + }
58273 +
58274 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58275 +
58276 + if (retval == NULL)
58277 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58278 +out:
58279 + spin_unlock(&vfsmount_lock);
58280 + spin_unlock(&dcache_lock);
58281 +
58282 + BUG_ON(retval == NULL);
58283 +
58284 + return retval;
58285 +}
58286 +
58287 +static __inline__ struct acl_object_label *
58288 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58289 + const struct acl_subject_label *subj)
58290 +{
58291 + char *path = NULL;
58292 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58293 +}
58294 +
58295 +static __inline__ struct acl_object_label *
58296 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58297 + const struct acl_subject_label *subj)
58298 +{
58299 + char *path = NULL;
58300 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58301 +}
58302 +
58303 +static __inline__ struct acl_object_label *
58304 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58305 + const struct acl_subject_label *subj, char *path)
58306 +{
58307 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58308 +}
58309 +
58310 +static struct acl_subject_label *
58311 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58312 + const struct acl_role_label *role)
58313 +{
58314 + struct dentry *dentry = (struct dentry *) l_dentry;
58315 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58316 + struct acl_subject_label *retval;
58317 +
58318 + spin_lock(&dcache_lock);
58319 + spin_lock(&vfsmount_lock);
58320 +
58321 + for (;;) {
58322 + if (dentry == real_root && mnt == real_root_mnt)
58323 + break;
58324 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58325 + if (mnt->mnt_parent == mnt)
58326 + break;
58327 +
58328 + read_lock(&gr_inode_lock);
58329 + retval =
58330 + lookup_acl_subj_label(dentry->d_inode->i_ino,
58331 + __get_dev(dentry), role);
58332 + read_unlock(&gr_inode_lock);
58333 + if (retval != NULL)
58334 + goto out;
58335 +
58336 + dentry = mnt->mnt_mountpoint;
58337 + mnt = mnt->mnt_parent;
58338 + continue;
58339 + }
58340 +
58341 + read_lock(&gr_inode_lock);
58342 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58343 + __get_dev(dentry), role);
58344 + read_unlock(&gr_inode_lock);
58345 + if (retval != NULL)
58346 + goto out;
58347 +
58348 + dentry = dentry->d_parent;
58349 + }
58350 +
58351 + read_lock(&gr_inode_lock);
58352 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58353 + __get_dev(dentry), role);
58354 + read_unlock(&gr_inode_lock);
58355 +
58356 + if (unlikely(retval == NULL)) {
58357 + read_lock(&gr_inode_lock);
58358 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58359 + __get_dev(real_root), role);
58360 + read_unlock(&gr_inode_lock);
58361 + }
58362 +out:
58363 + spin_unlock(&vfsmount_lock);
58364 + spin_unlock(&dcache_lock);
58365 +
58366 + BUG_ON(retval == NULL);
58367 +
58368 + return retval;
58369 +}
58370 +
58371 +static void
58372 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58373 +{
58374 + struct task_struct *task = current;
58375 + const struct cred *cred = current_cred();
58376 +
58377 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58378 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58379 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58380 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58381 +
58382 + return;
58383 +}
58384 +
58385 +static void
58386 +gr_log_learn_sysctl(const char *path, const __u32 mode)
58387 +{
58388 + struct task_struct *task = current;
58389 + const struct cred *cred = current_cred();
58390 +
58391 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58392 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58393 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58394 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58395 +
58396 + return;
58397 +}
58398 +
58399 +static void
58400 +gr_log_learn_id_change(const char type, const unsigned int real,
58401 + const unsigned int effective, const unsigned int fs)
58402 +{
58403 + struct task_struct *task = current;
58404 + const struct cred *cred = current_cred();
58405 +
58406 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58407 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58408 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58409 + type, real, effective, fs, &task->signal->saved_ip);
58410 +
58411 + return;
58412 +}
58413 +
58414 +__u32
58415 +gr_search_file(const struct dentry * dentry, const __u32 mode,
58416 + const struct vfsmount * mnt)
58417 +{
58418 + __u32 retval = mode;
58419 + struct acl_subject_label *curracl;
58420 + struct acl_object_label *currobj;
58421 +
58422 + if (unlikely(!(gr_status & GR_READY)))
58423 + return (mode & ~GR_AUDITS);
58424 +
58425 + curracl = current->acl;
58426 +
58427 + currobj = chk_obj_label(dentry, mnt, curracl);
58428 + retval = currobj->mode & mode;
58429 +
58430 + /* if we're opening a specified transfer file for writing
58431 + (e.g. /dev/initctl), then transfer our role to init
58432 + */
58433 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58434 + current->role->roletype & GR_ROLE_PERSIST)) {
58435 + struct task_struct *task = init_pid_ns.child_reaper;
58436 +
58437 + if (task->role != current->role) {
58438 + task->acl_sp_role = 0;
58439 + task->acl_role_id = current->acl_role_id;
58440 + task->role = current->role;
58441 + rcu_read_lock();
58442 + read_lock(&grsec_exec_file_lock);
58443 + gr_apply_subject_to_task(task);
58444 + read_unlock(&grsec_exec_file_lock);
58445 + rcu_read_unlock();
58446 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58447 + }
58448 + }
58449 +
58450 + if (unlikely
58451 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58452 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58453 + __u32 new_mode = mode;
58454 +
58455 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58456 +
58457 + retval = new_mode;
58458 +
58459 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58460 + new_mode |= GR_INHERIT;
58461 +
58462 + if (!(mode & GR_NOLEARN))
58463 + gr_log_learn(dentry, mnt, new_mode);
58464 + }
58465 +
58466 + return retval;
58467 +}
58468 +
58469 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58470 + const struct dentry *parent,
58471 + const struct vfsmount *mnt)
58472 +{
58473 + struct name_entry *match;
58474 + struct acl_object_label *matchpo;
58475 + struct acl_subject_label *curracl;
58476 + char *path;
58477 +
58478 + if (unlikely(!(gr_status & GR_READY)))
58479 + return NULL;
58480 +
58481 + preempt_disable();
58482 + path = gr_to_filename_rbac(new_dentry, mnt);
58483 + match = lookup_name_entry_create(path);
58484 +
58485 + curracl = current->acl;
58486 +
58487 + if (match) {
58488 + read_lock(&gr_inode_lock);
58489 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58490 + read_unlock(&gr_inode_lock);
58491 +
58492 + if (matchpo) {
58493 + preempt_enable();
58494 + return matchpo;
58495 + }
58496 + }
58497 +
58498 + // lookup parent
58499 +
58500 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58501 +
58502 + preempt_enable();
58503 + return matchpo;
58504 +}
58505 +
58506 +__u32
58507 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58508 + const struct vfsmount * mnt, const __u32 mode)
58509 +{
58510 + struct acl_object_label *matchpo;
58511 + __u32 retval;
58512 +
58513 + if (unlikely(!(gr_status & GR_READY)))
58514 + return (mode & ~GR_AUDITS);
58515 +
58516 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58517 +
58518 + retval = matchpo->mode & mode;
58519 +
58520 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58521 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58522 + __u32 new_mode = mode;
58523 +
58524 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58525 +
58526 + gr_log_learn(new_dentry, mnt, new_mode);
58527 + return new_mode;
58528 + }
58529 +
58530 + return retval;
58531 +}
58532 +
58533 +__u32
58534 +gr_check_link(const struct dentry * new_dentry,
58535 + const struct dentry * parent_dentry,
58536 + const struct vfsmount * parent_mnt,
58537 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58538 +{
58539 + struct acl_object_label *obj;
58540 + __u32 oldmode, newmode;
58541 + __u32 needmode;
58542 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58543 + GR_DELETE | GR_INHERIT;
58544 +
58545 + if (unlikely(!(gr_status & GR_READY)))
58546 + return (GR_CREATE | GR_LINK);
58547 +
58548 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58549 + oldmode = obj->mode;
58550 +
58551 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58552 + newmode = obj->mode;
58553 +
58554 + needmode = newmode & checkmodes;
58555 +
58556 + // old name for hardlink must have at least the permissions of the new name
58557 + if ((oldmode & needmode) != needmode)
58558 + goto bad;
58559 +
58560 + // if old name had restrictions/auditing, make sure the new name does as well
58561 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58562 +
58563 + // don't allow hardlinking of suid/sgid files without permission
58564 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58565 + needmode |= GR_SETID;
58566 +
58567 + if ((newmode & needmode) != needmode)
58568 + goto bad;
58569 +
58570 + // enforce minimum permissions
58571 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58572 + return newmode;
58573 +bad:
58574 + needmode = oldmode;
58575 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58576 + needmode |= GR_SETID;
58577 +
58578 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58579 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58580 + return (GR_CREATE | GR_LINK);
58581 + } else if (newmode & GR_SUPPRESS)
58582 + return GR_SUPPRESS;
58583 + else
58584 + return 0;
58585 +}
58586 +
58587 +int
58588 +gr_check_hidden_task(const struct task_struct *task)
58589 +{
58590 + if (unlikely(!(gr_status & GR_READY)))
58591 + return 0;
58592 +
58593 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58594 + return 1;
58595 +
58596 + return 0;
58597 +}
58598 +
58599 +int
58600 +gr_check_protected_task(const struct task_struct *task)
58601 +{
58602 + if (unlikely(!(gr_status & GR_READY) || !task))
58603 + return 0;
58604 +
58605 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58606 + task->acl != current->acl)
58607 + return 1;
58608 +
58609 + return 0;
58610 +}
58611 +
58612 +int
58613 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58614 +{
58615 + struct task_struct *p;
58616 + int ret = 0;
58617 +
58618 + if (unlikely(!(gr_status & GR_READY) || !pid))
58619 + return ret;
58620 +
58621 + read_lock(&tasklist_lock);
58622 + do_each_pid_task(pid, type, p) {
58623 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58624 + p->acl != current->acl) {
58625 + ret = 1;
58626 + goto out;
58627 + }
58628 + } while_each_pid_task(pid, type, p);
58629 +out:
58630 + read_unlock(&tasklist_lock);
58631 +
58632 + return ret;
58633 +}
58634 +
58635 +void
58636 +gr_copy_label(struct task_struct *tsk)
58637 +{
58638 + /* plain copying of fields is already done by dup_task_struct */
58639 + tsk->signal->used_accept = 0;
58640 + tsk->acl_sp_role = 0;
58641 + //tsk->acl_role_id = current->acl_role_id;
58642 + //tsk->acl = current->acl;
58643 + //tsk->role = current->role;
58644 + tsk->signal->curr_ip = current->signal->curr_ip;
58645 + tsk->signal->saved_ip = current->signal->saved_ip;
58646 + if (current->exec_file)
58647 + get_file(current->exec_file);
58648 + //tsk->exec_file = current->exec_file;
58649 + //tsk->is_writable = current->is_writable;
58650 + if (unlikely(current->signal->used_accept)) {
58651 + current->signal->curr_ip = 0;
58652 + current->signal->saved_ip = 0;
58653 + }
58654 +
58655 + return;
58656 +}
58657 +
58658 +static void
58659 +gr_set_proc_res(struct task_struct *task)
58660 +{
58661 + struct acl_subject_label *proc;
58662 + unsigned short i;
58663 +
58664 + proc = task->acl;
58665 +
58666 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58667 + return;
58668 +
58669 + for (i = 0; i < RLIM_NLIMITS; i++) {
58670 + if (!(proc->resmask & (1 << i)))
58671 + continue;
58672 +
58673 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58674 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58675 + }
58676 +
58677 + return;
58678 +}
58679 +
58680 +extern int __gr_process_user_ban(struct user_struct *user);
58681 +
58682 +int
58683 +gr_check_user_change(int real, int effective, int fs)
58684 +{
58685 + unsigned int i;
58686 + __u16 num;
58687 + uid_t *uidlist;
58688 + int curuid;
58689 + int realok = 0;
58690 + int effectiveok = 0;
58691 + int fsok = 0;
58692 +
58693 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58694 + struct user_struct *user;
58695 +
58696 + if (real == -1)
58697 + goto skipit;
58698 +
58699 + user = find_user(real);
58700 + if (user == NULL)
58701 + goto skipit;
58702 +
58703 + if (__gr_process_user_ban(user)) {
58704 + /* for find_user */
58705 + free_uid(user);
58706 + return 1;
58707 + }
58708 +
58709 + /* for find_user */
58710 + free_uid(user);
58711 +
58712 +skipit:
58713 +#endif
58714 +
58715 + if (unlikely(!(gr_status & GR_READY)))
58716 + return 0;
58717 +
58718 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58719 + gr_log_learn_id_change('u', real, effective, fs);
58720 +
58721 + num = current->acl->user_trans_num;
58722 + uidlist = current->acl->user_transitions;
58723 +
58724 + if (uidlist == NULL)
58725 + return 0;
58726 +
58727 + if (real == -1)
58728 + realok = 1;
58729 + if (effective == -1)
58730 + effectiveok = 1;
58731 + if (fs == -1)
58732 + fsok = 1;
58733 +
58734 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58735 + for (i = 0; i < num; i++) {
58736 + curuid = (int)uidlist[i];
58737 + if (real == curuid)
58738 + realok = 1;
58739 + if (effective == curuid)
58740 + effectiveok = 1;
58741 + if (fs == curuid)
58742 + fsok = 1;
58743 + }
58744 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58745 + for (i = 0; i < num; i++) {
58746 + curuid = (int)uidlist[i];
58747 + if (real == curuid)
58748 + break;
58749 + if (effective == curuid)
58750 + break;
58751 + if (fs == curuid)
58752 + break;
58753 + }
58754 + /* not in deny list */
58755 + if (i == num) {
58756 + realok = 1;
58757 + effectiveok = 1;
58758 + fsok = 1;
58759 + }
58760 + }
58761 +
58762 + if (realok && effectiveok && fsok)
58763 + return 0;
58764 + else {
58765 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58766 + return 1;
58767 + }
58768 +}
58769 +
58770 +int
58771 +gr_check_group_change(int real, int effective, int fs)
58772 +{
58773 + unsigned int i;
58774 + __u16 num;
58775 + gid_t *gidlist;
58776 + int curgid;
58777 + int realok = 0;
58778 + int effectiveok = 0;
58779 + int fsok = 0;
58780 +
58781 + if (unlikely(!(gr_status & GR_READY)))
58782 + return 0;
58783 +
58784 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58785 + gr_log_learn_id_change('g', real, effective, fs);
58786 +
58787 + num = current->acl->group_trans_num;
58788 + gidlist = current->acl->group_transitions;
58789 +
58790 + if (gidlist == NULL)
58791 + return 0;
58792 +
58793 + if (real == -1)
58794 + realok = 1;
58795 + if (effective == -1)
58796 + effectiveok = 1;
58797 + if (fs == -1)
58798 + fsok = 1;
58799 +
58800 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58801 + for (i = 0; i < num; i++) {
58802 + curgid = (int)gidlist[i];
58803 + if (real == curgid)
58804 + realok = 1;
58805 + if (effective == curgid)
58806 + effectiveok = 1;
58807 + if (fs == curgid)
58808 + fsok = 1;
58809 + }
58810 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58811 + for (i = 0; i < num; i++) {
58812 + curgid = (int)gidlist[i];
58813 + if (real == curgid)
58814 + break;
58815 + if (effective == curgid)
58816 + break;
58817 + if (fs == curgid)
58818 + break;
58819 + }
58820 + /* not in deny list */
58821 + if (i == num) {
58822 + realok = 1;
58823 + effectiveok = 1;
58824 + fsok = 1;
58825 + }
58826 + }
58827 +
58828 + if (realok && effectiveok && fsok)
58829 + return 0;
58830 + else {
58831 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58832 + return 1;
58833 + }
58834 +}
58835 +
58836 +extern int gr_acl_is_capable(const int cap);
58837 +
58838 +void
58839 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58840 +{
58841 + struct acl_role_label *role = task->role;
58842 + struct acl_subject_label *subj = NULL;
58843 + struct acl_object_label *obj;
58844 + struct file *filp;
58845 +
58846 + if (unlikely(!(gr_status & GR_READY)))
58847 + return;
58848 +
58849 + filp = task->exec_file;
58850 +
58851 + /* kernel process, we'll give them the kernel role */
58852 + if (unlikely(!filp)) {
58853 + task->role = kernel_role;
58854 + task->acl = kernel_role->root_label;
58855 + return;
58856 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58857 + role = lookup_acl_role_label(task, uid, gid);
58858 +
58859 + /* don't change the role if we're not a privileged process */
58860 + if (role && task->role != role &&
58861 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
58862 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
58863 + return;
58864 +
58865 + /* perform subject lookup in possibly new role
58866 + we can use this result below in the case where role == task->role
58867 + */
58868 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58869 +
58870 + /* if we changed uid/gid, but result in the same role
58871 + and are using inheritance, don't lose the inherited subject
58872 + if current subject is other than what normal lookup
58873 + would result in, we arrived via inheritance, don't
58874 + lose subject
58875 + */
58876 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58877 + (subj == task->acl)))
58878 + task->acl = subj;
58879 +
58880 + task->role = role;
58881 +
58882 + task->is_writable = 0;
58883 +
58884 + /* ignore additional mmap checks for processes that are writable
58885 + by the default ACL */
58886 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58887 + if (unlikely(obj->mode & GR_WRITE))
58888 + task->is_writable = 1;
58889 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58890 + if (unlikely(obj->mode & GR_WRITE))
58891 + task->is_writable = 1;
58892 +
58893 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58894 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58895 +#endif
58896 +
58897 + gr_set_proc_res(task);
58898 +
58899 + return;
58900 +}
58901 +
58902 +int
58903 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58904 + const int unsafe_flags)
58905 +{
58906 + struct task_struct *task = current;
58907 + struct acl_subject_label *newacl;
58908 + struct acl_object_label *obj;
58909 + __u32 retmode;
58910 +
58911 + if (unlikely(!(gr_status & GR_READY)))
58912 + return 0;
58913 +
58914 + newacl = chk_subj_label(dentry, mnt, task->role);
58915 +
58916 + task_lock(task);
58917 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58918 + !(task->role->roletype & GR_ROLE_GOD) &&
58919 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58920 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58921 + task_unlock(task);
58922 + if (unsafe_flags & LSM_UNSAFE_SHARE)
58923 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58924 + else
58925 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58926 + return -EACCES;
58927 + }
58928 + task_unlock(task);
58929 +
58930 + obj = chk_obj_label(dentry, mnt, task->acl);
58931 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58932 +
58933 + if (!(task->acl->mode & GR_INHERITLEARN) &&
58934 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58935 + if (obj->nested)
58936 + task->acl = obj->nested;
58937 + else
58938 + task->acl = newacl;
58939 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58940 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58941 +
58942 + task->is_writable = 0;
58943 +
58944 + /* ignore additional mmap checks for processes that are writable
58945 + by the default ACL */
58946 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
58947 + if (unlikely(obj->mode & GR_WRITE))
58948 + task->is_writable = 1;
58949 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
58950 + if (unlikely(obj->mode & GR_WRITE))
58951 + task->is_writable = 1;
58952 +
58953 + gr_set_proc_res(task);
58954 +
58955 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58956 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58957 +#endif
58958 + return 0;
58959 +}
58960 +
58961 +/* always called with valid inodev ptr */
58962 +static void
58963 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58964 +{
58965 + struct acl_object_label *matchpo;
58966 + struct acl_subject_label *matchps;
58967 + struct acl_subject_label *subj;
58968 + struct acl_role_label *role;
58969 + unsigned int x;
58970 +
58971 + FOR_EACH_ROLE_START(role)
58972 + FOR_EACH_SUBJECT_START(role, subj, x)
58973 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58974 + matchpo->mode |= GR_DELETED;
58975 + FOR_EACH_SUBJECT_END(subj,x)
58976 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58977 + if (subj->inode == ino && subj->device == dev)
58978 + subj->mode |= GR_DELETED;
58979 + FOR_EACH_NESTED_SUBJECT_END(subj)
58980 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58981 + matchps->mode |= GR_DELETED;
58982 + FOR_EACH_ROLE_END(role)
58983 +
58984 + inodev->nentry->deleted = 1;
58985 +
58986 + return;
58987 +}
58988 +
58989 +void
58990 +gr_handle_delete(const ino_t ino, const dev_t dev)
58991 +{
58992 + struct inodev_entry *inodev;
58993 +
58994 + if (unlikely(!(gr_status & GR_READY)))
58995 + return;
58996 +
58997 + write_lock(&gr_inode_lock);
58998 + inodev = lookup_inodev_entry(ino, dev);
58999 + if (inodev != NULL)
59000 + do_handle_delete(inodev, ino, dev);
59001 + write_unlock(&gr_inode_lock);
59002 +
59003 + return;
59004 +}
59005 +
59006 +static void
59007 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59008 + const ino_t newinode, const dev_t newdevice,
59009 + struct acl_subject_label *subj)
59010 +{
59011 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59012 + struct acl_object_label *match;
59013 +
59014 + match = subj->obj_hash[index];
59015 +
59016 + while (match && (match->inode != oldinode ||
59017 + match->device != olddevice ||
59018 + !(match->mode & GR_DELETED)))
59019 + match = match->next;
59020 +
59021 + if (match && (match->inode == oldinode)
59022 + && (match->device == olddevice)
59023 + && (match->mode & GR_DELETED)) {
59024 + if (match->prev == NULL) {
59025 + subj->obj_hash[index] = match->next;
59026 + if (match->next != NULL)
59027 + match->next->prev = NULL;
59028 + } else {
59029 + match->prev->next = match->next;
59030 + if (match->next != NULL)
59031 + match->next->prev = match->prev;
59032 + }
59033 + match->prev = NULL;
59034 + match->next = NULL;
59035 + match->inode = newinode;
59036 + match->device = newdevice;
59037 + match->mode &= ~GR_DELETED;
59038 +
59039 + insert_acl_obj_label(match, subj);
59040 + }
59041 +
59042 + return;
59043 +}
59044 +
59045 +static void
59046 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59047 + const ino_t newinode, const dev_t newdevice,
59048 + struct acl_role_label *role)
59049 +{
59050 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59051 + struct acl_subject_label *match;
59052 +
59053 + match = role->subj_hash[index];
59054 +
59055 + while (match && (match->inode != oldinode ||
59056 + match->device != olddevice ||
59057 + !(match->mode & GR_DELETED)))
59058 + match = match->next;
59059 +
59060 + if (match && (match->inode == oldinode)
59061 + && (match->device == olddevice)
59062 + && (match->mode & GR_DELETED)) {
59063 + if (match->prev == NULL) {
59064 + role->subj_hash[index] = match->next;
59065 + if (match->next != NULL)
59066 + match->next->prev = NULL;
59067 + } else {
59068 + match->prev->next = match->next;
59069 + if (match->next != NULL)
59070 + match->next->prev = match->prev;
59071 + }
59072 + match->prev = NULL;
59073 + match->next = NULL;
59074 + match->inode = newinode;
59075 + match->device = newdevice;
59076 + match->mode &= ~GR_DELETED;
59077 +
59078 + insert_acl_subj_label(match, role);
59079 + }
59080 +
59081 + return;
59082 +}
59083 +
59084 +static void
59085 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59086 + const ino_t newinode, const dev_t newdevice)
59087 +{
59088 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59089 + struct inodev_entry *match;
59090 +
59091 + match = inodev_set.i_hash[index];
59092 +
59093 + while (match && (match->nentry->inode != oldinode ||
59094 + match->nentry->device != olddevice || !match->nentry->deleted))
59095 + match = match->next;
59096 +
59097 + if (match && (match->nentry->inode == oldinode)
59098 + && (match->nentry->device == olddevice) &&
59099 + match->nentry->deleted) {
59100 + if (match->prev == NULL) {
59101 + inodev_set.i_hash[index] = match->next;
59102 + if (match->next != NULL)
59103 + match->next->prev = NULL;
59104 + } else {
59105 + match->prev->next = match->next;
59106 + if (match->next != NULL)
59107 + match->next->prev = match->prev;
59108 + }
59109 + match->prev = NULL;
59110 + match->next = NULL;
59111 + match->nentry->inode = newinode;
59112 + match->nentry->device = newdevice;
59113 + match->nentry->deleted = 0;
59114 +
59115 + insert_inodev_entry(match);
59116 + }
59117 +
59118 + return;
59119 +}
59120 +
59121 +static void
59122 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59123 +{
59124 + struct acl_subject_label *subj;
59125 + struct acl_role_label *role;
59126 + unsigned int x;
59127 +
59128 + FOR_EACH_ROLE_START(role)
59129 + update_acl_subj_label(matchn->inode, matchn->device,
59130 + inode, dev, role);
59131 +
59132 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59133 + if ((subj->inode == inode) && (subj->device == dev)) {
59134 + subj->inode = inode;
59135 + subj->device = dev;
59136 + }
59137 + FOR_EACH_NESTED_SUBJECT_END(subj)
59138 + FOR_EACH_SUBJECT_START(role, subj, x)
59139 + update_acl_obj_label(matchn->inode, matchn->device,
59140 + inode, dev, subj);
59141 + FOR_EACH_SUBJECT_END(subj,x)
59142 + FOR_EACH_ROLE_END(role)
59143 +
59144 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59145 +
59146 + return;
59147 +}
59148 +
59149 +static void
59150 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59151 + const struct vfsmount *mnt)
59152 +{
59153 + ino_t ino = dentry->d_inode->i_ino;
59154 + dev_t dev = __get_dev(dentry);
59155 +
59156 + __do_handle_create(matchn, ino, dev);
59157 +
59158 + return;
59159 +}
59160 +
59161 +void
59162 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59163 +{
59164 + struct name_entry *matchn;
59165 +
59166 + if (unlikely(!(gr_status & GR_READY)))
59167 + return;
59168 +
59169 + preempt_disable();
59170 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59171 +
59172 + if (unlikely((unsigned long)matchn)) {
59173 + write_lock(&gr_inode_lock);
59174 + do_handle_create(matchn, dentry, mnt);
59175 + write_unlock(&gr_inode_lock);
59176 + }
59177 + preempt_enable();
59178 +
59179 + return;
59180 +}
59181 +
59182 +void
59183 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59184 +{
59185 + struct name_entry *matchn;
59186 +
59187 + if (unlikely(!(gr_status & GR_READY)))
59188 + return;
59189 +
59190 + preempt_disable();
59191 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59192 +
59193 + if (unlikely((unsigned long)matchn)) {
59194 + write_lock(&gr_inode_lock);
59195 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59196 + write_unlock(&gr_inode_lock);
59197 + }
59198 + preempt_enable();
59199 +
59200 + return;
59201 +}
59202 +
59203 +void
59204 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59205 + struct dentry *old_dentry,
59206 + struct dentry *new_dentry,
59207 + struct vfsmount *mnt, const __u8 replace)
59208 +{
59209 + struct name_entry *matchn;
59210 + struct inodev_entry *inodev;
59211 + struct inode *inode = new_dentry->d_inode;
59212 + ino_t oldinode = old_dentry->d_inode->i_ino;
59213 + dev_t olddev = __get_dev(old_dentry);
59214 +
59215 + /* vfs_rename swaps the name and parent link for old_dentry and
59216 + new_dentry
59217 + at this point, old_dentry has the new name, parent link, and inode
59218 + for the renamed file
59219 + if a file is being replaced by a rename, new_dentry has the inode
59220 + and name for the replaced file
59221 + */
59222 +
59223 + if (unlikely(!(gr_status & GR_READY)))
59224 + return;
59225 +
59226 + preempt_disable();
59227 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59228 +
59229 + /* we wouldn't have to check d_inode if it weren't for
59230 + NFS silly-renaming
59231 + */
59232 +
59233 + write_lock(&gr_inode_lock);
59234 + if (unlikely(replace && inode)) {
59235 + ino_t newinode = inode->i_ino;
59236 + dev_t newdev = __get_dev(new_dentry);
59237 + inodev = lookup_inodev_entry(newinode, newdev);
59238 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59239 + do_handle_delete(inodev, newinode, newdev);
59240 + }
59241 +
59242 + inodev = lookup_inodev_entry(oldinode, olddev);
59243 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59244 + do_handle_delete(inodev, oldinode, olddev);
59245 +
59246 + if (unlikely((unsigned long)matchn))
59247 + do_handle_create(matchn, old_dentry, mnt);
59248 +
59249 + write_unlock(&gr_inode_lock);
59250 + preempt_enable();
59251 +
59252 + return;
59253 +}
59254 +
59255 +static int
59256 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59257 + unsigned char **sum)
59258 +{
59259 + struct acl_role_label *r;
59260 + struct role_allowed_ip *ipp;
59261 + struct role_transition *trans;
59262 + unsigned int i;
59263 + int found = 0;
59264 + u32 curr_ip = current->signal->curr_ip;
59265 +
59266 + current->signal->saved_ip = curr_ip;
59267 +
59268 + /* check transition table */
59269 +
59270 + for (trans = current->role->transitions; trans; trans = trans->next) {
59271 + if (!strcmp(rolename, trans->rolename)) {
59272 + found = 1;
59273 + break;
59274 + }
59275 + }
59276 +
59277 + if (!found)
59278 + return 0;
59279 +
59280 + /* handle special roles that do not require authentication
59281 + and check ip */
59282 +
59283 + FOR_EACH_ROLE_START(r)
59284 + if (!strcmp(rolename, r->rolename) &&
59285 + (r->roletype & GR_ROLE_SPECIAL)) {
59286 + found = 0;
59287 + if (r->allowed_ips != NULL) {
59288 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59289 + if ((ntohl(curr_ip) & ipp->netmask) ==
59290 + (ntohl(ipp->addr) & ipp->netmask))
59291 + found = 1;
59292 + }
59293 + } else
59294 + found = 2;
59295 + if (!found)
59296 + return 0;
59297 +
59298 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59299 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59300 + *salt = NULL;
59301 + *sum = NULL;
59302 + return 1;
59303 + }
59304 + }
59305 + FOR_EACH_ROLE_END(r)
59306 +
59307 + for (i = 0; i < num_sprole_pws; i++) {
59308 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59309 + *salt = acl_special_roles[i]->salt;
59310 + *sum = acl_special_roles[i]->sum;
59311 + return 1;
59312 + }
59313 + }
59314 +
59315 + return 0;
59316 +}
59317 +
59318 +static void
59319 +assign_special_role(char *rolename)
59320 +{
59321 + struct acl_object_label *obj;
59322 + struct acl_role_label *r;
59323 + struct acl_role_label *assigned = NULL;
59324 + struct task_struct *tsk;
59325 + struct file *filp;
59326 +
59327 + FOR_EACH_ROLE_START(r)
59328 + if (!strcmp(rolename, r->rolename) &&
59329 + (r->roletype & GR_ROLE_SPECIAL)) {
59330 + assigned = r;
59331 + break;
59332 + }
59333 + FOR_EACH_ROLE_END(r)
59334 +
59335 + if (!assigned)
59336 + return;
59337 +
59338 + read_lock(&tasklist_lock);
59339 + read_lock(&grsec_exec_file_lock);
59340 +
59341 + tsk = current->real_parent;
59342 + if (tsk == NULL)
59343 + goto out_unlock;
59344 +
59345 + filp = tsk->exec_file;
59346 + if (filp == NULL)
59347 + goto out_unlock;
59348 +
59349 + tsk->is_writable = 0;
59350 +
59351 + tsk->acl_sp_role = 1;
59352 + tsk->acl_role_id = ++acl_sp_role_value;
59353 + tsk->role = assigned;
59354 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59355 +
59356 + /* ignore additional mmap checks for processes that are writable
59357 + by the default ACL */
59358 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59359 + if (unlikely(obj->mode & GR_WRITE))
59360 + tsk->is_writable = 1;
59361 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59362 + if (unlikely(obj->mode & GR_WRITE))
59363 + tsk->is_writable = 1;
59364 +
59365 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59366 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59367 +#endif
59368 +
59369 +out_unlock:
59370 + read_unlock(&grsec_exec_file_lock);
59371 + read_unlock(&tasklist_lock);
59372 + return;
59373 +}
59374 +
59375 +int gr_check_secure_terminal(struct task_struct *task)
59376 +{
59377 + struct task_struct *p, *p2, *p3;
59378 + struct files_struct *files;
59379 + struct fdtable *fdt;
59380 + struct file *our_file = NULL, *file;
59381 + int i;
59382 +
59383 + if (task->signal->tty == NULL)
59384 + return 1;
59385 +
59386 + files = get_files_struct(task);
59387 + if (files != NULL) {
59388 + rcu_read_lock();
59389 + fdt = files_fdtable(files);
59390 + for (i=0; i < fdt->max_fds; i++) {
59391 + file = fcheck_files(files, i);
59392 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59393 + get_file(file);
59394 + our_file = file;
59395 + }
59396 + }
59397 + rcu_read_unlock();
59398 + put_files_struct(files);
59399 + }
59400 +
59401 + if (our_file == NULL)
59402 + return 1;
59403 +
59404 + read_lock(&tasklist_lock);
59405 + do_each_thread(p2, p) {
59406 + files = get_files_struct(p);
59407 + if (files == NULL ||
59408 + (p->signal && p->signal->tty == task->signal->tty)) {
59409 + if (files != NULL)
59410 + put_files_struct(files);
59411 + continue;
59412 + }
59413 + rcu_read_lock();
59414 + fdt = files_fdtable(files);
59415 + for (i=0; i < fdt->max_fds; i++) {
59416 + file = fcheck_files(files, i);
59417 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59418 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59419 + p3 = task;
59420 + while (p3->pid > 0) {
59421 + if (p3 == p)
59422 + break;
59423 + p3 = p3->real_parent;
59424 + }
59425 + if (p3 == p)
59426 + break;
59427 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59428 + gr_handle_alertkill(p);
59429 + rcu_read_unlock();
59430 + put_files_struct(files);
59431 + read_unlock(&tasklist_lock);
59432 + fput(our_file);
59433 + return 0;
59434 + }
59435 + }
59436 + rcu_read_unlock();
59437 + put_files_struct(files);
59438 + } while_each_thread(p2, p);
59439 + read_unlock(&tasklist_lock);
59440 +
59441 + fput(our_file);
59442 + return 1;
59443 +}
59444 +
59445 +ssize_t
59446 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59447 +{
59448 + struct gr_arg_wrapper uwrap;
59449 + unsigned char *sprole_salt = NULL;
59450 + unsigned char *sprole_sum = NULL;
59451 + int error = sizeof (struct gr_arg_wrapper);
59452 + int error2 = 0;
59453 +
59454 + mutex_lock(&gr_dev_mutex);
59455 +
59456 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59457 + error = -EPERM;
59458 + goto out;
59459 + }
59460 +
59461 + if (count != sizeof (struct gr_arg_wrapper)) {
59462 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59463 + error = -EINVAL;
59464 + goto out;
59465 + }
59466 +
59467 +
59468 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59469 + gr_auth_expires = 0;
59470 + gr_auth_attempts = 0;
59471 + }
59472 +
59473 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59474 + error = -EFAULT;
59475 + goto out;
59476 + }
59477 +
59478 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59479 + error = -EINVAL;
59480 + goto out;
59481 + }
59482 +
59483 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59484 + error = -EFAULT;
59485 + goto out;
59486 + }
59487 +
59488 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59489 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59490 + time_after(gr_auth_expires, get_seconds())) {
59491 + error = -EBUSY;
59492 + goto out;
59493 + }
59494 +
59495 + /* if non-root trying to do anything other than use a special role,
59496 + do not attempt authentication, do not count towards authentication
59497 + locking
59498 + */
59499 +
59500 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59501 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59502 + current_uid()) {
59503 + error = -EPERM;
59504 + goto out;
59505 + }
59506 +
59507 + /* ensure pw and special role name are null terminated */
59508 +
59509 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59510 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59511 +
59512 + /* Okay.
59513 + * We have our enough of the argument structure..(we have yet
59514 + * to copy_from_user the tables themselves) . Copy the tables
59515 + * only if we need them, i.e. for loading operations. */
59516 +
59517 + switch (gr_usermode->mode) {
59518 + case GR_STATUS:
59519 + if (gr_status & GR_READY) {
59520 + error = 1;
59521 + if (!gr_check_secure_terminal(current))
59522 + error = 3;
59523 + } else
59524 + error = 2;
59525 + goto out;
59526 + case GR_SHUTDOWN:
59527 + if ((gr_status & GR_READY)
59528 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59529 + pax_open_kernel();
59530 + gr_status &= ~GR_READY;
59531 + pax_close_kernel();
59532 +
59533 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59534 + free_variables();
59535 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59536 + memset(gr_system_salt, 0, GR_SALT_LEN);
59537 + memset(gr_system_sum, 0, GR_SHA_LEN);
59538 + } else if (gr_status & GR_READY) {
59539 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59540 + error = -EPERM;
59541 + } else {
59542 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59543 + error = -EAGAIN;
59544 + }
59545 + break;
59546 + case GR_ENABLE:
59547 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59548 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59549 + else {
59550 + if (gr_status & GR_READY)
59551 + error = -EAGAIN;
59552 + else
59553 + error = error2;
59554 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59555 + }
59556 + break;
59557 + case GR_RELOAD:
59558 + if (!(gr_status & GR_READY)) {
59559 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59560 + error = -EAGAIN;
59561 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59562 + lock_kernel();
59563 +
59564 + pax_open_kernel();
59565 + gr_status &= ~GR_READY;
59566 + pax_close_kernel();
59567 +
59568 + free_variables();
59569 + if (!(error2 = gracl_init(gr_usermode))) {
59570 + unlock_kernel();
59571 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59572 + } else {
59573 + unlock_kernel();
59574 + error = error2;
59575 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59576 + }
59577 + } else {
59578 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59579 + error = -EPERM;
59580 + }
59581 + break;
59582 + case GR_SEGVMOD:
59583 + if (unlikely(!(gr_status & GR_READY))) {
59584 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59585 + error = -EAGAIN;
59586 + break;
59587 + }
59588 +
59589 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59590 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59591 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59592 + struct acl_subject_label *segvacl;
59593 + segvacl =
59594 + lookup_acl_subj_label(gr_usermode->segv_inode,
59595 + gr_usermode->segv_device,
59596 + current->role);
59597 + if (segvacl) {
59598 + segvacl->crashes = 0;
59599 + segvacl->expires = 0;
59600 + }
59601 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59602 + gr_remove_uid(gr_usermode->segv_uid);
59603 + }
59604 + } else {
59605 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59606 + error = -EPERM;
59607 + }
59608 + break;
59609 + case GR_SPROLE:
59610 + case GR_SPROLEPAM:
59611 + if (unlikely(!(gr_status & GR_READY))) {
59612 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59613 + error = -EAGAIN;
59614 + break;
59615 + }
59616 +
59617 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59618 + current->role->expires = 0;
59619 + current->role->auth_attempts = 0;
59620 + }
59621 +
59622 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59623 + time_after(current->role->expires, get_seconds())) {
59624 + error = -EBUSY;
59625 + goto out;
59626 + }
59627 +
59628 + if (lookup_special_role_auth
59629 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59630 + && ((!sprole_salt && !sprole_sum)
59631 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59632 + char *p = "";
59633 + assign_special_role(gr_usermode->sp_role);
59634 + read_lock(&tasklist_lock);
59635 + if (current->real_parent)
59636 + p = current->real_parent->role->rolename;
59637 + read_unlock(&tasklist_lock);
59638 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59639 + p, acl_sp_role_value);
59640 + } else {
59641 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59642 + error = -EPERM;
59643 + if(!(current->role->auth_attempts++))
59644 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59645 +
59646 + goto out;
59647 + }
59648 + break;
59649 + case GR_UNSPROLE:
59650 + if (unlikely(!(gr_status & GR_READY))) {
59651 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59652 + error = -EAGAIN;
59653 + break;
59654 + }
59655 +
59656 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59657 + char *p = "";
59658 + int i = 0;
59659 +
59660 + read_lock(&tasklist_lock);
59661 + if (current->real_parent) {
59662 + p = current->real_parent->role->rolename;
59663 + i = current->real_parent->acl_role_id;
59664 + }
59665 + read_unlock(&tasklist_lock);
59666 +
59667 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59668 + gr_set_acls(1);
59669 + } else {
59670 + error = -EPERM;
59671 + goto out;
59672 + }
59673 + break;
59674 + default:
59675 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59676 + error = -EINVAL;
59677 + break;
59678 + }
59679 +
59680 + if (error != -EPERM)
59681 + goto out;
59682 +
59683 + if(!(gr_auth_attempts++))
59684 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59685 +
59686 + out:
59687 + mutex_unlock(&gr_dev_mutex);
59688 + return error;
59689 +}
59690 +
59691 +/* must be called with
59692 + rcu_read_lock();
59693 + read_lock(&tasklist_lock);
59694 + read_lock(&grsec_exec_file_lock);
59695 +*/
59696 +int gr_apply_subject_to_task(struct task_struct *task)
59697 +{
59698 + struct acl_object_label *obj;
59699 + char *tmpname;
59700 + struct acl_subject_label *tmpsubj;
59701 + struct file *filp;
59702 + struct name_entry *nmatch;
59703 +
59704 + filp = task->exec_file;
59705 + if (filp == NULL)
59706 + return 0;
59707 +
59708 + /* the following is to apply the correct subject
59709 + on binaries running when the RBAC system
59710 + is enabled, when the binaries have been
59711 + replaced or deleted since their execution
59712 + -----
59713 + when the RBAC system starts, the inode/dev
59714 + from exec_file will be one the RBAC system
59715 + is unaware of. It only knows the inode/dev
59716 + of the present file on disk, or the absence
59717 + of it.
59718 + */
59719 + preempt_disable();
59720 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59721 +
59722 + nmatch = lookup_name_entry(tmpname);
59723 + preempt_enable();
59724 + tmpsubj = NULL;
59725 + if (nmatch) {
59726 + if (nmatch->deleted)
59727 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59728 + else
59729 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59730 + if (tmpsubj != NULL)
59731 + task->acl = tmpsubj;
59732 + }
59733 + if (tmpsubj == NULL)
59734 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59735 + task->role);
59736 + if (task->acl) {
59737 + task->is_writable = 0;
59738 + /* ignore additional mmap checks for processes that are writable
59739 + by the default ACL */
59740 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59741 + if (unlikely(obj->mode & GR_WRITE))
59742 + task->is_writable = 1;
59743 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59744 + if (unlikely(obj->mode & GR_WRITE))
59745 + task->is_writable = 1;
59746 +
59747 + gr_set_proc_res(task);
59748 +
59749 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59750 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59751 +#endif
59752 + } else {
59753 + return 1;
59754 + }
59755 +
59756 + return 0;
59757 +}
59758 +
59759 +int
59760 +gr_set_acls(const int type)
59761 +{
59762 + struct task_struct *task, *task2;
59763 + struct acl_role_label *role = current->role;
59764 + __u16 acl_role_id = current->acl_role_id;
59765 + const struct cred *cred;
59766 + int ret;
59767 +
59768 + rcu_read_lock();
59769 + read_lock(&tasklist_lock);
59770 + read_lock(&grsec_exec_file_lock);
59771 + do_each_thread(task2, task) {
59772 + /* check to see if we're called from the exit handler,
59773 + if so, only replace ACLs that have inherited the admin
59774 + ACL */
59775 +
59776 + if (type && (task->role != role ||
59777 + task->acl_role_id != acl_role_id))
59778 + continue;
59779 +
59780 + task->acl_role_id = 0;
59781 + task->acl_sp_role = 0;
59782 +
59783 + if (task->exec_file) {
59784 + cred = __task_cred(task);
59785 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59786 +
59787 + ret = gr_apply_subject_to_task(task);
59788 + if (ret) {
59789 + read_unlock(&grsec_exec_file_lock);
59790 + read_unlock(&tasklist_lock);
59791 + rcu_read_unlock();
59792 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59793 + return ret;
59794 + }
59795 + } else {
59796 + // it's a kernel process
59797 + task->role = kernel_role;
59798 + task->acl = kernel_role->root_label;
59799 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59800 + task->acl->mode &= ~GR_PROCFIND;
59801 +#endif
59802 + }
59803 + } while_each_thread(task2, task);
59804 + read_unlock(&grsec_exec_file_lock);
59805 + read_unlock(&tasklist_lock);
59806 + rcu_read_unlock();
59807 +
59808 + return 0;
59809 +}
59810 +
59811 +void
59812 +gr_learn_resource(const struct task_struct *task,
59813 + const int res, const unsigned long wanted, const int gt)
59814 +{
59815 + struct acl_subject_label *acl;
59816 + const struct cred *cred;
59817 +
59818 + if (unlikely((gr_status & GR_READY) &&
59819 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59820 + goto skip_reslog;
59821 +
59822 +#ifdef CONFIG_GRKERNSEC_RESLOG
59823 + gr_log_resource(task, res, wanted, gt);
59824 +#endif
59825 + skip_reslog:
59826 +
59827 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59828 + return;
59829 +
59830 + acl = task->acl;
59831 +
59832 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59833 + !(acl->resmask & (1 << (unsigned short) res))))
59834 + return;
59835 +
59836 + if (wanted >= acl->res[res].rlim_cur) {
59837 + unsigned long res_add;
59838 +
59839 + res_add = wanted;
59840 + switch (res) {
59841 + case RLIMIT_CPU:
59842 + res_add += GR_RLIM_CPU_BUMP;
59843 + break;
59844 + case RLIMIT_FSIZE:
59845 + res_add += GR_RLIM_FSIZE_BUMP;
59846 + break;
59847 + case RLIMIT_DATA:
59848 + res_add += GR_RLIM_DATA_BUMP;
59849 + break;
59850 + case RLIMIT_STACK:
59851 + res_add += GR_RLIM_STACK_BUMP;
59852 + break;
59853 + case RLIMIT_CORE:
59854 + res_add += GR_RLIM_CORE_BUMP;
59855 + break;
59856 + case RLIMIT_RSS:
59857 + res_add += GR_RLIM_RSS_BUMP;
59858 + break;
59859 + case RLIMIT_NPROC:
59860 + res_add += GR_RLIM_NPROC_BUMP;
59861 + break;
59862 + case RLIMIT_NOFILE:
59863 + res_add += GR_RLIM_NOFILE_BUMP;
59864 + break;
59865 + case RLIMIT_MEMLOCK:
59866 + res_add += GR_RLIM_MEMLOCK_BUMP;
59867 + break;
59868 + case RLIMIT_AS:
59869 + res_add += GR_RLIM_AS_BUMP;
59870 + break;
59871 + case RLIMIT_LOCKS:
59872 + res_add += GR_RLIM_LOCKS_BUMP;
59873 + break;
59874 + case RLIMIT_SIGPENDING:
59875 + res_add += GR_RLIM_SIGPENDING_BUMP;
59876 + break;
59877 + case RLIMIT_MSGQUEUE:
59878 + res_add += GR_RLIM_MSGQUEUE_BUMP;
59879 + break;
59880 + case RLIMIT_NICE:
59881 + res_add += GR_RLIM_NICE_BUMP;
59882 + break;
59883 + case RLIMIT_RTPRIO:
59884 + res_add += GR_RLIM_RTPRIO_BUMP;
59885 + break;
59886 + case RLIMIT_RTTIME:
59887 + res_add += GR_RLIM_RTTIME_BUMP;
59888 + break;
59889 + }
59890 +
59891 + acl->res[res].rlim_cur = res_add;
59892 +
59893 + if (wanted > acl->res[res].rlim_max)
59894 + acl->res[res].rlim_max = res_add;
59895 +
59896 + /* only log the subject filename, since resource logging is supported for
59897 + single-subject learning only */
59898 + rcu_read_lock();
59899 + cred = __task_cred(task);
59900 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59901 + task->role->roletype, cred->uid, cred->gid, acl->filename,
59902 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59903 + "", (unsigned long) res, &task->signal->saved_ip);
59904 + rcu_read_unlock();
59905 + }
59906 +
59907 + return;
59908 +}
59909 +
59910 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59911 +void
59912 +pax_set_initial_flags(struct linux_binprm *bprm)
59913 +{
59914 + struct task_struct *task = current;
59915 + struct acl_subject_label *proc;
59916 + unsigned long flags;
59917 +
59918 + if (unlikely(!(gr_status & GR_READY)))
59919 + return;
59920 +
59921 + flags = pax_get_flags(task);
59922 +
59923 + proc = task->acl;
59924 +
59925 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59926 + flags &= ~MF_PAX_PAGEEXEC;
59927 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59928 + flags &= ~MF_PAX_SEGMEXEC;
59929 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59930 + flags &= ~MF_PAX_RANDMMAP;
59931 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59932 + flags &= ~MF_PAX_EMUTRAMP;
59933 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59934 + flags &= ~MF_PAX_MPROTECT;
59935 +
59936 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59937 + flags |= MF_PAX_PAGEEXEC;
59938 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59939 + flags |= MF_PAX_SEGMEXEC;
59940 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59941 + flags |= MF_PAX_RANDMMAP;
59942 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59943 + flags |= MF_PAX_EMUTRAMP;
59944 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59945 + flags |= MF_PAX_MPROTECT;
59946 +
59947 + pax_set_flags(task, flags);
59948 +
59949 + return;
59950 +}
59951 +#endif
59952 +
59953 +#ifdef CONFIG_SYSCTL
59954 +/* Eric Biederman likes breaking userland ABI and every inode-based security
59955 + system to save 35kb of memory */
59956 +
59957 +/* we modify the passed in filename, but adjust it back before returning */
59958 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59959 +{
59960 + struct name_entry *nmatch;
59961 + char *p, *lastp = NULL;
59962 + struct acl_object_label *obj = NULL, *tmp;
59963 + struct acl_subject_label *tmpsubj;
59964 + char c = '\0';
59965 +
59966 + read_lock(&gr_inode_lock);
59967 +
59968 + p = name + len - 1;
59969 + do {
59970 + nmatch = lookup_name_entry(name);
59971 + if (lastp != NULL)
59972 + *lastp = c;
59973 +
59974 + if (nmatch == NULL)
59975 + goto next_component;
59976 + tmpsubj = current->acl;
59977 + do {
59978 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59979 + if (obj != NULL) {
59980 + tmp = obj->globbed;
59981 + while (tmp) {
59982 + if (!glob_match(tmp->filename, name)) {
59983 + obj = tmp;
59984 + goto found_obj;
59985 + }
59986 + tmp = tmp->next;
59987 + }
59988 + goto found_obj;
59989 + }
59990 + } while ((tmpsubj = tmpsubj->parent_subject));
59991 +next_component:
59992 + /* end case */
59993 + if (p == name)
59994 + break;
59995 +
59996 + while (*p != '/')
59997 + p--;
59998 + if (p == name)
59999 + lastp = p + 1;
60000 + else {
60001 + lastp = p;
60002 + p--;
60003 + }
60004 + c = *lastp;
60005 + *lastp = '\0';
60006 + } while (1);
60007 +found_obj:
60008 + read_unlock(&gr_inode_lock);
60009 + /* obj returned will always be non-null */
60010 + return obj;
60011 +}
60012 +
60013 +/* returns 0 when allowing, non-zero on error
60014 + op of 0 is used for readdir, so we don't log the names of hidden files
60015 +*/
60016 +__u32
60017 +gr_handle_sysctl(const struct ctl_table *table, const int op)
60018 +{
60019 + ctl_table *tmp;
60020 + const char *proc_sys = "/proc/sys";
60021 + char *path;
60022 + struct acl_object_label *obj;
60023 + unsigned short len = 0, pos = 0, depth = 0, i;
60024 + __u32 err = 0;
60025 + __u32 mode = 0;
60026 +
60027 + if (unlikely(!(gr_status & GR_READY)))
60028 + return 0;
60029 +
60030 + /* for now, ignore operations on non-sysctl entries if it's not a
60031 + readdir*/
60032 + if (table->child != NULL && op != 0)
60033 + return 0;
60034 +
60035 + mode |= GR_FIND;
60036 + /* it's only a read if it's an entry, read on dirs is for readdir */
60037 + if (op & MAY_READ)
60038 + mode |= GR_READ;
60039 + if (op & MAY_WRITE)
60040 + mode |= GR_WRITE;
60041 +
60042 + preempt_disable();
60043 +
60044 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60045 +
60046 + /* it's only a read/write if it's an actual entry, not a dir
60047 + (which are opened for readdir)
60048 + */
60049 +
60050 + /* convert the requested sysctl entry into a pathname */
60051 +
60052 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60053 + len += strlen(tmp->procname);
60054 + len++;
60055 + depth++;
60056 + }
60057 +
60058 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60059 + /* deny */
60060 + goto out;
60061 + }
60062 +
60063 + memset(path, 0, PAGE_SIZE);
60064 +
60065 + memcpy(path, proc_sys, strlen(proc_sys));
60066 +
60067 + pos += strlen(proc_sys);
60068 +
60069 + for (; depth > 0; depth--) {
60070 + path[pos] = '/';
60071 + pos++;
60072 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60073 + if (depth == i) {
60074 + memcpy(path + pos, tmp->procname,
60075 + strlen(tmp->procname));
60076 + pos += strlen(tmp->procname);
60077 + }
60078 + i++;
60079 + }
60080 + }
60081 +
60082 + obj = gr_lookup_by_name(path, pos);
60083 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60084 +
60085 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60086 + ((err & mode) != mode))) {
60087 + __u32 new_mode = mode;
60088 +
60089 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60090 +
60091 + err = 0;
60092 + gr_log_learn_sysctl(path, new_mode);
60093 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60094 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60095 + err = -ENOENT;
60096 + } else if (!(err & GR_FIND)) {
60097 + err = -ENOENT;
60098 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60099 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60100 + path, (mode & GR_READ) ? " reading" : "",
60101 + (mode & GR_WRITE) ? " writing" : "");
60102 + err = -EACCES;
60103 + } else if ((err & mode) != mode) {
60104 + err = -EACCES;
60105 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60106 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60107 + path, (mode & GR_READ) ? " reading" : "",
60108 + (mode & GR_WRITE) ? " writing" : "");
60109 + err = 0;
60110 + } else
60111 + err = 0;
60112 +
60113 + out:
60114 + preempt_enable();
60115 +
60116 + return err;
60117 +}
60118 +#endif
60119 +
60120 +int
60121 +gr_handle_proc_ptrace(struct task_struct *task)
60122 +{
60123 + struct file *filp;
60124 + struct task_struct *tmp = task;
60125 + struct task_struct *curtemp = current;
60126 + __u32 retmode;
60127 +
60128 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60129 + if (unlikely(!(gr_status & GR_READY)))
60130 + return 0;
60131 +#endif
60132 +
60133 + read_lock(&tasklist_lock);
60134 + read_lock(&grsec_exec_file_lock);
60135 + filp = task->exec_file;
60136 +
60137 + while (tmp->pid > 0) {
60138 + if (tmp == curtemp)
60139 + break;
60140 + tmp = tmp->real_parent;
60141 + }
60142 +
60143 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60144 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60145 + read_unlock(&grsec_exec_file_lock);
60146 + read_unlock(&tasklist_lock);
60147 + return 1;
60148 + }
60149 +
60150 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60151 + if (!(gr_status & GR_READY)) {
60152 + read_unlock(&grsec_exec_file_lock);
60153 + read_unlock(&tasklist_lock);
60154 + return 0;
60155 + }
60156 +#endif
60157 +
60158 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60159 + read_unlock(&grsec_exec_file_lock);
60160 + read_unlock(&tasklist_lock);
60161 +
60162 + if (retmode & GR_NOPTRACE)
60163 + return 1;
60164 +
60165 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60166 + && (current->acl != task->acl || (current->acl != current->role->root_label
60167 + && current->pid != task->pid)))
60168 + return 1;
60169 +
60170 + return 0;
60171 +}
60172 +
60173 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60174 +{
60175 + if (unlikely(!(gr_status & GR_READY)))
60176 + return;
60177 +
60178 + if (!(current->role->roletype & GR_ROLE_GOD))
60179 + return;
60180 +
60181 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60182 + p->role->rolename, gr_task_roletype_to_char(p),
60183 + p->acl->filename);
60184 +}
60185 +
60186 +int
60187 +gr_handle_ptrace(struct task_struct *task, const long request)
60188 +{
60189 + struct task_struct *tmp = task;
60190 + struct task_struct *curtemp = current;
60191 + __u32 retmode;
60192 +
60193 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60194 + if (unlikely(!(gr_status & GR_READY)))
60195 + return 0;
60196 +#endif
60197 +
60198 + read_lock(&tasklist_lock);
60199 + while (tmp->pid > 0) {
60200 + if (tmp == curtemp)
60201 + break;
60202 + tmp = tmp->real_parent;
60203 + }
60204 +
60205 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60206 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60207 + read_unlock(&tasklist_lock);
60208 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60209 + return 1;
60210 + }
60211 + read_unlock(&tasklist_lock);
60212 +
60213 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60214 + if (!(gr_status & GR_READY))
60215 + return 0;
60216 +#endif
60217 +
60218 + read_lock(&grsec_exec_file_lock);
60219 + if (unlikely(!task->exec_file)) {
60220 + read_unlock(&grsec_exec_file_lock);
60221 + return 0;
60222 + }
60223 +
60224 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60225 + read_unlock(&grsec_exec_file_lock);
60226 +
60227 + if (retmode & GR_NOPTRACE) {
60228 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60229 + return 1;
60230 + }
60231 +
60232 + if (retmode & GR_PTRACERD) {
60233 + switch (request) {
60234 + case PTRACE_POKETEXT:
60235 + case PTRACE_POKEDATA:
60236 + case PTRACE_POKEUSR:
60237 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60238 + case PTRACE_SETREGS:
60239 + case PTRACE_SETFPREGS:
60240 +#endif
60241 +#ifdef CONFIG_X86
60242 + case PTRACE_SETFPXREGS:
60243 +#endif
60244 +#ifdef CONFIG_ALTIVEC
60245 + case PTRACE_SETVRREGS:
60246 +#endif
60247 + return 1;
60248 + default:
60249 + return 0;
60250 + }
60251 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60252 + !(current->role->roletype & GR_ROLE_GOD) &&
60253 + (current->acl != task->acl)) {
60254 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60255 + return 1;
60256 + }
60257 +
60258 + return 0;
60259 +}
60260 +
60261 +static int is_writable_mmap(const struct file *filp)
60262 +{
60263 + struct task_struct *task = current;
60264 + struct acl_object_label *obj, *obj2;
60265 +
60266 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60267 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60268 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60269 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60270 + task->role->root_label);
60271 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60272 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60273 + return 1;
60274 + }
60275 + }
60276 + return 0;
60277 +}
60278 +
60279 +int
60280 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60281 +{
60282 + __u32 mode;
60283 +
60284 + if (unlikely(!file || !(prot & PROT_EXEC)))
60285 + return 1;
60286 +
60287 + if (is_writable_mmap(file))
60288 + return 0;
60289 +
60290 + mode =
60291 + gr_search_file(file->f_path.dentry,
60292 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60293 + file->f_path.mnt);
60294 +
60295 + if (!gr_tpe_allow(file))
60296 + return 0;
60297 +
60298 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60299 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60300 + return 0;
60301 + } else if (unlikely(!(mode & GR_EXEC))) {
60302 + return 0;
60303 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60304 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60305 + return 1;
60306 + }
60307 +
60308 + return 1;
60309 +}
60310 +
60311 +int
60312 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60313 +{
60314 + __u32 mode;
60315 +
60316 + if (unlikely(!file || !(prot & PROT_EXEC)))
60317 + return 1;
60318 +
60319 + if (is_writable_mmap(file))
60320 + return 0;
60321 +
60322 + mode =
60323 + gr_search_file(file->f_path.dentry,
60324 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60325 + file->f_path.mnt);
60326 +
60327 + if (!gr_tpe_allow(file))
60328 + return 0;
60329 +
60330 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60331 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60332 + return 0;
60333 + } else if (unlikely(!(mode & GR_EXEC))) {
60334 + return 0;
60335 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60336 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60337 + return 1;
60338 + }
60339 +
60340 + return 1;
60341 +}
60342 +
60343 +void
60344 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60345 +{
60346 + unsigned long runtime;
60347 + unsigned long cputime;
60348 + unsigned int wday, cday;
60349 + __u8 whr, chr;
60350 + __u8 wmin, cmin;
60351 + __u8 wsec, csec;
60352 + struct timespec timeval;
60353 +
60354 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60355 + !(task->acl->mode & GR_PROCACCT)))
60356 + return;
60357 +
60358 + do_posix_clock_monotonic_gettime(&timeval);
60359 + runtime = timeval.tv_sec - task->start_time.tv_sec;
60360 + wday = runtime / (3600 * 24);
60361 + runtime -= wday * (3600 * 24);
60362 + whr = runtime / 3600;
60363 + runtime -= whr * 3600;
60364 + wmin = runtime / 60;
60365 + runtime -= wmin * 60;
60366 + wsec = runtime;
60367 +
60368 + cputime = (task->utime + task->stime) / HZ;
60369 + cday = cputime / (3600 * 24);
60370 + cputime -= cday * (3600 * 24);
60371 + chr = cputime / 3600;
60372 + cputime -= chr * 3600;
60373 + cmin = cputime / 60;
60374 + cputime -= cmin * 60;
60375 + csec = cputime;
60376 +
60377 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60378 +
60379 + return;
60380 +}
60381 +
60382 +void gr_set_kernel_label(struct task_struct *task)
60383 +{
60384 + if (gr_status & GR_READY) {
60385 + task->role = kernel_role;
60386 + task->acl = kernel_role->root_label;
60387 + }
60388 + return;
60389 +}
60390 +
60391 +#ifdef CONFIG_TASKSTATS
60392 +int gr_is_taskstats_denied(int pid)
60393 +{
60394 + struct task_struct *task;
60395 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60396 + const struct cred *cred;
60397 +#endif
60398 + int ret = 0;
60399 +
60400 + /* restrict taskstats viewing to un-chrooted root users
60401 + who have the 'view' subject flag if the RBAC system is enabled
60402 + */
60403 +
60404 + rcu_read_lock();
60405 + read_lock(&tasklist_lock);
60406 + task = find_task_by_vpid(pid);
60407 + if (task) {
60408 +#ifdef CONFIG_GRKERNSEC_CHROOT
60409 + if (proc_is_chrooted(task))
60410 + ret = -EACCES;
60411 +#endif
60412 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60413 + cred = __task_cred(task);
60414 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60415 + if (cred->uid != 0)
60416 + ret = -EACCES;
60417 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60418 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60419 + ret = -EACCES;
60420 +#endif
60421 +#endif
60422 + if (gr_status & GR_READY) {
60423 + if (!(task->acl->mode & GR_VIEW))
60424 + ret = -EACCES;
60425 + }
60426 + } else
60427 + ret = -ENOENT;
60428 +
60429 + read_unlock(&tasklist_lock);
60430 + rcu_read_unlock();
60431 +
60432 + return ret;
60433 +}
60434 +#endif
60435 +
60436 +/* AUXV entries are filled via a descendant of search_binary_handler
60437 + after we've already applied the subject for the target
60438 +*/
60439 +int gr_acl_enable_at_secure(void)
60440 +{
60441 + if (unlikely(!(gr_status & GR_READY)))
60442 + return 0;
60443 +
60444 + if (current->acl->mode & GR_ATSECURE)
60445 + return 1;
60446 +
60447 + return 0;
60448 +}
60449 +
60450 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60451 +{
60452 + struct task_struct *task = current;
60453 + struct dentry *dentry = file->f_path.dentry;
60454 + struct vfsmount *mnt = file->f_path.mnt;
60455 + struct acl_object_label *obj, *tmp;
60456 + struct acl_subject_label *subj;
60457 + unsigned int bufsize;
60458 + int is_not_root;
60459 + char *path;
60460 + dev_t dev = __get_dev(dentry);
60461 +
60462 + if (unlikely(!(gr_status & GR_READY)))
60463 + return 1;
60464 +
60465 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60466 + return 1;
60467 +
60468 + /* ignore Eric Biederman */
60469 + if (IS_PRIVATE(dentry->d_inode))
60470 + return 1;
60471 +
60472 + subj = task->acl;
60473 + do {
60474 + obj = lookup_acl_obj_label(ino, dev, subj);
60475 + if (obj != NULL)
60476 + return (obj->mode & GR_FIND) ? 1 : 0;
60477 + } while ((subj = subj->parent_subject));
60478 +
60479 + /* this is purely an optimization since we're looking for an object
60480 + for the directory we're doing a readdir on
60481 + if it's possible for any globbed object to match the entry we're
60482 + filling into the directory, then the object we find here will be
60483 + an anchor point with attached globbed objects
60484 + */
60485 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60486 + if (obj->globbed == NULL)
60487 + return (obj->mode & GR_FIND) ? 1 : 0;
60488 +
60489 + is_not_root = ((obj->filename[0] == '/') &&
60490 + (obj->filename[1] == '\0')) ? 0 : 1;
60491 + bufsize = PAGE_SIZE - namelen - is_not_root;
60492 +
60493 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
60494 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60495 + return 1;
60496 +
60497 + preempt_disable();
60498 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60499 + bufsize);
60500 +
60501 + bufsize = strlen(path);
60502 +
60503 + /* if base is "/", don't append an additional slash */
60504 + if (is_not_root)
60505 + *(path + bufsize) = '/';
60506 + memcpy(path + bufsize + is_not_root, name, namelen);
60507 + *(path + bufsize + namelen + is_not_root) = '\0';
60508 +
60509 + tmp = obj->globbed;
60510 + while (tmp) {
60511 + if (!glob_match(tmp->filename, path)) {
60512 + preempt_enable();
60513 + return (tmp->mode & GR_FIND) ? 1 : 0;
60514 + }
60515 + tmp = tmp->next;
60516 + }
60517 + preempt_enable();
60518 + return (obj->mode & GR_FIND) ? 1 : 0;
60519 +}
60520 +
60521 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60522 +EXPORT_SYMBOL(gr_acl_is_enabled);
60523 +#endif
60524 +EXPORT_SYMBOL(gr_learn_resource);
60525 +EXPORT_SYMBOL(gr_set_kernel_label);
60526 +#ifdef CONFIG_SECURITY
60527 +EXPORT_SYMBOL(gr_check_user_change);
60528 +EXPORT_SYMBOL(gr_check_group_change);
60529 +#endif
60530 +
60531 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60532 new file mode 100644
60533 index 0000000..34fefda
60534 --- /dev/null
60535 +++ b/grsecurity/gracl_alloc.c
60536 @@ -0,0 +1,105 @@
60537 +#include <linux/kernel.h>
60538 +#include <linux/mm.h>
60539 +#include <linux/slab.h>
60540 +#include <linux/vmalloc.h>
60541 +#include <linux/gracl.h>
60542 +#include <linux/grsecurity.h>
60543 +
60544 +static unsigned long alloc_stack_next = 1;
60545 +static unsigned long alloc_stack_size = 1;
60546 +static void **alloc_stack;
60547 +
60548 +static __inline__ int
60549 +alloc_pop(void)
60550 +{
60551 + if (alloc_stack_next == 1)
60552 + return 0;
60553 +
60554 + kfree(alloc_stack[alloc_stack_next - 2]);
60555 +
60556 + alloc_stack_next--;
60557 +
60558 + return 1;
60559 +}
60560 +
60561 +static __inline__ int
60562 +alloc_push(void *buf)
60563 +{
60564 + if (alloc_stack_next >= alloc_stack_size)
60565 + return 1;
60566 +
60567 + alloc_stack[alloc_stack_next - 1] = buf;
60568 +
60569 + alloc_stack_next++;
60570 +
60571 + return 0;
60572 +}
60573 +
60574 +void *
60575 +acl_alloc(unsigned long len)
60576 +{
60577 + void *ret = NULL;
60578 +
60579 + if (!len || len > PAGE_SIZE)
60580 + goto out;
60581 +
60582 + ret = kmalloc(len, GFP_KERNEL);
60583 +
60584 + if (ret) {
60585 + if (alloc_push(ret)) {
60586 + kfree(ret);
60587 + ret = NULL;
60588 + }
60589 + }
60590 +
60591 +out:
60592 + return ret;
60593 +}
60594 +
60595 +void *
60596 +acl_alloc_num(unsigned long num, unsigned long len)
60597 +{
60598 + if (!len || (num > (PAGE_SIZE / len)))
60599 + return NULL;
60600 +
60601 + return acl_alloc(num * len);
60602 +}
60603 +
60604 +void
60605 +acl_free_all(void)
60606 +{
60607 + if (gr_acl_is_enabled() || !alloc_stack)
60608 + return;
60609 +
60610 + while (alloc_pop()) ;
60611 +
60612 + if (alloc_stack) {
60613 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60614 + kfree(alloc_stack);
60615 + else
60616 + vfree(alloc_stack);
60617 + }
60618 +
60619 + alloc_stack = NULL;
60620 + alloc_stack_size = 1;
60621 + alloc_stack_next = 1;
60622 +
60623 + return;
60624 +}
60625 +
60626 +int
60627 +acl_alloc_stack_init(unsigned long size)
60628 +{
60629 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60630 + alloc_stack =
60631 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60632 + else
60633 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60634 +
60635 + alloc_stack_size = size;
60636 +
60637 + if (!alloc_stack)
60638 + return 0;
60639 + else
60640 + return 1;
60641 +}
60642 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60643 new file mode 100644
60644 index 0000000..955ddfb
60645 --- /dev/null
60646 +++ b/grsecurity/gracl_cap.c
60647 @@ -0,0 +1,101 @@
60648 +#include <linux/kernel.h>
60649 +#include <linux/module.h>
60650 +#include <linux/sched.h>
60651 +#include <linux/gracl.h>
60652 +#include <linux/grsecurity.h>
60653 +#include <linux/grinternal.h>
60654 +
60655 +extern const char *captab_log[];
60656 +extern int captab_log_entries;
60657 +
60658 +int
60659 +gr_acl_is_capable(const int cap)
60660 +{
60661 + struct task_struct *task = current;
60662 + const struct cred *cred = current_cred();
60663 + struct acl_subject_label *curracl;
60664 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60665 + kernel_cap_t cap_audit = __cap_empty_set;
60666 +
60667 + if (!gr_acl_is_enabled())
60668 + return 1;
60669 +
60670 + curracl = task->acl;
60671 +
60672 + cap_drop = curracl->cap_lower;
60673 + cap_mask = curracl->cap_mask;
60674 + cap_audit = curracl->cap_invert_audit;
60675 +
60676 + while ((curracl = curracl->parent_subject)) {
60677 + /* if the cap isn't specified in the current computed mask but is specified in the
60678 + current level subject, and is lowered in the current level subject, then add
60679 + it to the set of dropped capabilities
60680 + otherwise, add the current level subject's mask to the current computed mask
60681 + */
60682 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60683 + cap_raise(cap_mask, cap);
60684 + if (cap_raised(curracl->cap_lower, cap))
60685 + cap_raise(cap_drop, cap);
60686 + if (cap_raised(curracl->cap_invert_audit, cap))
60687 + cap_raise(cap_audit, cap);
60688 + }
60689 + }
60690 +
60691 + if (!cap_raised(cap_drop, cap)) {
60692 + if (cap_raised(cap_audit, cap))
60693 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60694 + return 1;
60695 + }
60696 +
60697 + curracl = task->acl;
60698 +
60699 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60700 + && cap_raised(cred->cap_effective, cap)) {
60701 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60702 + task->role->roletype, cred->uid,
60703 + cred->gid, task->exec_file ?
60704 + gr_to_filename(task->exec_file->f_path.dentry,
60705 + task->exec_file->f_path.mnt) : curracl->filename,
60706 + curracl->filename, 0UL,
60707 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60708 + return 1;
60709 + }
60710 +
60711 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60712 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60713 + return 0;
60714 +}
60715 +
60716 +int
60717 +gr_acl_is_capable_nolog(const int cap)
60718 +{
60719 + struct acl_subject_label *curracl;
60720 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60721 +
60722 + if (!gr_acl_is_enabled())
60723 + return 1;
60724 +
60725 + curracl = current->acl;
60726 +
60727 + cap_drop = curracl->cap_lower;
60728 + cap_mask = curracl->cap_mask;
60729 +
60730 + while ((curracl = curracl->parent_subject)) {
60731 + /* if the cap isn't specified in the current computed mask but is specified in the
60732 + current level subject, and is lowered in the current level subject, then add
60733 + it to the set of dropped capabilities
60734 + otherwise, add the current level subject's mask to the current computed mask
60735 + */
60736 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60737 + cap_raise(cap_mask, cap);
60738 + if (cap_raised(curracl->cap_lower, cap))
60739 + cap_raise(cap_drop, cap);
60740 + }
60741 + }
60742 +
60743 + if (!cap_raised(cap_drop, cap))
60744 + return 1;
60745 +
60746 + return 0;
60747 +}
60748 +
60749 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60750 new file mode 100644
60751 index 0000000..523e7e8
60752 --- /dev/null
60753 +++ b/grsecurity/gracl_fs.c
60754 @@ -0,0 +1,435 @@
60755 +#include <linux/kernel.h>
60756 +#include <linux/sched.h>
60757 +#include <linux/types.h>
60758 +#include <linux/fs.h>
60759 +#include <linux/file.h>
60760 +#include <linux/stat.h>
60761 +#include <linux/grsecurity.h>
60762 +#include <linux/grinternal.h>
60763 +#include <linux/gracl.h>
60764 +
60765 +umode_t
60766 +gr_acl_umask(void)
60767 +{
60768 + if (unlikely(!gr_acl_is_enabled()))
60769 + return 0;
60770 +
60771 + return current->role->umask;
60772 +}
60773 +
60774 +__u32
60775 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60776 + const struct vfsmount * mnt)
60777 +{
60778 + __u32 mode;
60779 +
60780 + if (unlikely(!dentry->d_inode))
60781 + return GR_FIND;
60782 +
60783 + mode =
60784 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60785 +
60786 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60787 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60788 + return mode;
60789 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60790 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60791 + return 0;
60792 + } else if (unlikely(!(mode & GR_FIND)))
60793 + return 0;
60794 +
60795 + return GR_FIND;
60796 +}
60797 +
60798 +__u32
60799 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60800 + int acc_mode)
60801 +{
60802 + __u32 reqmode = GR_FIND;
60803 + __u32 mode;
60804 +
60805 + if (unlikely(!dentry->d_inode))
60806 + return reqmode;
60807 +
60808 + if (acc_mode & MAY_APPEND)
60809 + reqmode |= GR_APPEND;
60810 + else if (acc_mode & MAY_WRITE)
60811 + reqmode |= GR_WRITE;
60812 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60813 + reqmode |= GR_READ;
60814 +
60815 + mode =
60816 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60817 + mnt);
60818 +
60819 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60820 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60821 + reqmode & GR_READ ? " reading" : "",
60822 + reqmode & GR_WRITE ? " writing" : reqmode &
60823 + GR_APPEND ? " appending" : "");
60824 + return reqmode;
60825 + } else
60826 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60827 + {
60828 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60829 + reqmode & GR_READ ? " reading" : "",
60830 + reqmode & GR_WRITE ? " writing" : reqmode &
60831 + GR_APPEND ? " appending" : "");
60832 + return 0;
60833 + } else if (unlikely((mode & reqmode) != reqmode))
60834 + return 0;
60835 +
60836 + return reqmode;
60837 +}
60838 +
60839 +__u32
60840 +gr_acl_handle_creat(const struct dentry * dentry,
60841 + const struct dentry * p_dentry,
60842 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60843 + const int imode)
60844 +{
60845 + __u32 reqmode = GR_WRITE | GR_CREATE;
60846 + __u32 mode;
60847 +
60848 + if (acc_mode & MAY_APPEND)
60849 + reqmode |= GR_APPEND;
60850 + // if a directory was required or the directory already exists, then
60851 + // don't count this open as a read
60852 + if ((acc_mode & MAY_READ) &&
60853 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60854 + reqmode |= GR_READ;
60855 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60856 + reqmode |= GR_SETID;
60857 +
60858 + mode =
60859 + gr_check_create(dentry, p_dentry, p_mnt,
60860 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60861 +
60862 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60863 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60864 + reqmode & GR_READ ? " reading" : "",
60865 + reqmode & GR_WRITE ? " writing" : reqmode &
60866 + GR_APPEND ? " appending" : "");
60867 + return reqmode;
60868 + } else
60869 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60870 + {
60871 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60872 + reqmode & GR_READ ? " reading" : "",
60873 + reqmode & GR_WRITE ? " writing" : reqmode &
60874 + GR_APPEND ? " appending" : "");
60875 + return 0;
60876 + } else if (unlikely((mode & reqmode) != reqmode))
60877 + return 0;
60878 +
60879 + return reqmode;
60880 +}
60881 +
60882 +__u32
60883 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60884 + const int fmode)
60885 +{
60886 + __u32 mode, reqmode = GR_FIND;
60887 +
60888 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60889 + reqmode |= GR_EXEC;
60890 + if (fmode & S_IWOTH)
60891 + reqmode |= GR_WRITE;
60892 + if (fmode & S_IROTH)
60893 + reqmode |= GR_READ;
60894 +
60895 + mode =
60896 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60897 + mnt);
60898 +
60899 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60900 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60901 + reqmode & GR_READ ? " reading" : "",
60902 + reqmode & GR_WRITE ? " writing" : "",
60903 + reqmode & GR_EXEC ? " executing" : "");
60904 + return reqmode;
60905 + } else
60906 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60907 + {
60908 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60909 + reqmode & GR_READ ? " reading" : "",
60910 + reqmode & GR_WRITE ? " writing" : "",
60911 + reqmode & GR_EXEC ? " executing" : "");
60912 + return 0;
60913 + } else if (unlikely((mode & reqmode) != reqmode))
60914 + return 0;
60915 +
60916 + return reqmode;
60917 +}
60918 +
60919 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60920 +{
60921 + __u32 mode;
60922 +
60923 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60924 +
60925 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60926 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60927 + return mode;
60928 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60929 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60930 + return 0;
60931 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60932 + return 0;
60933 +
60934 + return (reqmode);
60935 +}
60936 +
60937 +__u32
60938 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60939 +{
60940 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60941 +}
60942 +
60943 +__u32
60944 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60945 +{
60946 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60947 +}
60948 +
60949 +__u32
60950 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60951 +{
60952 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60953 +}
60954 +
60955 +__u32
60956 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60957 +{
60958 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60959 +}
60960 +
60961 +__u32
60962 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60963 + umode_t *modeptr)
60964 +{
60965 + mode_t mode;
60966 +
60967 + *modeptr &= ~(mode_t)gr_acl_umask();
60968 + mode = *modeptr;
60969 +
60970 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60971 + return 1;
60972 +
60973 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
60974 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60975 + GR_CHMOD_ACL_MSG);
60976 + } else {
60977 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60978 + }
60979 +}
60980 +
60981 +__u32
60982 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60983 +{
60984 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60985 +}
60986 +
60987 +__u32
60988 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60989 +{
60990 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60991 +}
60992 +
60993 +__u32
60994 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60995 +{
60996 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60997 +}
60998 +
60999 +__u32
61000 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61001 +{
61002 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61003 + GR_UNIXCONNECT_ACL_MSG);
61004 +}
61005 +
61006 +/* hardlinks require at minimum create and link permission,
61007 + any additional privilege required is based on the
61008 + privilege of the file being linked to
61009 +*/
61010 +__u32
61011 +gr_acl_handle_link(const struct dentry * new_dentry,
61012 + const struct dentry * parent_dentry,
61013 + const struct vfsmount * parent_mnt,
61014 + const struct dentry * old_dentry,
61015 + const struct vfsmount * old_mnt, const char *to)
61016 +{
61017 + __u32 mode;
61018 + __u32 needmode = GR_CREATE | GR_LINK;
61019 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61020 +
61021 + mode =
61022 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61023 + old_mnt);
61024 +
61025 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61026 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61027 + return mode;
61028 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61029 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61030 + return 0;
61031 + } else if (unlikely((mode & needmode) != needmode))
61032 + return 0;
61033 +
61034 + return 1;
61035 +}
61036 +
61037 +__u32
61038 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61039 + const struct dentry * parent_dentry,
61040 + const struct vfsmount * parent_mnt, const char *from)
61041 +{
61042 + __u32 needmode = GR_WRITE | GR_CREATE;
61043 + __u32 mode;
61044 +
61045 + mode =
61046 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61047 + GR_CREATE | GR_AUDIT_CREATE |
61048 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61049 +
61050 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61051 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61052 + return mode;
61053 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61054 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61055 + return 0;
61056 + } else if (unlikely((mode & needmode) != needmode))
61057 + return 0;
61058 +
61059 + return (GR_WRITE | GR_CREATE);
61060 +}
61061 +
61062 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61063 +{
61064 + __u32 mode;
61065 +
61066 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61067 +
61068 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61069 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61070 + return mode;
61071 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61072 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61073 + return 0;
61074 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61075 + return 0;
61076 +
61077 + return (reqmode);
61078 +}
61079 +
61080 +__u32
61081 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61082 + const struct dentry * parent_dentry,
61083 + const struct vfsmount * parent_mnt,
61084 + const int mode)
61085 +{
61086 + __u32 reqmode = GR_WRITE | GR_CREATE;
61087 + if (unlikely(mode & (S_ISUID | S_ISGID)))
61088 + reqmode |= GR_SETID;
61089 +
61090 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61091 + reqmode, GR_MKNOD_ACL_MSG);
61092 +}
61093 +
61094 +__u32
61095 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61096 + const struct dentry *parent_dentry,
61097 + const struct vfsmount *parent_mnt)
61098 +{
61099 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61100 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61101 +}
61102 +
61103 +#define RENAME_CHECK_SUCCESS(old, new) \
61104 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61105 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61106 +
61107 +int
61108 +gr_acl_handle_rename(struct dentry *new_dentry,
61109 + struct dentry *parent_dentry,
61110 + const struct vfsmount *parent_mnt,
61111 + struct dentry *old_dentry,
61112 + struct inode *old_parent_inode,
61113 + struct vfsmount *old_mnt, const char *newname)
61114 +{
61115 + __u32 comp1, comp2;
61116 + int error = 0;
61117 +
61118 + if (unlikely(!gr_acl_is_enabled()))
61119 + return 0;
61120 +
61121 + if (!new_dentry->d_inode) {
61122 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61123 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61124 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61125 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61126 + GR_DELETE | GR_AUDIT_DELETE |
61127 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61128 + GR_SUPPRESS, old_mnt);
61129 + } else {
61130 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61131 + GR_CREATE | GR_DELETE |
61132 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61133 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61134 + GR_SUPPRESS, parent_mnt);
61135 + comp2 =
61136 + gr_search_file(old_dentry,
61137 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61138 + GR_DELETE | GR_AUDIT_DELETE |
61139 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61140 + }
61141 +
61142 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61143 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61144 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61145 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61146 + && !(comp2 & GR_SUPPRESS)) {
61147 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61148 + error = -EACCES;
61149 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61150 + error = -EACCES;
61151 +
61152 + return error;
61153 +}
61154 +
61155 +void
61156 +gr_acl_handle_exit(void)
61157 +{
61158 + u16 id;
61159 + char *rolename;
61160 + struct file *exec_file;
61161 +
61162 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61163 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61164 + id = current->acl_role_id;
61165 + rolename = current->role->rolename;
61166 + gr_set_acls(1);
61167 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61168 + }
61169 +
61170 + write_lock(&grsec_exec_file_lock);
61171 + exec_file = current->exec_file;
61172 + current->exec_file = NULL;
61173 + write_unlock(&grsec_exec_file_lock);
61174 +
61175 + if (exec_file)
61176 + fput(exec_file);
61177 +}
61178 +
61179 +int
61180 +gr_acl_handle_procpidmem(const struct task_struct *task)
61181 +{
61182 + if (unlikely(!gr_acl_is_enabled()))
61183 + return 0;
61184 +
61185 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61186 + return -EACCES;
61187 +
61188 + return 0;
61189 +}
61190 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61191 new file mode 100644
61192 index 0000000..cd07b96
61193 --- /dev/null
61194 +++ b/grsecurity/gracl_ip.c
61195 @@ -0,0 +1,382 @@
61196 +#include <linux/kernel.h>
61197 +#include <asm/uaccess.h>
61198 +#include <asm/errno.h>
61199 +#include <net/sock.h>
61200 +#include <linux/file.h>
61201 +#include <linux/fs.h>
61202 +#include <linux/net.h>
61203 +#include <linux/in.h>
61204 +#include <linux/skbuff.h>
61205 +#include <linux/ip.h>
61206 +#include <linux/udp.h>
61207 +#include <linux/smp_lock.h>
61208 +#include <linux/types.h>
61209 +#include <linux/sched.h>
61210 +#include <linux/netdevice.h>
61211 +#include <linux/inetdevice.h>
61212 +#include <linux/gracl.h>
61213 +#include <linux/grsecurity.h>
61214 +#include <linux/grinternal.h>
61215 +
61216 +#define GR_BIND 0x01
61217 +#define GR_CONNECT 0x02
61218 +#define GR_INVERT 0x04
61219 +#define GR_BINDOVERRIDE 0x08
61220 +#define GR_CONNECTOVERRIDE 0x10
61221 +#define GR_SOCK_FAMILY 0x20
61222 +
61223 +static const char * gr_protocols[IPPROTO_MAX] = {
61224 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61225 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61226 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61227 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61228 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61229 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61230 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61231 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61232 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61233 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61234 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61235 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61236 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61237 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61238 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61239 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61240 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61241 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61242 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61243 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61244 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61245 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61246 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61247 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61248 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61249 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61250 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61251 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61252 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61253 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61254 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61255 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61256 + };
61257 +
61258 +static const char * gr_socktypes[SOCK_MAX] = {
61259 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61260 + "unknown:7", "unknown:8", "unknown:9", "packet"
61261 + };
61262 +
61263 +static const char * gr_sockfamilies[AF_MAX+1] = {
61264 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61265 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61266 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61267 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61268 + };
61269 +
61270 +const char *
61271 +gr_proto_to_name(unsigned char proto)
61272 +{
61273 + return gr_protocols[proto];
61274 +}
61275 +
61276 +const char *
61277 +gr_socktype_to_name(unsigned char type)
61278 +{
61279 + return gr_socktypes[type];
61280 +}
61281 +
61282 +const char *
61283 +gr_sockfamily_to_name(unsigned char family)
61284 +{
61285 + return gr_sockfamilies[family];
61286 +}
61287 +
61288 +int
61289 +gr_search_socket(const int domain, const int type, const int protocol)
61290 +{
61291 + struct acl_subject_label *curr;
61292 + const struct cred *cred = current_cred();
61293 +
61294 + if (unlikely(!gr_acl_is_enabled()))
61295 + goto exit;
61296 +
61297 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
61298 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61299 + goto exit; // let the kernel handle it
61300 +
61301 + curr = current->acl;
61302 +
61303 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61304 + /* the family is allowed, if this is PF_INET allow it only if
61305 + the extra sock type/protocol checks pass */
61306 + if (domain == PF_INET)
61307 + goto inet_check;
61308 + goto exit;
61309 + } else {
61310 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61311 + __u32 fakeip = 0;
61312 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61313 + current->role->roletype, cred->uid,
61314 + cred->gid, current->exec_file ?
61315 + gr_to_filename(current->exec_file->f_path.dentry,
61316 + current->exec_file->f_path.mnt) :
61317 + curr->filename, curr->filename,
61318 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61319 + &current->signal->saved_ip);
61320 + goto exit;
61321 + }
61322 + goto exit_fail;
61323 + }
61324 +
61325 +inet_check:
61326 + /* the rest of this checking is for IPv4 only */
61327 + if (!curr->ips)
61328 + goto exit;
61329 +
61330 + if ((curr->ip_type & (1 << type)) &&
61331 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61332 + goto exit;
61333 +
61334 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61335 + /* we don't place acls on raw sockets , and sometimes
61336 + dgram/ip sockets are opened for ioctl and not
61337 + bind/connect, so we'll fake a bind learn log */
61338 + if (type == SOCK_RAW || type == SOCK_PACKET) {
61339 + __u32 fakeip = 0;
61340 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61341 + current->role->roletype, cred->uid,
61342 + cred->gid, current->exec_file ?
61343 + gr_to_filename(current->exec_file->f_path.dentry,
61344 + current->exec_file->f_path.mnt) :
61345 + curr->filename, curr->filename,
61346 + &fakeip, 0, type,
61347 + protocol, GR_CONNECT, &current->signal->saved_ip);
61348 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61349 + __u32 fakeip = 0;
61350 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61351 + current->role->roletype, cred->uid,
61352 + cred->gid, current->exec_file ?
61353 + gr_to_filename(current->exec_file->f_path.dentry,
61354 + current->exec_file->f_path.mnt) :
61355 + curr->filename, curr->filename,
61356 + &fakeip, 0, type,
61357 + protocol, GR_BIND, &current->signal->saved_ip);
61358 + }
61359 + /* we'll log when they use connect or bind */
61360 + goto exit;
61361 + }
61362 +
61363 +exit_fail:
61364 + if (domain == PF_INET)
61365 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61366 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
61367 + else
61368 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61369 + gr_socktype_to_name(type), protocol);
61370 +
61371 + return 0;
61372 +exit:
61373 + return 1;
61374 +}
61375 +
61376 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61377 +{
61378 + if ((ip->mode & mode) &&
61379 + (ip_port >= ip->low) &&
61380 + (ip_port <= ip->high) &&
61381 + ((ntohl(ip_addr) & our_netmask) ==
61382 + (ntohl(our_addr) & our_netmask))
61383 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61384 + && (ip->type & (1 << type))) {
61385 + if (ip->mode & GR_INVERT)
61386 + return 2; // specifically denied
61387 + else
61388 + return 1; // allowed
61389 + }
61390 +
61391 + return 0; // not specifically allowed, may continue parsing
61392 +}
61393 +
61394 +static int
61395 +gr_search_connectbind(const int full_mode, struct sock *sk,
61396 + struct sockaddr_in *addr, const int type)
61397 +{
61398 + char iface[IFNAMSIZ] = {0};
61399 + struct acl_subject_label *curr;
61400 + struct acl_ip_label *ip;
61401 + struct inet_sock *isk;
61402 + struct net_device *dev;
61403 + struct in_device *idev;
61404 + unsigned long i;
61405 + int ret;
61406 + int mode = full_mode & (GR_BIND | GR_CONNECT);
61407 + __u32 ip_addr = 0;
61408 + __u32 our_addr;
61409 + __u32 our_netmask;
61410 + char *p;
61411 + __u16 ip_port = 0;
61412 + const struct cred *cred = current_cred();
61413 +
61414 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61415 + return 0;
61416 +
61417 + curr = current->acl;
61418 + isk = inet_sk(sk);
61419 +
61420 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61421 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61422 + addr->sin_addr.s_addr = curr->inaddr_any_override;
61423 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61424 + struct sockaddr_in saddr;
61425 + int err;
61426 +
61427 + saddr.sin_family = AF_INET;
61428 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
61429 + saddr.sin_port = isk->sport;
61430 +
61431 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61432 + if (err)
61433 + return err;
61434 +
61435 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61436 + if (err)
61437 + return err;
61438 + }
61439 +
61440 + if (!curr->ips)
61441 + return 0;
61442 +
61443 + ip_addr = addr->sin_addr.s_addr;
61444 + ip_port = ntohs(addr->sin_port);
61445 +
61446 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61447 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61448 + current->role->roletype, cred->uid,
61449 + cred->gid, current->exec_file ?
61450 + gr_to_filename(current->exec_file->f_path.dentry,
61451 + current->exec_file->f_path.mnt) :
61452 + curr->filename, curr->filename,
61453 + &ip_addr, ip_port, type,
61454 + sk->sk_protocol, mode, &current->signal->saved_ip);
61455 + return 0;
61456 + }
61457 +
61458 + for (i = 0; i < curr->ip_num; i++) {
61459 + ip = *(curr->ips + i);
61460 + if (ip->iface != NULL) {
61461 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
61462 + p = strchr(iface, ':');
61463 + if (p != NULL)
61464 + *p = '\0';
61465 + dev = dev_get_by_name(sock_net(sk), iface);
61466 + if (dev == NULL)
61467 + continue;
61468 + idev = in_dev_get(dev);
61469 + if (idev == NULL) {
61470 + dev_put(dev);
61471 + continue;
61472 + }
61473 + rcu_read_lock();
61474 + for_ifa(idev) {
61475 + if (!strcmp(ip->iface, ifa->ifa_label)) {
61476 + our_addr = ifa->ifa_address;
61477 + our_netmask = 0xffffffff;
61478 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61479 + if (ret == 1) {
61480 + rcu_read_unlock();
61481 + in_dev_put(idev);
61482 + dev_put(dev);
61483 + return 0;
61484 + } else if (ret == 2) {
61485 + rcu_read_unlock();
61486 + in_dev_put(idev);
61487 + dev_put(dev);
61488 + goto denied;
61489 + }
61490 + }
61491 + } endfor_ifa(idev);
61492 + rcu_read_unlock();
61493 + in_dev_put(idev);
61494 + dev_put(dev);
61495 + } else {
61496 + our_addr = ip->addr;
61497 + our_netmask = ip->netmask;
61498 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61499 + if (ret == 1)
61500 + return 0;
61501 + else if (ret == 2)
61502 + goto denied;
61503 + }
61504 + }
61505 +
61506 +denied:
61507 + if (mode == GR_BIND)
61508 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61509 + else if (mode == GR_CONNECT)
61510 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61511 +
61512 + return -EACCES;
61513 +}
61514 +
61515 +int
61516 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61517 +{
61518 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61519 +}
61520 +
61521 +int
61522 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61523 +{
61524 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61525 +}
61526 +
61527 +int gr_search_listen(struct socket *sock)
61528 +{
61529 + struct sock *sk = sock->sk;
61530 + struct sockaddr_in addr;
61531 +
61532 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61533 + addr.sin_port = inet_sk(sk)->sport;
61534 +
61535 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61536 +}
61537 +
61538 +int gr_search_accept(struct socket *sock)
61539 +{
61540 + struct sock *sk = sock->sk;
61541 + struct sockaddr_in addr;
61542 +
61543 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61544 + addr.sin_port = inet_sk(sk)->sport;
61545 +
61546 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61547 +}
61548 +
61549 +int
61550 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61551 +{
61552 + if (addr)
61553 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61554 + else {
61555 + struct sockaddr_in sin;
61556 + const struct inet_sock *inet = inet_sk(sk);
61557 +
61558 + sin.sin_addr.s_addr = inet->daddr;
61559 + sin.sin_port = inet->dport;
61560 +
61561 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61562 + }
61563 +}
61564 +
61565 +int
61566 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61567 +{
61568 + struct sockaddr_in sin;
61569 +
61570 + if (unlikely(skb->len < sizeof (struct udphdr)))
61571 + return 0; // skip this packet
61572 +
61573 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61574 + sin.sin_port = udp_hdr(skb)->source;
61575 +
61576 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61577 +}
61578 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61579 new file mode 100644
61580 index 0000000..34bdd46
61581 --- /dev/null
61582 +++ b/grsecurity/gracl_learn.c
61583 @@ -0,0 +1,208 @@
61584 +#include <linux/kernel.h>
61585 +#include <linux/mm.h>
61586 +#include <linux/sched.h>
61587 +#include <linux/poll.h>
61588 +#include <linux/smp_lock.h>
61589 +#include <linux/string.h>
61590 +#include <linux/file.h>
61591 +#include <linux/types.h>
61592 +#include <linux/vmalloc.h>
61593 +#include <linux/grinternal.h>
61594 +
61595 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61596 + size_t count, loff_t *ppos);
61597 +extern int gr_acl_is_enabled(void);
61598 +
61599 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61600 +static int gr_learn_attached;
61601 +
61602 +/* use a 512k buffer */
61603 +#define LEARN_BUFFER_SIZE (512 * 1024)
61604 +
61605 +static DEFINE_SPINLOCK(gr_learn_lock);
61606 +static DEFINE_MUTEX(gr_learn_user_mutex);
61607 +
61608 +/* we need to maintain two buffers, so that the kernel context of grlearn
61609 + uses a semaphore around the userspace copying, and the other kernel contexts
61610 + use a spinlock when copying into the buffer, since they cannot sleep
61611 +*/
61612 +static char *learn_buffer;
61613 +static char *learn_buffer_user;
61614 +static int learn_buffer_len;
61615 +static int learn_buffer_user_len;
61616 +
61617 +static ssize_t
61618 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61619 +{
61620 + DECLARE_WAITQUEUE(wait, current);
61621 + ssize_t retval = 0;
61622 +
61623 + add_wait_queue(&learn_wait, &wait);
61624 + set_current_state(TASK_INTERRUPTIBLE);
61625 + do {
61626 + mutex_lock(&gr_learn_user_mutex);
61627 + spin_lock(&gr_learn_lock);
61628 + if (learn_buffer_len)
61629 + break;
61630 + spin_unlock(&gr_learn_lock);
61631 + mutex_unlock(&gr_learn_user_mutex);
61632 + if (file->f_flags & O_NONBLOCK) {
61633 + retval = -EAGAIN;
61634 + goto out;
61635 + }
61636 + if (signal_pending(current)) {
61637 + retval = -ERESTARTSYS;
61638 + goto out;
61639 + }
61640 +
61641 + schedule();
61642 + } while (1);
61643 +
61644 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61645 + learn_buffer_user_len = learn_buffer_len;
61646 + retval = learn_buffer_len;
61647 + learn_buffer_len = 0;
61648 +
61649 + spin_unlock(&gr_learn_lock);
61650 +
61651 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61652 + retval = -EFAULT;
61653 +
61654 + mutex_unlock(&gr_learn_user_mutex);
61655 +out:
61656 + set_current_state(TASK_RUNNING);
61657 + remove_wait_queue(&learn_wait, &wait);
61658 + return retval;
61659 +}
61660 +
61661 +static unsigned int
61662 +poll_learn(struct file * file, poll_table * wait)
61663 +{
61664 + poll_wait(file, &learn_wait, wait);
61665 +
61666 + if (learn_buffer_len)
61667 + return (POLLIN | POLLRDNORM);
61668 +
61669 + return 0;
61670 +}
61671 +
61672 +void
61673 +gr_clear_learn_entries(void)
61674 +{
61675 + char *tmp;
61676 +
61677 + mutex_lock(&gr_learn_user_mutex);
61678 + spin_lock(&gr_learn_lock);
61679 + tmp = learn_buffer;
61680 + learn_buffer = NULL;
61681 + spin_unlock(&gr_learn_lock);
61682 + if (tmp)
61683 + vfree(tmp);
61684 + if (learn_buffer_user != NULL) {
61685 + vfree(learn_buffer_user);
61686 + learn_buffer_user = NULL;
61687 + }
61688 + learn_buffer_len = 0;
61689 + mutex_unlock(&gr_learn_user_mutex);
61690 +
61691 + return;
61692 +}
61693 +
61694 +void
61695 +gr_add_learn_entry(const char *fmt, ...)
61696 +{
61697 + va_list args;
61698 + unsigned int len;
61699 +
61700 + if (!gr_learn_attached)
61701 + return;
61702 +
61703 + spin_lock(&gr_learn_lock);
61704 +
61705 + /* leave a gap at the end so we know when it's "full" but don't have to
61706 + compute the exact length of the string we're trying to append
61707 + */
61708 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61709 + spin_unlock(&gr_learn_lock);
61710 + wake_up_interruptible(&learn_wait);
61711 + return;
61712 + }
61713 + if (learn_buffer == NULL) {
61714 + spin_unlock(&gr_learn_lock);
61715 + return;
61716 + }
61717 +
61718 + va_start(args, fmt);
61719 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61720 + va_end(args);
61721 +
61722 + learn_buffer_len += len + 1;
61723 +
61724 + spin_unlock(&gr_learn_lock);
61725 + wake_up_interruptible(&learn_wait);
61726 +
61727 + return;
61728 +}
61729 +
61730 +static int
61731 +open_learn(struct inode *inode, struct file *file)
61732 +{
61733 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61734 + return -EBUSY;
61735 + if (file->f_mode & FMODE_READ) {
61736 + int retval = 0;
61737 + mutex_lock(&gr_learn_user_mutex);
61738 + if (learn_buffer == NULL)
61739 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61740 + if (learn_buffer_user == NULL)
61741 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61742 + if (learn_buffer == NULL) {
61743 + retval = -ENOMEM;
61744 + goto out_error;
61745 + }
61746 + if (learn_buffer_user == NULL) {
61747 + retval = -ENOMEM;
61748 + goto out_error;
61749 + }
61750 + learn_buffer_len = 0;
61751 + learn_buffer_user_len = 0;
61752 + gr_learn_attached = 1;
61753 +out_error:
61754 + mutex_unlock(&gr_learn_user_mutex);
61755 + return retval;
61756 + }
61757 + return 0;
61758 +}
61759 +
61760 +static int
61761 +close_learn(struct inode *inode, struct file *file)
61762 +{
61763 + if (file->f_mode & FMODE_READ) {
61764 + char *tmp = NULL;
61765 + mutex_lock(&gr_learn_user_mutex);
61766 + spin_lock(&gr_learn_lock);
61767 + tmp = learn_buffer;
61768 + learn_buffer = NULL;
61769 + spin_unlock(&gr_learn_lock);
61770 + if (tmp)
61771 + vfree(tmp);
61772 + if (learn_buffer_user != NULL) {
61773 + vfree(learn_buffer_user);
61774 + learn_buffer_user = NULL;
61775 + }
61776 + learn_buffer_len = 0;
61777 + learn_buffer_user_len = 0;
61778 + gr_learn_attached = 0;
61779 + mutex_unlock(&gr_learn_user_mutex);
61780 + }
61781 +
61782 + return 0;
61783 +}
61784 +
61785 +const struct file_operations grsec_fops = {
61786 + .read = read_learn,
61787 + .write = write_grsec_handler,
61788 + .open = open_learn,
61789 + .release = close_learn,
61790 + .poll = poll_learn,
61791 +};
61792 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61793 new file mode 100644
61794 index 0000000..70b2179
61795 --- /dev/null
61796 +++ b/grsecurity/gracl_res.c
61797 @@ -0,0 +1,67 @@
61798 +#include <linux/kernel.h>
61799 +#include <linux/sched.h>
61800 +#include <linux/gracl.h>
61801 +#include <linux/grinternal.h>
61802 +
61803 +static const char *restab_log[] = {
61804 + [RLIMIT_CPU] = "RLIMIT_CPU",
61805 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61806 + [RLIMIT_DATA] = "RLIMIT_DATA",
61807 + [RLIMIT_STACK] = "RLIMIT_STACK",
61808 + [RLIMIT_CORE] = "RLIMIT_CORE",
61809 + [RLIMIT_RSS] = "RLIMIT_RSS",
61810 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61811 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61812 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61813 + [RLIMIT_AS] = "RLIMIT_AS",
61814 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61815 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61816 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61817 + [RLIMIT_NICE] = "RLIMIT_NICE",
61818 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61819 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61820 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61821 +};
61822 +
61823 +void
61824 +gr_log_resource(const struct task_struct *task,
61825 + const int res, const unsigned long wanted, const int gt)
61826 +{
61827 + const struct cred *cred;
61828 + unsigned long rlim;
61829 +
61830 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61831 + return;
61832 +
61833 + // not yet supported resource
61834 + if (unlikely(!restab_log[res]))
61835 + return;
61836 +
61837 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61838 + rlim = task->signal->rlim[res].rlim_max;
61839 + else
61840 + rlim = task->signal->rlim[res].rlim_cur;
61841 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61842 + return;
61843 +
61844 + rcu_read_lock();
61845 + cred = __task_cred(task);
61846 +
61847 + if (res == RLIMIT_NPROC &&
61848 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61849 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61850 + goto out_rcu_unlock;
61851 + else if (res == RLIMIT_MEMLOCK &&
61852 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61853 + goto out_rcu_unlock;
61854 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61855 + goto out_rcu_unlock;
61856 + rcu_read_unlock();
61857 +
61858 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61859 +
61860 + return;
61861 +out_rcu_unlock:
61862 + rcu_read_unlock();
61863 + return;
61864 +}
61865 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61866 new file mode 100644
61867 index 0000000..1d1b734
61868 --- /dev/null
61869 +++ b/grsecurity/gracl_segv.c
61870 @@ -0,0 +1,284 @@
61871 +#include <linux/kernel.h>
61872 +#include <linux/mm.h>
61873 +#include <asm/uaccess.h>
61874 +#include <asm/errno.h>
61875 +#include <asm/mman.h>
61876 +#include <net/sock.h>
61877 +#include <linux/file.h>
61878 +#include <linux/fs.h>
61879 +#include <linux/net.h>
61880 +#include <linux/in.h>
61881 +#include <linux/smp_lock.h>
61882 +#include <linux/slab.h>
61883 +#include <linux/types.h>
61884 +#include <linux/sched.h>
61885 +#include <linux/timer.h>
61886 +#include <linux/gracl.h>
61887 +#include <linux/grsecurity.h>
61888 +#include <linux/grinternal.h>
61889 +
61890 +static struct crash_uid *uid_set;
61891 +static unsigned short uid_used;
61892 +static DEFINE_SPINLOCK(gr_uid_lock);
61893 +extern rwlock_t gr_inode_lock;
61894 +extern struct acl_subject_label *
61895 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61896 + struct acl_role_label *role);
61897 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
61898 +
61899 +int
61900 +gr_init_uidset(void)
61901 +{
61902 + uid_set =
61903 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61904 + uid_used = 0;
61905 +
61906 + return uid_set ? 1 : 0;
61907 +}
61908 +
61909 +void
61910 +gr_free_uidset(void)
61911 +{
61912 + if (uid_set)
61913 + kfree(uid_set);
61914 +
61915 + return;
61916 +}
61917 +
61918 +int
61919 +gr_find_uid(const uid_t uid)
61920 +{
61921 + struct crash_uid *tmp = uid_set;
61922 + uid_t buid;
61923 + int low = 0, high = uid_used - 1, mid;
61924 +
61925 + while (high >= low) {
61926 + mid = (low + high) >> 1;
61927 + buid = tmp[mid].uid;
61928 + if (buid == uid)
61929 + return mid;
61930 + if (buid > uid)
61931 + high = mid - 1;
61932 + if (buid < uid)
61933 + low = mid + 1;
61934 + }
61935 +
61936 + return -1;
61937 +}
61938 +
61939 +static __inline__ void
61940 +gr_insertsort(void)
61941 +{
61942 + unsigned short i, j;
61943 + struct crash_uid index;
61944 +
61945 + for (i = 1; i < uid_used; i++) {
61946 + index = uid_set[i];
61947 + j = i;
61948 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61949 + uid_set[j] = uid_set[j - 1];
61950 + j--;
61951 + }
61952 + uid_set[j] = index;
61953 + }
61954 +
61955 + return;
61956 +}
61957 +
61958 +static __inline__ void
61959 +gr_insert_uid(const uid_t uid, const unsigned long expires)
61960 +{
61961 + int loc;
61962 +
61963 + if (uid_used == GR_UIDTABLE_MAX)
61964 + return;
61965 +
61966 + loc = gr_find_uid(uid);
61967 +
61968 + if (loc >= 0) {
61969 + uid_set[loc].expires = expires;
61970 + return;
61971 + }
61972 +
61973 + uid_set[uid_used].uid = uid;
61974 + uid_set[uid_used].expires = expires;
61975 + uid_used++;
61976 +
61977 + gr_insertsort();
61978 +
61979 + return;
61980 +}
61981 +
61982 +void
61983 +gr_remove_uid(const unsigned short loc)
61984 +{
61985 + unsigned short i;
61986 +
61987 + for (i = loc + 1; i < uid_used; i++)
61988 + uid_set[i - 1] = uid_set[i];
61989 +
61990 + uid_used--;
61991 +
61992 + return;
61993 +}
61994 +
61995 +int
61996 +gr_check_crash_uid(const uid_t uid)
61997 +{
61998 + int loc;
61999 + int ret = 0;
62000 +
62001 + if (unlikely(!gr_acl_is_enabled()))
62002 + return 0;
62003 +
62004 + spin_lock(&gr_uid_lock);
62005 + loc = gr_find_uid(uid);
62006 +
62007 + if (loc < 0)
62008 + goto out_unlock;
62009 +
62010 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62011 + gr_remove_uid(loc);
62012 + else
62013 + ret = 1;
62014 +
62015 +out_unlock:
62016 + spin_unlock(&gr_uid_lock);
62017 + return ret;
62018 +}
62019 +
62020 +static __inline__ int
62021 +proc_is_setxid(const struct cred *cred)
62022 +{
62023 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
62024 + cred->uid != cred->fsuid)
62025 + return 1;
62026 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62027 + cred->gid != cred->fsgid)
62028 + return 1;
62029 +
62030 + return 0;
62031 +}
62032 +
62033 +void
62034 +gr_handle_crash(struct task_struct *task, const int sig)
62035 +{
62036 + struct acl_subject_label *curr;
62037 + struct task_struct *tsk, *tsk2;
62038 + const struct cred *cred;
62039 + const struct cred *cred2;
62040 +
62041 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62042 + return;
62043 +
62044 + if (unlikely(!gr_acl_is_enabled()))
62045 + return;
62046 +
62047 + curr = task->acl;
62048 +
62049 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
62050 + return;
62051 +
62052 + if (time_before_eq(curr->expires, get_seconds())) {
62053 + curr->expires = 0;
62054 + curr->crashes = 0;
62055 + }
62056 +
62057 + curr->crashes++;
62058 +
62059 + if (!curr->expires)
62060 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62061 +
62062 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62063 + time_after(curr->expires, get_seconds())) {
62064 + rcu_read_lock();
62065 + cred = __task_cred(task);
62066 + if (cred->uid && proc_is_setxid(cred)) {
62067 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62068 + spin_lock(&gr_uid_lock);
62069 + gr_insert_uid(cred->uid, curr->expires);
62070 + spin_unlock(&gr_uid_lock);
62071 + curr->expires = 0;
62072 + curr->crashes = 0;
62073 + read_lock(&tasklist_lock);
62074 + do_each_thread(tsk2, tsk) {
62075 + cred2 = __task_cred(tsk);
62076 + if (tsk != task && cred2->uid == cred->uid)
62077 + gr_fake_force_sig(SIGKILL, tsk);
62078 + } while_each_thread(tsk2, tsk);
62079 + read_unlock(&tasklist_lock);
62080 + } else {
62081 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62082 + read_lock(&tasklist_lock);
62083 + read_lock(&grsec_exec_file_lock);
62084 + do_each_thread(tsk2, tsk) {
62085 + if (likely(tsk != task)) {
62086 + // if this thread has the same subject as the one that triggered
62087 + // RES_CRASH and it's the same binary, kill it
62088 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62089 + gr_fake_force_sig(SIGKILL, tsk);
62090 + }
62091 + } while_each_thread(tsk2, tsk);
62092 + read_unlock(&grsec_exec_file_lock);
62093 + read_unlock(&tasklist_lock);
62094 + }
62095 + rcu_read_unlock();
62096 + }
62097 +
62098 + return;
62099 +}
62100 +
62101 +int
62102 +gr_check_crash_exec(const struct file *filp)
62103 +{
62104 + struct acl_subject_label *curr;
62105 +
62106 + if (unlikely(!gr_acl_is_enabled()))
62107 + return 0;
62108 +
62109 + read_lock(&gr_inode_lock);
62110 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62111 + filp->f_path.dentry->d_inode->i_sb->s_dev,
62112 + current->role);
62113 + read_unlock(&gr_inode_lock);
62114 +
62115 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62116 + (!curr->crashes && !curr->expires))
62117 + return 0;
62118 +
62119 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62120 + time_after(curr->expires, get_seconds()))
62121 + return 1;
62122 + else if (time_before_eq(curr->expires, get_seconds())) {
62123 + curr->crashes = 0;
62124 + curr->expires = 0;
62125 + }
62126 +
62127 + return 0;
62128 +}
62129 +
62130 +void
62131 +gr_handle_alertkill(struct task_struct *task)
62132 +{
62133 + struct acl_subject_label *curracl;
62134 + __u32 curr_ip;
62135 + struct task_struct *p, *p2;
62136 +
62137 + if (unlikely(!gr_acl_is_enabled()))
62138 + return;
62139 +
62140 + curracl = task->acl;
62141 + curr_ip = task->signal->curr_ip;
62142 +
62143 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62144 + read_lock(&tasklist_lock);
62145 + do_each_thread(p2, p) {
62146 + if (p->signal->curr_ip == curr_ip)
62147 + gr_fake_force_sig(SIGKILL, p);
62148 + } while_each_thread(p2, p);
62149 + read_unlock(&tasklist_lock);
62150 + } else if (curracl->mode & GR_KILLPROC)
62151 + gr_fake_force_sig(SIGKILL, task);
62152 +
62153 + return;
62154 +}
62155 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62156 new file mode 100644
62157 index 0000000..9d83a69
62158 --- /dev/null
62159 +++ b/grsecurity/gracl_shm.c
62160 @@ -0,0 +1,40 @@
62161 +#include <linux/kernel.h>
62162 +#include <linux/mm.h>
62163 +#include <linux/sched.h>
62164 +#include <linux/file.h>
62165 +#include <linux/ipc.h>
62166 +#include <linux/gracl.h>
62167 +#include <linux/grsecurity.h>
62168 +#include <linux/grinternal.h>
62169 +
62170 +int
62171 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62172 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62173 +{
62174 + struct task_struct *task;
62175 +
62176 + if (!gr_acl_is_enabled())
62177 + return 1;
62178 +
62179 + rcu_read_lock();
62180 + read_lock(&tasklist_lock);
62181 +
62182 + task = find_task_by_vpid(shm_cprid);
62183 +
62184 + if (unlikely(!task))
62185 + task = find_task_by_vpid(shm_lapid);
62186 +
62187 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62188 + (task->pid == shm_lapid)) &&
62189 + (task->acl->mode & GR_PROTSHM) &&
62190 + (task->acl != current->acl))) {
62191 + read_unlock(&tasklist_lock);
62192 + rcu_read_unlock();
62193 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62194 + return 0;
62195 + }
62196 + read_unlock(&tasklist_lock);
62197 + rcu_read_unlock();
62198 +
62199 + return 1;
62200 +}
62201 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62202 new file mode 100644
62203 index 0000000..bc0be01
62204 --- /dev/null
62205 +++ b/grsecurity/grsec_chdir.c
62206 @@ -0,0 +1,19 @@
62207 +#include <linux/kernel.h>
62208 +#include <linux/sched.h>
62209 +#include <linux/fs.h>
62210 +#include <linux/file.h>
62211 +#include <linux/grsecurity.h>
62212 +#include <linux/grinternal.h>
62213 +
62214 +void
62215 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62216 +{
62217 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62218 + if ((grsec_enable_chdir && grsec_enable_group &&
62219 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62220 + !grsec_enable_group)) {
62221 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62222 + }
62223 +#endif
62224 + return;
62225 +}
62226 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62227 new file mode 100644
62228 index 0000000..197bdd5
62229 --- /dev/null
62230 +++ b/grsecurity/grsec_chroot.c
62231 @@ -0,0 +1,386 @@
62232 +#include <linux/kernel.h>
62233 +#include <linux/module.h>
62234 +#include <linux/sched.h>
62235 +#include <linux/file.h>
62236 +#include <linux/fs.h>
62237 +#include <linux/mount.h>
62238 +#include <linux/types.h>
62239 +#include <linux/pid_namespace.h>
62240 +#include <linux/grsecurity.h>
62241 +#include <linux/grinternal.h>
62242 +
62243 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62244 +{
62245 +#ifdef CONFIG_GRKERNSEC
62246 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62247 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62248 + task->gr_is_chrooted = 1;
62249 + else
62250 + task->gr_is_chrooted = 0;
62251 +
62252 + task->gr_chroot_dentry = path->dentry;
62253 +#endif
62254 + return;
62255 +}
62256 +
62257 +void gr_clear_chroot_entries(struct task_struct *task)
62258 +{
62259 +#ifdef CONFIG_GRKERNSEC
62260 + task->gr_is_chrooted = 0;
62261 + task->gr_chroot_dentry = NULL;
62262 +#endif
62263 + return;
62264 +}
62265 +
62266 +int
62267 +gr_handle_chroot_unix(const pid_t pid)
62268 +{
62269 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62270 + struct task_struct *p;
62271 +
62272 + if (unlikely(!grsec_enable_chroot_unix))
62273 + return 1;
62274 +
62275 + if (likely(!proc_is_chrooted(current)))
62276 + return 1;
62277 +
62278 + rcu_read_lock();
62279 + read_lock(&tasklist_lock);
62280 +
62281 + p = find_task_by_vpid_unrestricted(pid);
62282 + if (unlikely(p && !have_same_root(current, p))) {
62283 + read_unlock(&tasklist_lock);
62284 + rcu_read_unlock();
62285 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62286 + return 0;
62287 + }
62288 + read_unlock(&tasklist_lock);
62289 + rcu_read_unlock();
62290 +#endif
62291 + return 1;
62292 +}
62293 +
62294 +int
62295 +gr_handle_chroot_nice(void)
62296 +{
62297 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62298 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62299 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62300 + return -EPERM;
62301 + }
62302 +#endif
62303 + return 0;
62304 +}
62305 +
62306 +int
62307 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62308 +{
62309 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62310 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62311 + && proc_is_chrooted(current)) {
62312 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62313 + return -EACCES;
62314 + }
62315 +#endif
62316 + return 0;
62317 +}
62318 +
62319 +int
62320 +gr_handle_chroot_rawio(const struct inode *inode)
62321 +{
62322 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62323 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62324 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62325 + return 1;
62326 +#endif
62327 + return 0;
62328 +}
62329 +
62330 +int
62331 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62332 +{
62333 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62334 + struct task_struct *p;
62335 + int ret = 0;
62336 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62337 + return ret;
62338 +
62339 + read_lock(&tasklist_lock);
62340 + do_each_pid_task(pid, type, p) {
62341 + if (!have_same_root(current, p)) {
62342 + ret = 1;
62343 + goto out;
62344 + }
62345 + } while_each_pid_task(pid, type, p);
62346 +out:
62347 + read_unlock(&tasklist_lock);
62348 + return ret;
62349 +#endif
62350 + return 0;
62351 +}
62352 +
62353 +int
62354 +gr_pid_is_chrooted(struct task_struct *p)
62355 +{
62356 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62357 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62358 + return 0;
62359 +
62360 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62361 + !have_same_root(current, p)) {
62362 + return 1;
62363 + }
62364 +#endif
62365 + return 0;
62366 +}
62367 +
62368 +EXPORT_SYMBOL(gr_pid_is_chrooted);
62369 +
62370 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62371 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62372 +{
62373 + struct dentry *dentry = (struct dentry *)u_dentry;
62374 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62375 + struct dentry *realroot;
62376 + struct vfsmount *realrootmnt;
62377 + struct dentry *currentroot;
62378 + struct vfsmount *currentmnt;
62379 + struct task_struct *reaper = &init_task;
62380 + int ret = 1;
62381 +
62382 + read_lock(&reaper->fs->lock);
62383 + realrootmnt = mntget(reaper->fs->root.mnt);
62384 + realroot = dget(reaper->fs->root.dentry);
62385 + read_unlock(&reaper->fs->lock);
62386 +
62387 + read_lock(&current->fs->lock);
62388 + currentmnt = mntget(current->fs->root.mnt);
62389 + currentroot = dget(current->fs->root.dentry);
62390 + read_unlock(&current->fs->lock);
62391 +
62392 + spin_lock(&dcache_lock);
62393 + for (;;) {
62394 + if (unlikely((dentry == realroot && mnt == realrootmnt)
62395 + || (dentry == currentroot && mnt == currentmnt)))
62396 + break;
62397 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62398 + if (mnt->mnt_parent == mnt)
62399 + break;
62400 + dentry = mnt->mnt_mountpoint;
62401 + mnt = mnt->mnt_parent;
62402 + continue;
62403 + }
62404 + dentry = dentry->d_parent;
62405 + }
62406 + spin_unlock(&dcache_lock);
62407 +
62408 + dput(currentroot);
62409 + mntput(currentmnt);
62410 +
62411 + /* access is outside of chroot */
62412 + if (dentry == realroot && mnt == realrootmnt)
62413 + ret = 0;
62414 +
62415 + dput(realroot);
62416 + mntput(realrootmnt);
62417 + return ret;
62418 +}
62419 +#endif
62420 +
62421 +int
62422 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62423 +{
62424 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62425 + if (!grsec_enable_chroot_fchdir)
62426 + return 1;
62427 +
62428 + if (!proc_is_chrooted(current))
62429 + return 1;
62430 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62431 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62432 + return 0;
62433 + }
62434 +#endif
62435 + return 1;
62436 +}
62437 +
62438 +int
62439 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62440 + const time_t shm_createtime)
62441 +{
62442 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62443 + struct task_struct *p;
62444 + time_t starttime;
62445 +
62446 + if (unlikely(!grsec_enable_chroot_shmat))
62447 + return 1;
62448 +
62449 + if (likely(!proc_is_chrooted(current)))
62450 + return 1;
62451 +
62452 + rcu_read_lock();
62453 + read_lock(&tasklist_lock);
62454 +
62455 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62456 + starttime = p->start_time.tv_sec;
62457 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62458 + if (have_same_root(current, p)) {
62459 + goto allow;
62460 + } else {
62461 + read_unlock(&tasklist_lock);
62462 + rcu_read_unlock();
62463 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62464 + return 0;
62465 + }
62466 + }
62467 + /* creator exited, pid reuse, fall through to next check */
62468 + }
62469 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62470 + if (unlikely(!have_same_root(current, p))) {
62471 + read_unlock(&tasklist_lock);
62472 + rcu_read_unlock();
62473 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62474 + return 0;
62475 + }
62476 + }
62477 +
62478 +allow:
62479 + read_unlock(&tasklist_lock);
62480 + rcu_read_unlock();
62481 +#endif
62482 + return 1;
62483 +}
62484 +
62485 +void
62486 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62487 +{
62488 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62489 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62490 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62491 +#endif
62492 + return;
62493 +}
62494 +
62495 +int
62496 +gr_handle_chroot_mknod(const struct dentry *dentry,
62497 + const struct vfsmount *mnt, const int mode)
62498 +{
62499 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62500 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62501 + proc_is_chrooted(current)) {
62502 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62503 + return -EPERM;
62504 + }
62505 +#endif
62506 + return 0;
62507 +}
62508 +
62509 +int
62510 +gr_handle_chroot_mount(const struct dentry *dentry,
62511 + const struct vfsmount *mnt, const char *dev_name)
62512 +{
62513 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62514 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62515 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62516 + return -EPERM;
62517 + }
62518 +#endif
62519 + return 0;
62520 +}
62521 +
62522 +int
62523 +gr_handle_chroot_pivot(void)
62524 +{
62525 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62526 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62527 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62528 + return -EPERM;
62529 + }
62530 +#endif
62531 + return 0;
62532 +}
62533 +
62534 +int
62535 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62536 +{
62537 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62538 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62539 + !gr_is_outside_chroot(dentry, mnt)) {
62540 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62541 + return -EPERM;
62542 + }
62543 +#endif
62544 + return 0;
62545 +}
62546 +
62547 +extern const char *captab_log[];
62548 +extern int captab_log_entries;
62549 +
62550 +int
62551 +gr_chroot_is_capable(const int cap)
62552 +{
62553 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62554 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62555 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62556 + if (cap_raised(chroot_caps, cap)) {
62557 + const struct cred *creds = current_cred();
62558 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62559 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62560 + }
62561 + return 0;
62562 + }
62563 + }
62564 +#endif
62565 + return 1;
62566 +}
62567 +
62568 +int
62569 +gr_chroot_is_capable_nolog(const int cap)
62570 +{
62571 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62572 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62573 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62574 + if (cap_raised(chroot_caps, cap)) {
62575 + return 0;
62576 + }
62577 + }
62578 +#endif
62579 + return 1;
62580 +}
62581 +
62582 +int
62583 +gr_handle_chroot_sysctl(const int op)
62584 +{
62585 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62586 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62587 + && (op & MAY_WRITE))
62588 + return -EACCES;
62589 +#endif
62590 + return 0;
62591 +}
62592 +
62593 +void
62594 +gr_handle_chroot_chdir(struct path *path)
62595 +{
62596 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62597 + if (grsec_enable_chroot_chdir)
62598 + set_fs_pwd(current->fs, path);
62599 +#endif
62600 + return;
62601 +}
62602 +
62603 +int
62604 +gr_handle_chroot_chmod(const struct dentry *dentry,
62605 + const struct vfsmount *mnt, const int mode)
62606 +{
62607 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62608 + /* allow chmod +s on directories, but not on files */
62609 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62610 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62611 + proc_is_chrooted(current)) {
62612 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62613 + return -EPERM;
62614 + }
62615 +#endif
62616 + return 0;
62617 +}
62618 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62619 new file mode 100644
62620 index 0000000..40545bf
62621 --- /dev/null
62622 +++ b/grsecurity/grsec_disabled.c
62623 @@ -0,0 +1,437 @@
62624 +#include <linux/kernel.h>
62625 +#include <linux/module.h>
62626 +#include <linux/sched.h>
62627 +#include <linux/file.h>
62628 +#include <linux/fs.h>
62629 +#include <linux/kdev_t.h>
62630 +#include <linux/net.h>
62631 +#include <linux/in.h>
62632 +#include <linux/ip.h>
62633 +#include <linux/skbuff.h>
62634 +#include <linux/sysctl.h>
62635 +
62636 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62637 +void
62638 +pax_set_initial_flags(struct linux_binprm *bprm)
62639 +{
62640 + return;
62641 +}
62642 +#endif
62643 +
62644 +#ifdef CONFIG_SYSCTL
62645 +__u32
62646 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62647 +{
62648 + return 0;
62649 +}
62650 +#endif
62651 +
62652 +#ifdef CONFIG_TASKSTATS
62653 +int gr_is_taskstats_denied(int pid)
62654 +{
62655 + return 0;
62656 +}
62657 +#endif
62658 +
62659 +int
62660 +gr_acl_is_enabled(void)
62661 +{
62662 + return 0;
62663 +}
62664 +
62665 +void
62666 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62667 +{
62668 + return;
62669 +}
62670 +
62671 +int
62672 +gr_handle_rawio(const struct inode *inode)
62673 +{
62674 + return 0;
62675 +}
62676 +
62677 +void
62678 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62679 +{
62680 + return;
62681 +}
62682 +
62683 +int
62684 +gr_handle_ptrace(struct task_struct *task, const long request)
62685 +{
62686 + return 0;
62687 +}
62688 +
62689 +int
62690 +gr_handle_proc_ptrace(struct task_struct *task)
62691 +{
62692 + return 0;
62693 +}
62694 +
62695 +void
62696 +gr_learn_resource(const struct task_struct *task,
62697 + const int res, const unsigned long wanted, const int gt)
62698 +{
62699 + return;
62700 +}
62701 +
62702 +int
62703 +gr_set_acls(const int type)
62704 +{
62705 + return 0;
62706 +}
62707 +
62708 +int
62709 +gr_check_hidden_task(const struct task_struct *tsk)
62710 +{
62711 + return 0;
62712 +}
62713 +
62714 +int
62715 +gr_check_protected_task(const struct task_struct *task)
62716 +{
62717 + return 0;
62718 +}
62719 +
62720 +int
62721 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62722 +{
62723 + return 0;
62724 +}
62725 +
62726 +void
62727 +gr_copy_label(struct task_struct *tsk)
62728 +{
62729 + return;
62730 +}
62731 +
62732 +void
62733 +gr_set_pax_flags(struct task_struct *task)
62734 +{
62735 + return;
62736 +}
62737 +
62738 +int
62739 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62740 + const int unsafe_share)
62741 +{
62742 + return 0;
62743 +}
62744 +
62745 +void
62746 +gr_handle_delete(const ino_t ino, const dev_t dev)
62747 +{
62748 + return;
62749 +}
62750 +
62751 +void
62752 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62753 +{
62754 + return;
62755 +}
62756 +
62757 +void
62758 +gr_handle_crash(struct task_struct *task, const int sig)
62759 +{
62760 + return;
62761 +}
62762 +
62763 +int
62764 +gr_check_crash_exec(const struct file *filp)
62765 +{
62766 + return 0;
62767 +}
62768 +
62769 +int
62770 +gr_check_crash_uid(const uid_t uid)
62771 +{
62772 + return 0;
62773 +}
62774 +
62775 +void
62776 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62777 + struct dentry *old_dentry,
62778 + struct dentry *new_dentry,
62779 + struct vfsmount *mnt, const __u8 replace)
62780 +{
62781 + return;
62782 +}
62783 +
62784 +int
62785 +gr_search_socket(const int family, const int type, const int protocol)
62786 +{
62787 + return 1;
62788 +}
62789 +
62790 +int
62791 +gr_search_connectbind(const int mode, const struct socket *sock,
62792 + const struct sockaddr_in *addr)
62793 +{
62794 + return 0;
62795 +}
62796 +
62797 +void
62798 +gr_handle_alertkill(struct task_struct *task)
62799 +{
62800 + return;
62801 +}
62802 +
62803 +__u32
62804 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62805 +{
62806 + return 1;
62807 +}
62808 +
62809 +__u32
62810 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62811 + const struct vfsmount * mnt)
62812 +{
62813 + return 1;
62814 +}
62815 +
62816 +__u32
62817 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62818 + int acc_mode)
62819 +{
62820 + return 1;
62821 +}
62822 +
62823 +__u32
62824 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62825 +{
62826 + return 1;
62827 +}
62828 +
62829 +__u32
62830 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62831 +{
62832 + return 1;
62833 +}
62834 +
62835 +int
62836 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62837 + unsigned int *vm_flags)
62838 +{
62839 + return 1;
62840 +}
62841 +
62842 +__u32
62843 +gr_acl_handle_truncate(const struct dentry * dentry,
62844 + const struct vfsmount * mnt)
62845 +{
62846 + return 1;
62847 +}
62848 +
62849 +__u32
62850 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62851 +{
62852 + return 1;
62853 +}
62854 +
62855 +__u32
62856 +gr_acl_handle_access(const struct dentry * dentry,
62857 + const struct vfsmount * mnt, const int fmode)
62858 +{
62859 + return 1;
62860 +}
62861 +
62862 +__u32
62863 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62864 + umode_t *mode)
62865 +{
62866 + return 1;
62867 +}
62868 +
62869 +__u32
62870 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62871 +{
62872 + return 1;
62873 +}
62874 +
62875 +__u32
62876 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62877 +{
62878 + return 1;
62879 +}
62880 +
62881 +void
62882 +grsecurity_init(void)
62883 +{
62884 + return;
62885 +}
62886 +
62887 +umode_t gr_acl_umask(void)
62888 +{
62889 + return 0;
62890 +}
62891 +
62892 +__u32
62893 +gr_acl_handle_mknod(const struct dentry * new_dentry,
62894 + const struct dentry * parent_dentry,
62895 + const struct vfsmount * parent_mnt,
62896 + const int mode)
62897 +{
62898 + return 1;
62899 +}
62900 +
62901 +__u32
62902 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
62903 + const struct dentry * parent_dentry,
62904 + const struct vfsmount * parent_mnt)
62905 +{
62906 + return 1;
62907 +}
62908 +
62909 +__u32
62910 +gr_acl_handle_symlink(const struct dentry * new_dentry,
62911 + const struct dentry * parent_dentry,
62912 + const struct vfsmount * parent_mnt, const char *from)
62913 +{
62914 + return 1;
62915 +}
62916 +
62917 +__u32
62918 +gr_acl_handle_link(const struct dentry * new_dentry,
62919 + const struct dentry * parent_dentry,
62920 + const struct vfsmount * parent_mnt,
62921 + const struct dentry * old_dentry,
62922 + const struct vfsmount * old_mnt, const char *to)
62923 +{
62924 + return 1;
62925 +}
62926 +
62927 +int
62928 +gr_acl_handle_rename(const struct dentry *new_dentry,
62929 + const struct dentry *parent_dentry,
62930 + const struct vfsmount *parent_mnt,
62931 + const struct dentry *old_dentry,
62932 + const struct inode *old_parent_inode,
62933 + const struct vfsmount *old_mnt, const char *newname)
62934 +{
62935 + return 0;
62936 +}
62937 +
62938 +int
62939 +gr_acl_handle_filldir(const struct file *file, const char *name,
62940 + const int namelen, const ino_t ino)
62941 +{
62942 + return 1;
62943 +}
62944 +
62945 +int
62946 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62947 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62948 +{
62949 + return 1;
62950 +}
62951 +
62952 +int
62953 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62954 +{
62955 + return 0;
62956 +}
62957 +
62958 +int
62959 +gr_search_accept(const struct socket *sock)
62960 +{
62961 + return 0;
62962 +}
62963 +
62964 +int
62965 +gr_search_listen(const struct socket *sock)
62966 +{
62967 + return 0;
62968 +}
62969 +
62970 +int
62971 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62972 +{
62973 + return 0;
62974 +}
62975 +
62976 +__u32
62977 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62978 +{
62979 + return 1;
62980 +}
62981 +
62982 +__u32
62983 +gr_acl_handle_creat(const struct dentry * dentry,
62984 + const struct dentry * p_dentry,
62985 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62986 + const int imode)
62987 +{
62988 + return 1;
62989 +}
62990 +
62991 +void
62992 +gr_acl_handle_exit(void)
62993 +{
62994 + return;
62995 +}
62996 +
62997 +int
62998 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62999 +{
63000 + return 1;
63001 +}
63002 +
63003 +void
63004 +gr_set_role_label(const uid_t uid, const gid_t gid)
63005 +{
63006 + return;
63007 +}
63008 +
63009 +int
63010 +gr_acl_handle_procpidmem(const struct task_struct *task)
63011 +{
63012 + return 0;
63013 +}
63014 +
63015 +int
63016 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63017 +{
63018 + return 0;
63019 +}
63020 +
63021 +int
63022 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63023 +{
63024 + return 0;
63025 +}
63026 +
63027 +void
63028 +gr_set_kernel_label(struct task_struct *task)
63029 +{
63030 + return;
63031 +}
63032 +
63033 +int
63034 +gr_check_user_change(int real, int effective, int fs)
63035 +{
63036 + return 0;
63037 +}
63038 +
63039 +int
63040 +gr_check_group_change(int real, int effective, int fs)
63041 +{
63042 + return 0;
63043 +}
63044 +
63045 +int gr_acl_enable_at_secure(void)
63046 +{
63047 + return 0;
63048 +}
63049 +
63050 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63051 +{
63052 + return dentry->d_inode->i_sb->s_dev;
63053 +}
63054 +
63055 +EXPORT_SYMBOL(gr_learn_resource);
63056 +EXPORT_SYMBOL(gr_set_kernel_label);
63057 +#ifdef CONFIG_SECURITY
63058 +EXPORT_SYMBOL(gr_check_user_change);
63059 +EXPORT_SYMBOL(gr_check_group_change);
63060 +#endif
63061 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63062 new file mode 100644
63063 index 0000000..a96e155
63064 --- /dev/null
63065 +++ b/grsecurity/grsec_exec.c
63066 @@ -0,0 +1,204 @@
63067 +#include <linux/kernel.h>
63068 +#include <linux/sched.h>
63069 +#include <linux/file.h>
63070 +#include <linux/binfmts.h>
63071 +#include <linux/smp_lock.h>
63072 +#include <linux/fs.h>
63073 +#include <linux/types.h>
63074 +#include <linux/grdefs.h>
63075 +#include <linux/grinternal.h>
63076 +#include <linux/capability.h>
63077 +#include <linux/compat.h>
63078 +#include <linux/module.h>
63079 +
63080 +#include <asm/uaccess.h>
63081 +
63082 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63083 +static char gr_exec_arg_buf[132];
63084 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63085 +#endif
63086 +
63087 +void
63088 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63089 +{
63090 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63091 + char *grarg = gr_exec_arg_buf;
63092 + unsigned int i, x, execlen = 0;
63093 + char c;
63094 +
63095 + if (!((grsec_enable_execlog && grsec_enable_group &&
63096 + in_group_p(grsec_audit_gid))
63097 + || (grsec_enable_execlog && !grsec_enable_group)))
63098 + return;
63099 +
63100 + mutex_lock(&gr_exec_arg_mutex);
63101 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63102 +
63103 + if (unlikely(argv == NULL))
63104 + goto log;
63105 +
63106 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63107 + const char __user *p;
63108 + unsigned int len;
63109 +
63110 + if (copy_from_user(&p, argv + i, sizeof(p)))
63111 + goto log;
63112 + if (!p)
63113 + goto log;
63114 + len = strnlen_user(p, 128 - execlen);
63115 + if (len > 128 - execlen)
63116 + len = 128 - execlen;
63117 + else if (len > 0)
63118 + len--;
63119 + if (copy_from_user(grarg + execlen, p, len))
63120 + goto log;
63121 +
63122 + /* rewrite unprintable characters */
63123 + for (x = 0; x < len; x++) {
63124 + c = *(grarg + execlen + x);
63125 + if (c < 32 || c > 126)
63126 + *(grarg + execlen + x) = ' ';
63127 + }
63128 +
63129 + execlen += len;
63130 + *(grarg + execlen) = ' ';
63131 + *(grarg + execlen + 1) = '\0';
63132 + execlen++;
63133 + }
63134 +
63135 + log:
63136 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63137 + bprm->file->f_path.mnt, grarg);
63138 + mutex_unlock(&gr_exec_arg_mutex);
63139 +#endif
63140 + return;
63141 +}
63142 +
63143 +#ifdef CONFIG_COMPAT
63144 +void
63145 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63146 +{
63147 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63148 + char *grarg = gr_exec_arg_buf;
63149 + unsigned int i, x, execlen = 0;
63150 + char c;
63151 +
63152 + if (!((grsec_enable_execlog && grsec_enable_group &&
63153 + in_group_p(grsec_audit_gid))
63154 + || (grsec_enable_execlog && !grsec_enable_group)))
63155 + return;
63156 +
63157 + mutex_lock(&gr_exec_arg_mutex);
63158 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63159 +
63160 + if (unlikely(argv == NULL))
63161 + goto log;
63162 +
63163 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63164 + compat_uptr_t p;
63165 + unsigned int len;
63166 +
63167 + if (get_user(p, argv + i))
63168 + goto log;
63169 + len = strnlen_user(compat_ptr(p), 128 - execlen);
63170 + if (len > 128 - execlen)
63171 + len = 128 - execlen;
63172 + else if (len > 0)
63173 + len--;
63174 + else
63175 + goto log;
63176 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63177 + goto log;
63178 +
63179 + /* rewrite unprintable characters */
63180 + for (x = 0; x < len; x++) {
63181 + c = *(grarg + execlen + x);
63182 + if (c < 32 || c > 126)
63183 + *(grarg + execlen + x) = ' ';
63184 + }
63185 +
63186 + execlen += len;
63187 + *(grarg + execlen) = ' ';
63188 + *(grarg + execlen + 1) = '\0';
63189 + execlen++;
63190 + }
63191 +
63192 + log:
63193 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63194 + bprm->file->f_path.mnt, grarg);
63195 + mutex_unlock(&gr_exec_arg_mutex);
63196 +#endif
63197 + return;
63198 +}
63199 +#endif
63200 +
63201 +#ifdef CONFIG_GRKERNSEC
63202 +extern int gr_acl_is_capable(const int cap);
63203 +extern int gr_acl_is_capable_nolog(const int cap);
63204 +extern int gr_chroot_is_capable(const int cap);
63205 +extern int gr_chroot_is_capable_nolog(const int cap);
63206 +#endif
63207 +
63208 +const char *captab_log[] = {
63209 + "CAP_CHOWN",
63210 + "CAP_DAC_OVERRIDE",
63211 + "CAP_DAC_READ_SEARCH",
63212 + "CAP_FOWNER",
63213 + "CAP_FSETID",
63214 + "CAP_KILL",
63215 + "CAP_SETGID",
63216 + "CAP_SETUID",
63217 + "CAP_SETPCAP",
63218 + "CAP_LINUX_IMMUTABLE",
63219 + "CAP_NET_BIND_SERVICE",
63220 + "CAP_NET_BROADCAST",
63221 + "CAP_NET_ADMIN",
63222 + "CAP_NET_RAW",
63223 + "CAP_IPC_LOCK",
63224 + "CAP_IPC_OWNER",
63225 + "CAP_SYS_MODULE",
63226 + "CAP_SYS_RAWIO",
63227 + "CAP_SYS_CHROOT",
63228 + "CAP_SYS_PTRACE",
63229 + "CAP_SYS_PACCT",
63230 + "CAP_SYS_ADMIN",
63231 + "CAP_SYS_BOOT",
63232 + "CAP_SYS_NICE",
63233 + "CAP_SYS_RESOURCE",
63234 + "CAP_SYS_TIME",
63235 + "CAP_SYS_TTY_CONFIG",
63236 + "CAP_MKNOD",
63237 + "CAP_LEASE",
63238 + "CAP_AUDIT_WRITE",
63239 + "CAP_AUDIT_CONTROL",
63240 + "CAP_SETFCAP",
63241 + "CAP_MAC_OVERRIDE",
63242 + "CAP_MAC_ADMIN"
63243 +};
63244 +
63245 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63246 +
63247 +int gr_is_capable(const int cap)
63248 +{
63249 +#ifdef CONFIG_GRKERNSEC
63250 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63251 + return 1;
63252 + return 0;
63253 +#else
63254 + return 1;
63255 +#endif
63256 +}
63257 +
63258 +int gr_is_capable_nolog(const int cap)
63259 +{
63260 +#ifdef CONFIG_GRKERNSEC
63261 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63262 + return 1;
63263 + return 0;
63264 +#else
63265 + return 1;
63266 +#endif
63267 +}
63268 +
63269 +EXPORT_SYMBOL(gr_is_capable);
63270 +EXPORT_SYMBOL(gr_is_capable_nolog);
63271 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63272 new file mode 100644
63273 index 0000000..d3ee748
63274 --- /dev/null
63275 +++ b/grsecurity/grsec_fifo.c
63276 @@ -0,0 +1,24 @@
63277 +#include <linux/kernel.h>
63278 +#include <linux/sched.h>
63279 +#include <linux/fs.h>
63280 +#include <linux/file.h>
63281 +#include <linux/grinternal.h>
63282 +
63283 +int
63284 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63285 + const struct dentry *dir, const int flag, const int acc_mode)
63286 +{
63287 +#ifdef CONFIG_GRKERNSEC_FIFO
63288 + const struct cred *cred = current_cred();
63289 +
63290 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63291 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63292 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63293 + (cred->fsuid != dentry->d_inode->i_uid)) {
63294 + if (!inode_permission(dentry->d_inode, acc_mode))
63295 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63296 + return -EACCES;
63297 + }
63298 +#endif
63299 + return 0;
63300 +}
63301 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63302 new file mode 100644
63303 index 0000000..8ca18bf
63304 --- /dev/null
63305 +++ b/grsecurity/grsec_fork.c
63306 @@ -0,0 +1,23 @@
63307 +#include <linux/kernel.h>
63308 +#include <linux/sched.h>
63309 +#include <linux/grsecurity.h>
63310 +#include <linux/grinternal.h>
63311 +#include <linux/errno.h>
63312 +
63313 +void
63314 +gr_log_forkfail(const int retval)
63315 +{
63316 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63317 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63318 + switch (retval) {
63319 + case -EAGAIN:
63320 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63321 + break;
63322 + case -ENOMEM:
63323 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63324 + break;
63325 + }
63326 + }
63327 +#endif
63328 + return;
63329 +}
63330 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63331 new file mode 100644
63332 index 0000000..1e995d3
63333 --- /dev/null
63334 +++ b/grsecurity/grsec_init.c
63335 @@ -0,0 +1,278 @@
63336 +#include <linux/kernel.h>
63337 +#include <linux/sched.h>
63338 +#include <linux/mm.h>
63339 +#include <linux/smp_lock.h>
63340 +#include <linux/gracl.h>
63341 +#include <linux/slab.h>
63342 +#include <linux/vmalloc.h>
63343 +#include <linux/percpu.h>
63344 +#include <linux/module.h>
63345 +
63346 +int grsec_enable_ptrace_readexec;
63347 +int grsec_enable_setxid;
63348 +int grsec_enable_brute;
63349 +int grsec_enable_link;
63350 +int grsec_enable_dmesg;
63351 +int grsec_enable_harden_ptrace;
63352 +int grsec_enable_fifo;
63353 +int grsec_enable_execlog;
63354 +int grsec_enable_signal;
63355 +int grsec_enable_forkfail;
63356 +int grsec_enable_audit_ptrace;
63357 +int grsec_enable_time;
63358 +int grsec_enable_audit_textrel;
63359 +int grsec_enable_group;
63360 +int grsec_audit_gid;
63361 +int grsec_enable_chdir;
63362 +int grsec_enable_mount;
63363 +int grsec_enable_rofs;
63364 +int grsec_enable_chroot_findtask;
63365 +int grsec_enable_chroot_mount;
63366 +int grsec_enable_chroot_shmat;
63367 +int grsec_enable_chroot_fchdir;
63368 +int grsec_enable_chroot_double;
63369 +int grsec_enable_chroot_pivot;
63370 +int grsec_enable_chroot_chdir;
63371 +int grsec_enable_chroot_chmod;
63372 +int grsec_enable_chroot_mknod;
63373 +int grsec_enable_chroot_nice;
63374 +int grsec_enable_chroot_execlog;
63375 +int grsec_enable_chroot_caps;
63376 +int grsec_enable_chroot_sysctl;
63377 +int grsec_enable_chroot_unix;
63378 +int grsec_enable_tpe;
63379 +int grsec_tpe_gid;
63380 +int grsec_enable_blackhole;
63381 +#ifdef CONFIG_IPV6_MODULE
63382 +EXPORT_SYMBOL(grsec_enable_blackhole);
63383 +#endif
63384 +int grsec_lastack_retries;
63385 +int grsec_enable_tpe_all;
63386 +int grsec_enable_tpe_invert;
63387 +int grsec_enable_socket_all;
63388 +int grsec_socket_all_gid;
63389 +int grsec_enable_socket_client;
63390 +int grsec_socket_client_gid;
63391 +int grsec_enable_socket_server;
63392 +int grsec_socket_server_gid;
63393 +int grsec_resource_logging;
63394 +int grsec_disable_privio;
63395 +int grsec_enable_log_rwxmaps;
63396 +int grsec_lock;
63397 +
63398 +DEFINE_SPINLOCK(grsec_alert_lock);
63399 +unsigned long grsec_alert_wtime = 0;
63400 +unsigned long grsec_alert_fyet = 0;
63401 +
63402 +DEFINE_SPINLOCK(grsec_audit_lock);
63403 +
63404 +DEFINE_RWLOCK(grsec_exec_file_lock);
63405 +
63406 +char *gr_shared_page[4];
63407 +
63408 +char *gr_alert_log_fmt;
63409 +char *gr_audit_log_fmt;
63410 +char *gr_alert_log_buf;
63411 +char *gr_audit_log_buf;
63412 +
63413 +extern struct gr_arg *gr_usermode;
63414 +extern unsigned char *gr_system_salt;
63415 +extern unsigned char *gr_system_sum;
63416 +
63417 +void __init
63418 +grsecurity_init(void)
63419 +{
63420 + int j;
63421 + /* create the per-cpu shared pages */
63422 +
63423 +#ifdef CONFIG_X86
63424 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63425 +#endif
63426 +
63427 + for (j = 0; j < 4; j++) {
63428 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63429 + if (gr_shared_page[j] == NULL) {
63430 + panic("Unable to allocate grsecurity shared page");
63431 + return;
63432 + }
63433 + }
63434 +
63435 + /* allocate log buffers */
63436 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63437 + if (!gr_alert_log_fmt) {
63438 + panic("Unable to allocate grsecurity alert log format buffer");
63439 + return;
63440 + }
63441 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63442 + if (!gr_audit_log_fmt) {
63443 + panic("Unable to allocate grsecurity audit log format buffer");
63444 + return;
63445 + }
63446 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63447 + if (!gr_alert_log_buf) {
63448 + panic("Unable to allocate grsecurity alert log buffer");
63449 + return;
63450 + }
63451 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63452 + if (!gr_audit_log_buf) {
63453 + panic("Unable to allocate grsecurity audit log buffer");
63454 + return;
63455 + }
63456 +
63457 + /* allocate memory for authentication structure */
63458 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63459 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63460 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63461 +
63462 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63463 + panic("Unable to allocate grsecurity authentication structure");
63464 + return;
63465 + }
63466 +
63467 +
63468 +#ifdef CONFIG_GRKERNSEC_IO
63469 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63470 + grsec_disable_privio = 1;
63471 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63472 + grsec_disable_privio = 1;
63473 +#else
63474 + grsec_disable_privio = 0;
63475 +#endif
63476 +#endif
63477 +
63478 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63479 + /* for backward compatibility, tpe_invert always defaults to on if
63480 + enabled in the kernel
63481 + */
63482 + grsec_enable_tpe_invert = 1;
63483 +#endif
63484 +
63485 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63486 +#ifndef CONFIG_GRKERNSEC_SYSCTL
63487 + grsec_lock = 1;
63488 +#endif
63489 +
63490 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63491 + grsec_enable_audit_textrel = 1;
63492 +#endif
63493 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63494 + grsec_enable_log_rwxmaps = 1;
63495 +#endif
63496 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63497 + grsec_enable_group = 1;
63498 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63499 +#endif
63500 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63501 + grsec_enable_chdir = 1;
63502 +#endif
63503 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63504 + grsec_enable_harden_ptrace = 1;
63505 +#endif
63506 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63507 + grsec_enable_mount = 1;
63508 +#endif
63509 +#ifdef CONFIG_GRKERNSEC_LINK
63510 + grsec_enable_link = 1;
63511 +#endif
63512 +#ifdef CONFIG_GRKERNSEC_BRUTE
63513 + grsec_enable_brute = 1;
63514 +#endif
63515 +#ifdef CONFIG_GRKERNSEC_DMESG
63516 + grsec_enable_dmesg = 1;
63517 +#endif
63518 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63519 + grsec_enable_blackhole = 1;
63520 + grsec_lastack_retries = 4;
63521 +#endif
63522 +#ifdef CONFIG_GRKERNSEC_FIFO
63523 + grsec_enable_fifo = 1;
63524 +#endif
63525 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63526 + grsec_enable_execlog = 1;
63527 +#endif
63528 +#ifdef CONFIG_GRKERNSEC_SETXID
63529 + grsec_enable_setxid = 1;
63530 +#endif
63531 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63532 + grsec_enable_ptrace_readexec = 1;
63533 +#endif
63534 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63535 + grsec_enable_signal = 1;
63536 +#endif
63537 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63538 + grsec_enable_forkfail = 1;
63539 +#endif
63540 +#ifdef CONFIG_GRKERNSEC_TIME
63541 + grsec_enable_time = 1;
63542 +#endif
63543 +#ifdef CONFIG_GRKERNSEC_RESLOG
63544 + grsec_resource_logging = 1;
63545 +#endif
63546 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63547 + grsec_enable_chroot_findtask = 1;
63548 +#endif
63549 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63550 + grsec_enable_chroot_unix = 1;
63551 +#endif
63552 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63553 + grsec_enable_chroot_mount = 1;
63554 +#endif
63555 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63556 + grsec_enable_chroot_fchdir = 1;
63557 +#endif
63558 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63559 + grsec_enable_chroot_shmat = 1;
63560 +#endif
63561 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63562 + grsec_enable_audit_ptrace = 1;
63563 +#endif
63564 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63565 + grsec_enable_chroot_double = 1;
63566 +#endif
63567 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63568 + grsec_enable_chroot_pivot = 1;
63569 +#endif
63570 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63571 + grsec_enable_chroot_chdir = 1;
63572 +#endif
63573 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63574 + grsec_enable_chroot_chmod = 1;
63575 +#endif
63576 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63577 + grsec_enable_chroot_mknod = 1;
63578 +#endif
63579 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63580 + grsec_enable_chroot_nice = 1;
63581 +#endif
63582 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63583 + grsec_enable_chroot_execlog = 1;
63584 +#endif
63585 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63586 + grsec_enable_chroot_caps = 1;
63587 +#endif
63588 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63589 + grsec_enable_chroot_sysctl = 1;
63590 +#endif
63591 +#ifdef CONFIG_GRKERNSEC_TPE
63592 + grsec_enable_tpe = 1;
63593 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63594 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63595 + grsec_enable_tpe_all = 1;
63596 +#endif
63597 +#endif
63598 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63599 + grsec_enable_socket_all = 1;
63600 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63601 +#endif
63602 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63603 + grsec_enable_socket_client = 1;
63604 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63605 +#endif
63606 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63607 + grsec_enable_socket_server = 1;
63608 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63609 +#endif
63610 +#endif
63611 +
63612 + return;
63613 +}
63614 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63615 new file mode 100644
63616 index 0000000..3efe141
63617 --- /dev/null
63618 +++ b/grsecurity/grsec_link.c
63619 @@ -0,0 +1,43 @@
63620 +#include <linux/kernel.h>
63621 +#include <linux/sched.h>
63622 +#include <linux/fs.h>
63623 +#include <linux/file.h>
63624 +#include <linux/grinternal.h>
63625 +
63626 +int
63627 +gr_handle_follow_link(const struct inode *parent,
63628 + const struct inode *inode,
63629 + const struct dentry *dentry, const struct vfsmount *mnt)
63630 +{
63631 +#ifdef CONFIG_GRKERNSEC_LINK
63632 + const struct cred *cred = current_cred();
63633 +
63634 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63635 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63636 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63637 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63638 + return -EACCES;
63639 + }
63640 +#endif
63641 + return 0;
63642 +}
63643 +
63644 +int
63645 +gr_handle_hardlink(const struct dentry *dentry,
63646 + const struct vfsmount *mnt,
63647 + struct inode *inode, const int mode, const char *to)
63648 +{
63649 +#ifdef CONFIG_GRKERNSEC_LINK
63650 + const struct cred *cred = current_cred();
63651 +
63652 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63653 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63654 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63655 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63656 + !capable(CAP_FOWNER) && cred->uid) {
63657 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63658 + return -EPERM;
63659 + }
63660 +#endif
63661 + return 0;
63662 +}
63663 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63664 new file mode 100644
63665 index 0000000..a45d2e9
63666 --- /dev/null
63667 +++ b/grsecurity/grsec_log.c
63668 @@ -0,0 +1,322 @@
63669 +#include <linux/kernel.h>
63670 +#include <linux/sched.h>
63671 +#include <linux/file.h>
63672 +#include <linux/tty.h>
63673 +#include <linux/fs.h>
63674 +#include <linux/grinternal.h>
63675 +
63676 +#ifdef CONFIG_TREE_PREEMPT_RCU
63677 +#define DISABLE_PREEMPT() preempt_disable()
63678 +#define ENABLE_PREEMPT() preempt_enable()
63679 +#else
63680 +#define DISABLE_PREEMPT()
63681 +#define ENABLE_PREEMPT()
63682 +#endif
63683 +
63684 +#define BEGIN_LOCKS(x) \
63685 + DISABLE_PREEMPT(); \
63686 + rcu_read_lock(); \
63687 + read_lock(&tasklist_lock); \
63688 + read_lock(&grsec_exec_file_lock); \
63689 + if (x != GR_DO_AUDIT) \
63690 + spin_lock(&grsec_alert_lock); \
63691 + else \
63692 + spin_lock(&grsec_audit_lock)
63693 +
63694 +#define END_LOCKS(x) \
63695 + if (x != GR_DO_AUDIT) \
63696 + spin_unlock(&grsec_alert_lock); \
63697 + else \
63698 + spin_unlock(&grsec_audit_lock); \
63699 + read_unlock(&grsec_exec_file_lock); \
63700 + read_unlock(&tasklist_lock); \
63701 + rcu_read_unlock(); \
63702 + ENABLE_PREEMPT(); \
63703 + if (x == GR_DONT_AUDIT) \
63704 + gr_handle_alertkill(current)
63705 +
63706 +enum {
63707 + FLOODING,
63708 + NO_FLOODING
63709 +};
63710 +
63711 +extern char *gr_alert_log_fmt;
63712 +extern char *gr_audit_log_fmt;
63713 +extern char *gr_alert_log_buf;
63714 +extern char *gr_audit_log_buf;
63715 +
63716 +static int gr_log_start(int audit)
63717 +{
63718 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63719 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63720 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63721 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63722 + unsigned long curr_secs = get_seconds();
63723 +
63724 + if (audit == GR_DO_AUDIT)
63725 + goto set_fmt;
63726 +
63727 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63728 + grsec_alert_wtime = curr_secs;
63729 + grsec_alert_fyet = 0;
63730 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63731 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63732 + grsec_alert_fyet++;
63733 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63734 + grsec_alert_wtime = curr_secs;
63735 + grsec_alert_fyet++;
63736 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63737 + return FLOODING;
63738 + }
63739 + else return FLOODING;
63740 +
63741 +set_fmt:
63742 +#endif
63743 + memset(buf, 0, PAGE_SIZE);
63744 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63745 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63746 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63747 + } else if (current->signal->curr_ip) {
63748 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63749 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63750 + } else if (gr_acl_is_enabled()) {
63751 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63752 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63753 + } else {
63754 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63755 + strcpy(buf, fmt);
63756 + }
63757 +
63758 + return NO_FLOODING;
63759 +}
63760 +
63761 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63762 + __attribute__ ((format (printf, 2, 0)));
63763 +
63764 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63765 +{
63766 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63767 + unsigned int len = strlen(buf);
63768 +
63769 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63770 +
63771 + return;
63772 +}
63773 +
63774 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63775 + __attribute__ ((format (printf, 2, 3)));
63776 +
63777 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63778 +{
63779 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63780 + unsigned int len = strlen(buf);
63781 + va_list ap;
63782 +
63783 + va_start(ap, msg);
63784 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63785 + va_end(ap);
63786 +
63787 + return;
63788 +}
63789 +
63790 +static void gr_log_end(int audit, int append_default)
63791 +{
63792 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63793 +
63794 + if (append_default) {
63795 + unsigned int len = strlen(buf);
63796 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63797 + }
63798 +
63799 + printk("%s\n", buf);
63800 +
63801 + return;
63802 +}
63803 +
63804 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63805 +{
63806 + int logtype;
63807 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63808 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63809 + void *voidptr = NULL;
63810 + int num1 = 0, num2 = 0;
63811 + unsigned long ulong1 = 0, ulong2 = 0;
63812 + struct dentry *dentry = NULL;
63813 + struct vfsmount *mnt = NULL;
63814 + struct file *file = NULL;
63815 + struct task_struct *task = NULL;
63816 + const struct cred *cred, *pcred;
63817 + va_list ap;
63818 +
63819 + BEGIN_LOCKS(audit);
63820 + logtype = gr_log_start(audit);
63821 + if (logtype == FLOODING) {
63822 + END_LOCKS(audit);
63823 + return;
63824 + }
63825 + va_start(ap, argtypes);
63826 + switch (argtypes) {
63827 + case GR_TTYSNIFF:
63828 + task = va_arg(ap, struct task_struct *);
63829 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63830 + break;
63831 + case GR_SYSCTL_HIDDEN:
63832 + str1 = va_arg(ap, char *);
63833 + gr_log_middle_varargs(audit, msg, result, str1);
63834 + break;
63835 + case GR_RBAC:
63836 + dentry = va_arg(ap, struct dentry *);
63837 + mnt = va_arg(ap, struct vfsmount *);
63838 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63839 + break;
63840 + case GR_RBAC_STR:
63841 + dentry = va_arg(ap, struct dentry *);
63842 + mnt = va_arg(ap, struct vfsmount *);
63843 + str1 = va_arg(ap, char *);
63844 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63845 + break;
63846 + case GR_STR_RBAC:
63847 + str1 = va_arg(ap, char *);
63848 + dentry = va_arg(ap, struct dentry *);
63849 + mnt = va_arg(ap, struct vfsmount *);
63850 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63851 + break;
63852 + case GR_RBAC_MODE2:
63853 + dentry = va_arg(ap, struct dentry *);
63854 + mnt = va_arg(ap, struct vfsmount *);
63855 + str1 = va_arg(ap, char *);
63856 + str2 = va_arg(ap, char *);
63857 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63858 + break;
63859 + case GR_RBAC_MODE3:
63860 + dentry = va_arg(ap, struct dentry *);
63861 + mnt = va_arg(ap, struct vfsmount *);
63862 + str1 = va_arg(ap, char *);
63863 + str2 = va_arg(ap, char *);
63864 + str3 = va_arg(ap, char *);
63865 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63866 + break;
63867 + case GR_FILENAME:
63868 + dentry = va_arg(ap, struct dentry *);
63869 + mnt = va_arg(ap, struct vfsmount *);
63870 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63871 + break;
63872 + case GR_STR_FILENAME:
63873 + str1 = va_arg(ap, char *);
63874 + dentry = va_arg(ap, struct dentry *);
63875 + mnt = va_arg(ap, struct vfsmount *);
63876 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63877 + break;
63878 + case GR_FILENAME_STR:
63879 + dentry = va_arg(ap, struct dentry *);
63880 + mnt = va_arg(ap, struct vfsmount *);
63881 + str1 = va_arg(ap, char *);
63882 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63883 + break;
63884 + case GR_FILENAME_TWO_INT:
63885 + dentry = va_arg(ap, struct dentry *);
63886 + mnt = va_arg(ap, struct vfsmount *);
63887 + num1 = va_arg(ap, int);
63888 + num2 = va_arg(ap, int);
63889 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63890 + break;
63891 + case GR_FILENAME_TWO_INT_STR:
63892 + dentry = va_arg(ap, struct dentry *);
63893 + mnt = va_arg(ap, struct vfsmount *);
63894 + num1 = va_arg(ap, int);
63895 + num2 = va_arg(ap, int);
63896 + str1 = va_arg(ap, char *);
63897 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63898 + break;
63899 + case GR_TEXTREL:
63900 + file = va_arg(ap, struct file *);
63901 + ulong1 = va_arg(ap, unsigned long);
63902 + ulong2 = va_arg(ap, unsigned long);
63903 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63904 + break;
63905 + case GR_PTRACE:
63906 + task = va_arg(ap, struct task_struct *);
63907 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63908 + break;
63909 + case GR_RESOURCE:
63910 + task = va_arg(ap, struct task_struct *);
63911 + cred = __task_cred(task);
63912 + pcred = __task_cred(task->real_parent);
63913 + ulong1 = va_arg(ap, unsigned long);
63914 + str1 = va_arg(ap, char *);
63915 + ulong2 = va_arg(ap, unsigned long);
63916 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63917 + break;
63918 + case GR_CAP:
63919 + task = va_arg(ap, struct task_struct *);
63920 + cred = __task_cred(task);
63921 + pcred = __task_cred(task->real_parent);
63922 + str1 = va_arg(ap, char *);
63923 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63924 + break;
63925 + case GR_SIG:
63926 + str1 = va_arg(ap, char *);
63927 + voidptr = va_arg(ap, void *);
63928 + gr_log_middle_varargs(audit, msg, str1, voidptr);
63929 + break;
63930 + case GR_SIG2:
63931 + task = va_arg(ap, struct task_struct *);
63932 + cred = __task_cred(task);
63933 + pcred = __task_cred(task->real_parent);
63934 + num1 = va_arg(ap, int);
63935 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63936 + break;
63937 + case GR_CRASH1:
63938 + task = va_arg(ap, struct task_struct *);
63939 + cred = __task_cred(task);
63940 + pcred = __task_cred(task->real_parent);
63941 + ulong1 = va_arg(ap, unsigned long);
63942 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63943 + break;
63944 + case GR_CRASH2:
63945 + task = va_arg(ap, struct task_struct *);
63946 + cred = __task_cred(task);
63947 + pcred = __task_cred(task->real_parent);
63948 + ulong1 = va_arg(ap, unsigned long);
63949 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63950 + break;
63951 + case GR_RWXMAP:
63952 + file = va_arg(ap, struct file *);
63953 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63954 + break;
63955 + case GR_PSACCT:
63956 + {
63957 + unsigned int wday, cday;
63958 + __u8 whr, chr;
63959 + __u8 wmin, cmin;
63960 + __u8 wsec, csec;
63961 + char cur_tty[64] = { 0 };
63962 + char parent_tty[64] = { 0 };
63963 +
63964 + task = va_arg(ap, struct task_struct *);
63965 + wday = va_arg(ap, unsigned int);
63966 + cday = va_arg(ap, unsigned int);
63967 + whr = va_arg(ap, int);
63968 + chr = va_arg(ap, int);
63969 + wmin = va_arg(ap, int);
63970 + cmin = va_arg(ap, int);
63971 + wsec = va_arg(ap, int);
63972 + csec = va_arg(ap, int);
63973 + ulong1 = va_arg(ap, unsigned long);
63974 + cred = __task_cred(task);
63975 + pcred = __task_cred(task->real_parent);
63976 +
63977 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63978 + }
63979 + break;
63980 + default:
63981 + gr_log_middle(audit, msg, ap);
63982 + }
63983 + va_end(ap);
63984 + // these don't need DEFAULTSECARGS printed on the end
63985 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63986 + gr_log_end(audit, 0);
63987 + else
63988 + gr_log_end(audit, 1);
63989 + END_LOCKS(audit);
63990 +}
63991 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63992 new file mode 100644
63993 index 0000000..f536303
63994 --- /dev/null
63995 +++ b/grsecurity/grsec_mem.c
63996 @@ -0,0 +1,40 @@
63997 +#include <linux/kernel.h>
63998 +#include <linux/sched.h>
63999 +#include <linux/mm.h>
64000 +#include <linux/mman.h>
64001 +#include <linux/grinternal.h>
64002 +
64003 +void
64004 +gr_handle_ioperm(void)
64005 +{
64006 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64007 + return;
64008 +}
64009 +
64010 +void
64011 +gr_handle_iopl(void)
64012 +{
64013 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64014 + return;
64015 +}
64016 +
64017 +void
64018 +gr_handle_mem_readwrite(u64 from, u64 to)
64019 +{
64020 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64021 + return;
64022 +}
64023 +
64024 +void
64025 +gr_handle_vm86(void)
64026 +{
64027 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64028 + return;
64029 +}
64030 +
64031 +void
64032 +gr_log_badprocpid(const char *entry)
64033 +{
64034 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64035 + return;
64036 +}
64037 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64038 new file mode 100644
64039 index 0000000..2131422
64040 --- /dev/null
64041 +++ b/grsecurity/grsec_mount.c
64042 @@ -0,0 +1,62 @@
64043 +#include <linux/kernel.h>
64044 +#include <linux/sched.h>
64045 +#include <linux/mount.h>
64046 +#include <linux/grsecurity.h>
64047 +#include <linux/grinternal.h>
64048 +
64049 +void
64050 +gr_log_remount(const char *devname, const int retval)
64051 +{
64052 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64053 + if (grsec_enable_mount && (retval >= 0))
64054 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64055 +#endif
64056 + return;
64057 +}
64058 +
64059 +void
64060 +gr_log_unmount(const char *devname, const int retval)
64061 +{
64062 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64063 + if (grsec_enable_mount && (retval >= 0))
64064 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64065 +#endif
64066 + return;
64067 +}
64068 +
64069 +void
64070 +gr_log_mount(const char *from, const char *to, const int retval)
64071 +{
64072 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64073 + if (grsec_enable_mount && (retval >= 0))
64074 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64075 +#endif
64076 + return;
64077 +}
64078 +
64079 +int
64080 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64081 +{
64082 +#ifdef CONFIG_GRKERNSEC_ROFS
64083 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64084 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64085 + return -EPERM;
64086 + } else
64087 + return 0;
64088 +#endif
64089 + return 0;
64090 +}
64091 +
64092 +int
64093 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64094 +{
64095 +#ifdef CONFIG_GRKERNSEC_ROFS
64096 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64097 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64098 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64099 + return -EPERM;
64100 + } else
64101 + return 0;
64102 +#endif
64103 + return 0;
64104 +}
64105 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64106 new file mode 100644
64107 index 0000000..a3b12a0
64108 --- /dev/null
64109 +++ b/grsecurity/grsec_pax.c
64110 @@ -0,0 +1,36 @@
64111 +#include <linux/kernel.h>
64112 +#include <linux/sched.h>
64113 +#include <linux/mm.h>
64114 +#include <linux/file.h>
64115 +#include <linux/grinternal.h>
64116 +#include <linux/grsecurity.h>
64117 +
64118 +void
64119 +gr_log_textrel(struct vm_area_struct * vma)
64120 +{
64121 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64122 + if (grsec_enable_audit_textrel)
64123 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64124 +#endif
64125 + return;
64126 +}
64127 +
64128 +void
64129 +gr_log_rwxmmap(struct file *file)
64130 +{
64131 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64132 + if (grsec_enable_log_rwxmaps)
64133 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64134 +#endif
64135 + return;
64136 +}
64137 +
64138 +void
64139 +gr_log_rwxmprotect(struct file *file)
64140 +{
64141 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64142 + if (grsec_enable_log_rwxmaps)
64143 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64144 +#endif
64145 + return;
64146 +}
64147 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64148 new file mode 100644
64149 index 0000000..78f8733
64150 --- /dev/null
64151 +++ b/grsecurity/grsec_ptrace.c
64152 @@ -0,0 +1,30 @@
64153 +#include <linux/kernel.h>
64154 +#include <linux/sched.h>
64155 +#include <linux/grinternal.h>
64156 +#include <linux/security.h>
64157 +
64158 +void
64159 +gr_audit_ptrace(struct task_struct *task)
64160 +{
64161 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64162 + if (grsec_enable_audit_ptrace)
64163 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64164 +#endif
64165 + return;
64166 +}
64167 +
64168 +int
64169 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64170 +{
64171 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64172 + const struct dentry *dentry = file->f_path.dentry;
64173 + const struct vfsmount *mnt = file->f_path.mnt;
64174 +
64175 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64176 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64177 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64178 + return -EACCES;
64179 + }
64180 +#endif
64181 + return 0;
64182 +}
64183 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64184 new file mode 100644
64185 index 0000000..c648492
64186 --- /dev/null
64187 +++ b/grsecurity/grsec_sig.c
64188 @@ -0,0 +1,206 @@
64189 +#include <linux/kernel.h>
64190 +#include <linux/sched.h>
64191 +#include <linux/delay.h>
64192 +#include <linux/grsecurity.h>
64193 +#include <linux/grinternal.h>
64194 +#include <linux/hardirq.h>
64195 +
64196 +char *signames[] = {
64197 + [SIGSEGV] = "Segmentation fault",
64198 + [SIGILL] = "Illegal instruction",
64199 + [SIGABRT] = "Abort",
64200 + [SIGBUS] = "Invalid alignment/Bus error"
64201 +};
64202 +
64203 +void
64204 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64205 +{
64206 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64207 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64208 + (sig == SIGABRT) || (sig == SIGBUS))) {
64209 + if (t->pid == current->pid) {
64210 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64211 + } else {
64212 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64213 + }
64214 + }
64215 +#endif
64216 + return;
64217 +}
64218 +
64219 +int
64220 +gr_handle_signal(const struct task_struct *p, const int sig)
64221 +{
64222 +#ifdef CONFIG_GRKERNSEC
64223 + /* ignore the 0 signal for protected task checks */
64224 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64225 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64226 + return -EPERM;
64227 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64228 + return -EPERM;
64229 + }
64230 +#endif
64231 + return 0;
64232 +}
64233 +
64234 +#ifdef CONFIG_GRKERNSEC
64235 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64236 +
64237 +int gr_fake_force_sig(int sig, struct task_struct *t)
64238 +{
64239 + unsigned long int flags;
64240 + int ret, blocked, ignored;
64241 + struct k_sigaction *action;
64242 +
64243 + spin_lock_irqsave(&t->sighand->siglock, flags);
64244 + action = &t->sighand->action[sig-1];
64245 + ignored = action->sa.sa_handler == SIG_IGN;
64246 + blocked = sigismember(&t->blocked, sig);
64247 + if (blocked || ignored) {
64248 + action->sa.sa_handler = SIG_DFL;
64249 + if (blocked) {
64250 + sigdelset(&t->blocked, sig);
64251 + recalc_sigpending_and_wake(t);
64252 + }
64253 + }
64254 + if (action->sa.sa_handler == SIG_DFL)
64255 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64256 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64257 +
64258 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64259 +
64260 + return ret;
64261 +}
64262 +#endif
64263 +
64264 +#ifdef CONFIG_GRKERNSEC_BRUTE
64265 +#define GR_USER_BAN_TIME (15 * 60)
64266 +
64267 +static int __get_dumpable(unsigned long mm_flags)
64268 +{
64269 + int ret;
64270 +
64271 + ret = mm_flags & MMF_DUMPABLE_MASK;
64272 + return (ret >= 2) ? 2 : ret;
64273 +}
64274 +#endif
64275 +
64276 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64277 +{
64278 +#ifdef CONFIG_GRKERNSEC_BRUTE
64279 + uid_t uid = 0;
64280 +
64281 + if (!grsec_enable_brute)
64282 + return;
64283 +
64284 + rcu_read_lock();
64285 + read_lock(&tasklist_lock);
64286 + read_lock(&grsec_exec_file_lock);
64287 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64288 + p->real_parent->brute = 1;
64289 + else {
64290 + const struct cred *cred = __task_cred(p), *cred2;
64291 + struct task_struct *tsk, *tsk2;
64292 +
64293 + if (!__get_dumpable(mm_flags) && cred->uid) {
64294 + struct user_struct *user;
64295 +
64296 + uid = cred->uid;
64297 +
64298 + /* this is put upon execution past expiration */
64299 + user = find_user(uid);
64300 + if (user == NULL)
64301 + goto unlock;
64302 + user->banned = 1;
64303 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64304 + if (user->ban_expires == ~0UL)
64305 + user->ban_expires--;
64306 +
64307 + do_each_thread(tsk2, tsk) {
64308 + cred2 = __task_cred(tsk);
64309 + if (tsk != p && cred2->uid == uid)
64310 + gr_fake_force_sig(SIGKILL, tsk);
64311 + } while_each_thread(tsk2, tsk);
64312 + }
64313 + }
64314 +unlock:
64315 + read_unlock(&grsec_exec_file_lock);
64316 + read_unlock(&tasklist_lock);
64317 + rcu_read_unlock();
64318 +
64319 + if (uid)
64320 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64321 +#endif
64322 + return;
64323 +}
64324 +
64325 +void gr_handle_brute_check(void)
64326 +{
64327 +#ifdef CONFIG_GRKERNSEC_BRUTE
64328 + if (current->brute)
64329 + msleep(30 * 1000);
64330 +#endif
64331 + return;
64332 +}
64333 +
64334 +void gr_handle_kernel_exploit(void)
64335 +{
64336 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64337 + const struct cred *cred;
64338 + struct task_struct *tsk, *tsk2;
64339 + struct user_struct *user;
64340 + uid_t uid;
64341 +
64342 + if (in_irq() || in_serving_softirq() || in_nmi())
64343 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64344 +
64345 + uid = current_uid();
64346 +
64347 + if (uid == 0)
64348 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
64349 + else {
64350 + /* kill all the processes of this user, hold a reference
64351 + to their creds struct, and prevent them from creating
64352 + another process until system reset
64353 + */
64354 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64355 + /* we intentionally leak this ref */
64356 + user = get_uid(current->cred->user);
64357 + if (user) {
64358 + user->banned = 1;
64359 + user->ban_expires = ~0UL;
64360 + }
64361 +
64362 + read_lock(&tasklist_lock);
64363 + do_each_thread(tsk2, tsk) {
64364 + cred = __task_cred(tsk);
64365 + if (cred->uid == uid)
64366 + gr_fake_force_sig(SIGKILL, tsk);
64367 + } while_each_thread(tsk2, tsk);
64368 + read_unlock(&tasklist_lock);
64369 + }
64370 +#endif
64371 +}
64372 +
64373 +int __gr_process_user_ban(struct user_struct *user)
64374 +{
64375 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64376 + if (unlikely(user->banned)) {
64377 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64378 + user->banned = 0;
64379 + user->ban_expires = 0;
64380 + free_uid(user);
64381 + } else
64382 + return -EPERM;
64383 + }
64384 +#endif
64385 + return 0;
64386 +}
64387 +
64388 +int gr_process_user_ban(void)
64389 +{
64390 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64391 + return __gr_process_user_ban(current->cred->user);
64392 +#endif
64393 + return 0;
64394 +}
64395 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64396 new file mode 100644
64397 index 0000000..7512ea9
64398 --- /dev/null
64399 +++ b/grsecurity/grsec_sock.c
64400 @@ -0,0 +1,275 @@
64401 +#include <linux/kernel.h>
64402 +#include <linux/module.h>
64403 +#include <linux/sched.h>
64404 +#include <linux/file.h>
64405 +#include <linux/net.h>
64406 +#include <linux/in.h>
64407 +#include <linux/ip.h>
64408 +#include <net/sock.h>
64409 +#include <net/inet_sock.h>
64410 +#include <linux/grsecurity.h>
64411 +#include <linux/grinternal.h>
64412 +#include <linux/gracl.h>
64413 +
64414 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64415 +EXPORT_SYMBOL(gr_cap_rtnetlink);
64416 +
64417 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64418 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64419 +
64420 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
64421 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
64422 +
64423 +#ifdef CONFIG_UNIX_MODULE
64424 +EXPORT_SYMBOL(gr_acl_handle_unix);
64425 +EXPORT_SYMBOL(gr_acl_handle_mknod);
64426 +EXPORT_SYMBOL(gr_handle_chroot_unix);
64427 +EXPORT_SYMBOL(gr_handle_create);
64428 +#endif
64429 +
64430 +#ifdef CONFIG_GRKERNSEC
64431 +#define gr_conn_table_size 32749
64432 +struct conn_table_entry {
64433 + struct conn_table_entry *next;
64434 + struct signal_struct *sig;
64435 +};
64436 +
64437 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64438 +DEFINE_SPINLOCK(gr_conn_table_lock);
64439 +
64440 +extern const char * gr_socktype_to_name(unsigned char type);
64441 +extern const char * gr_proto_to_name(unsigned char proto);
64442 +extern const char * gr_sockfamily_to_name(unsigned char family);
64443 +
64444 +static __inline__ int
64445 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64446 +{
64447 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64448 +}
64449 +
64450 +static __inline__ int
64451 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64452 + __u16 sport, __u16 dport)
64453 +{
64454 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64455 + sig->gr_sport == sport && sig->gr_dport == dport))
64456 + return 1;
64457 + else
64458 + return 0;
64459 +}
64460 +
64461 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64462 +{
64463 + struct conn_table_entry **match;
64464 + unsigned int index;
64465 +
64466 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64467 + sig->gr_sport, sig->gr_dport,
64468 + gr_conn_table_size);
64469 +
64470 + newent->sig = sig;
64471 +
64472 + match = &gr_conn_table[index];
64473 + newent->next = *match;
64474 + *match = newent;
64475 +
64476 + return;
64477 +}
64478 +
64479 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64480 +{
64481 + struct conn_table_entry *match, *last = NULL;
64482 + unsigned int index;
64483 +
64484 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64485 + sig->gr_sport, sig->gr_dport,
64486 + gr_conn_table_size);
64487 +
64488 + match = gr_conn_table[index];
64489 + while (match && !conn_match(match->sig,
64490 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64491 + sig->gr_dport)) {
64492 + last = match;
64493 + match = match->next;
64494 + }
64495 +
64496 + if (match) {
64497 + if (last)
64498 + last->next = match->next;
64499 + else
64500 + gr_conn_table[index] = NULL;
64501 + kfree(match);
64502 + }
64503 +
64504 + return;
64505 +}
64506 +
64507 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64508 + __u16 sport, __u16 dport)
64509 +{
64510 + struct conn_table_entry *match;
64511 + unsigned int index;
64512 +
64513 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64514 +
64515 + match = gr_conn_table[index];
64516 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64517 + match = match->next;
64518 +
64519 + if (match)
64520 + return match->sig;
64521 + else
64522 + return NULL;
64523 +}
64524 +
64525 +#endif
64526 +
64527 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64528 +{
64529 +#ifdef CONFIG_GRKERNSEC
64530 + struct signal_struct *sig = task->signal;
64531 + struct conn_table_entry *newent;
64532 +
64533 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64534 + if (newent == NULL)
64535 + return;
64536 + /* no bh lock needed since we are called with bh disabled */
64537 + spin_lock(&gr_conn_table_lock);
64538 + gr_del_task_from_ip_table_nolock(sig);
64539 + sig->gr_saddr = inet->rcv_saddr;
64540 + sig->gr_daddr = inet->daddr;
64541 + sig->gr_sport = inet->sport;
64542 + sig->gr_dport = inet->dport;
64543 + gr_add_to_task_ip_table_nolock(sig, newent);
64544 + spin_unlock(&gr_conn_table_lock);
64545 +#endif
64546 + return;
64547 +}
64548 +
64549 +void gr_del_task_from_ip_table(struct task_struct *task)
64550 +{
64551 +#ifdef CONFIG_GRKERNSEC
64552 + spin_lock_bh(&gr_conn_table_lock);
64553 + gr_del_task_from_ip_table_nolock(task->signal);
64554 + spin_unlock_bh(&gr_conn_table_lock);
64555 +#endif
64556 + return;
64557 +}
64558 +
64559 +void
64560 +gr_attach_curr_ip(const struct sock *sk)
64561 +{
64562 +#ifdef CONFIG_GRKERNSEC
64563 + struct signal_struct *p, *set;
64564 + const struct inet_sock *inet = inet_sk(sk);
64565 +
64566 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64567 + return;
64568 +
64569 + set = current->signal;
64570 +
64571 + spin_lock_bh(&gr_conn_table_lock);
64572 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64573 + inet->dport, inet->sport);
64574 + if (unlikely(p != NULL)) {
64575 + set->curr_ip = p->curr_ip;
64576 + set->used_accept = 1;
64577 + gr_del_task_from_ip_table_nolock(p);
64578 + spin_unlock_bh(&gr_conn_table_lock);
64579 + return;
64580 + }
64581 + spin_unlock_bh(&gr_conn_table_lock);
64582 +
64583 + set->curr_ip = inet->daddr;
64584 + set->used_accept = 1;
64585 +#endif
64586 + return;
64587 +}
64588 +
64589 +int
64590 +gr_handle_sock_all(const int family, const int type, const int protocol)
64591 +{
64592 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64593 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64594 + (family != AF_UNIX)) {
64595 + if (family == AF_INET)
64596 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64597 + else
64598 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64599 + return -EACCES;
64600 + }
64601 +#endif
64602 + return 0;
64603 +}
64604 +
64605 +int
64606 +gr_handle_sock_server(const struct sockaddr *sck)
64607 +{
64608 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64609 + if (grsec_enable_socket_server &&
64610 + in_group_p(grsec_socket_server_gid) &&
64611 + sck && (sck->sa_family != AF_UNIX) &&
64612 + (sck->sa_family != AF_LOCAL)) {
64613 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64614 + return -EACCES;
64615 + }
64616 +#endif
64617 + return 0;
64618 +}
64619 +
64620 +int
64621 +gr_handle_sock_server_other(const struct sock *sck)
64622 +{
64623 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64624 + if (grsec_enable_socket_server &&
64625 + in_group_p(grsec_socket_server_gid) &&
64626 + sck && (sck->sk_family != AF_UNIX) &&
64627 + (sck->sk_family != AF_LOCAL)) {
64628 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64629 + return -EACCES;
64630 + }
64631 +#endif
64632 + return 0;
64633 +}
64634 +
64635 +int
64636 +gr_handle_sock_client(const struct sockaddr *sck)
64637 +{
64638 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64639 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64640 + sck && (sck->sa_family != AF_UNIX) &&
64641 + (sck->sa_family != AF_LOCAL)) {
64642 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64643 + return -EACCES;
64644 + }
64645 +#endif
64646 + return 0;
64647 +}
64648 +
64649 +kernel_cap_t
64650 +gr_cap_rtnetlink(struct sock *sock)
64651 +{
64652 +#ifdef CONFIG_GRKERNSEC
64653 + if (!gr_acl_is_enabled())
64654 + return current_cap();
64655 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64656 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64657 + gr_is_capable(CAP_SYS_ADMIN))
64658 + return current_cap();
64659 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64660 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64661 + gr_is_capable(CAP_AUDIT_WRITE) &&
64662 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64663 + gr_is_capable(CAP_AUDIT_CONTROL))
64664 + return current_cap();
64665 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64666 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64667 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64668 + gr_is_capable(CAP_NET_ADMIN)))
64669 + return current_cap();
64670 + else
64671 + return __cap_empty_set;
64672 +#else
64673 + return current_cap();
64674 +#endif
64675 +}
64676 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64677 new file mode 100644
64678 index 0000000..31f3258
64679 --- /dev/null
64680 +++ b/grsecurity/grsec_sysctl.c
64681 @@ -0,0 +1,499 @@
64682 +#include <linux/kernel.h>
64683 +#include <linux/sched.h>
64684 +#include <linux/sysctl.h>
64685 +#include <linux/grsecurity.h>
64686 +#include <linux/grinternal.h>
64687 +
64688 +int
64689 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64690 +{
64691 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64692 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64693 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64694 + return -EACCES;
64695 + }
64696 +#endif
64697 + return 0;
64698 +}
64699 +
64700 +#ifdef CONFIG_GRKERNSEC_ROFS
64701 +static int __maybe_unused one = 1;
64702 +#endif
64703 +
64704 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64705 +ctl_table grsecurity_table[] = {
64706 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64707 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64708 +#ifdef CONFIG_GRKERNSEC_IO
64709 + {
64710 + .ctl_name = CTL_UNNUMBERED,
64711 + .procname = "disable_priv_io",
64712 + .data = &grsec_disable_privio,
64713 + .maxlen = sizeof(int),
64714 + .mode = 0600,
64715 + .proc_handler = &proc_dointvec,
64716 + },
64717 +#endif
64718 +#endif
64719 +#ifdef CONFIG_GRKERNSEC_LINK
64720 + {
64721 + .ctl_name = CTL_UNNUMBERED,
64722 + .procname = "linking_restrictions",
64723 + .data = &grsec_enable_link,
64724 + .maxlen = sizeof(int),
64725 + .mode = 0600,
64726 + .proc_handler = &proc_dointvec,
64727 + },
64728 +#endif
64729 +#ifdef CONFIG_GRKERNSEC_BRUTE
64730 + {
64731 + .ctl_name = CTL_UNNUMBERED,
64732 + .procname = "deter_bruteforce",
64733 + .data = &grsec_enable_brute,
64734 + .maxlen = sizeof(int),
64735 + .mode = 0600,
64736 + .proc_handler = &proc_dointvec,
64737 + },
64738 +#endif
64739 +#ifdef CONFIG_GRKERNSEC_FIFO
64740 + {
64741 + .ctl_name = CTL_UNNUMBERED,
64742 + .procname = "fifo_restrictions",
64743 + .data = &grsec_enable_fifo,
64744 + .maxlen = sizeof(int),
64745 + .mode = 0600,
64746 + .proc_handler = &proc_dointvec,
64747 + },
64748 +#endif
64749 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64750 + {
64751 + .ctl_name = CTL_UNNUMBERED,
64752 + .procname = "ptrace_readexec",
64753 + .data = &grsec_enable_ptrace_readexec,
64754 + .maxlen = sizeof(int),
64755 + .mode = 0600,
64756 + .proc_handler = &proc_dointvec,
64757 + },
64758 +#endif
64759 +#ifdef CONFIG_GRKERNSEC_SETXID
64760 + {
64761 + .ctl_name = CTL_UNNUMBERED,
64762 + .procname = "consistent_setxid",
64763 + .data = &grsec_enable_setxid,
64764 + .maxlen = sizeof(int),
64765 + .mode = 0600,
64766 + .proc_handler = &proc_dointvec,
64767 + },
64768 +#endif
64769 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64770 + {
64771 + .ctl_name = CTL_UNNUMBERED,
64772 + .procname = "ip_blackhole",
64773 + .data = &grsec_enable_blackhole,
64774 + .maxlen = sizeof(int),
64775 + .mode = 0600,
64776 + .proc_handler = &proc_dointvec,
64777 + },
64778 + {
64779 + .ctl_name = CTL_UNNUMBERED,
64780 + .procname = "lastack_retries",
64781 + .data = &grsec_lastack_retries,
64782 + .maxlen = sizeof(int),
64783 + .mode = 0600,
64784 + .proc_handler = &proc_dointvec,
64785 + },
64786 +#endif
64787 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64788 + {
64789 + .ctl_name = CTL_UNNUMBERED,
64790 + .procname = "exec_logging",
64791 + .data = &grsec_enable_execlog,
64792 + .maxlen = sizeof(int),
64793 + .mode = 0600,
64794 + .proc_handler = &proc_dointvec,
64795 + },
64796 +#endif
64797 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64798 + {
64799 + .ctl_name = CTL_UNNUMBERED,
64800 + .procname = "rwxmap_logging",
64801 + .data = &grsec_enable_log_rwxmaps,
64802 + .maxlen = sizeof(int),
64803 + .mode = 0600,
64804 + .proc_handler = &proc_dointvec,
64805 + },
64806 +#endif
64807 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64808 + {
64809 + .ctl_name = CTL_UNNUMBERED,
64810 + .procname = "signal_logging",
64811 + .data = &grsec_enable_signal,
64812 + .maxlen = sizeof(int),
64813 + .mode = 0600,
64814 + .proc_handler = &proc_dointvec,
64815 + },
64816 +#endif
64817 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64818 + {
64819 + .ctl_name = CTL_UNNUMBERED,
64820 + .procname = "forkfail_logging",
64821 + .data = &grsec_enable_forkfail,
64822 + .maxlen = sizeof(int),
64823 + .mode = 0600,
64824 + .proc_handler = &proc_dointvec,
64825 + },
64826 +#endif
64827 +#ifdef CONFIG_GRKERNSEC_TIME
64828 + {
64829 + .ctl_name = CTL_UNNUMBERED,
64830 + .procname = "timechange_logging",
64831 + .data = &grsec_enable_time,
64832 + .maxlen = sizeof(int),
64833 + .mode = 0600,
64834 + .proc_handler = &proc_dointvec,
64835 + },
64836 +#endif
64837 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64838 + {
64839 + .ctl_name = CTL_UNNUMBERED,
64840 + .procname = "chroot_deny_shmat",
64841 + .data = &grsec_enable_chroot_shmat,
64842 + .maxlen = sizeof(int),
64843 + .mode = 0600,
64844 + .proc_handler = &proc_dointvec,
64845 + },
64846 +#endif
64847 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64848 + {
64849 + .ctl_name = CTL_UNNUMBERED,
64850 + .procname = "chroot_deny_unix",
64851 + .data = &grsec_enable_chroot_unix,
64852 + .maxlen = sizeof(int),
64853 + .mode = 0600,
64854 + .proc_handler = &proc_dointvec,
64855 + },
64856 +#endif
64857 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64858 + {
64859 + .ctl_name = CTL_UNNUMBERED,
64860 + .procname = "chroot_deny_mount",
64861 + .data = &grsec_enable_chroot_mount,
64862 + .maxlen = sizeof(int),
64863 + .mode = 0600,
64864 + .proc_handler = &proc_dointvec,
64865 + },
64866 +#endif
64867 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64868 + {
64869 + .ctl_name = CTL_UNNUMBERED,
64870 + .procname = "chroot_deny_fchdir",
64871 + .data = &grsec_enable_chroot_fchdir,
64872 + .maxlen = sizeof(int),
64873 + .mode = 0600,
64874 + .proc_handler = &proc_dointvec,
64875 + },
64876 +#endif
64877 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64878 + {
64879 + .ctl_name = CTL_UNNUMBERED,
64880 + .procname = "chroot_deny_chroot",
64881 + .data = &grsec_enable_chroot_double,
64882 + .maxlen = sizeof(int),
64883 + .mode = 0600,
64884 + .proc_handler = &proc_dointvec,
64885 + },
64886 +#endif
64887 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64888 + {
64889 + .ctl_name = CTL_UNNUMBERED,
64890 + .procname = "chroot_deny_pivot",
64891 + .data = &grsec_enable_chroot_pivot,
64892 + .maxlen = sizeof(int),
64893 + .mode = 0600,
64894 + .proc_handler = &proc_dointvec,
64895 + },
64896 +#endif
64897 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64898 + {
64899 + .ctl_name = CTL_UNNUMBERED,
64900 + .procname = "chroot_enforce_chdir",
64901 + .data = &grsec_enable_chroot_chdir,
64902 + .maxlen = sizeof(int),
64903 + .mode = 0600,
64904 + .proc_handler = &proc_dointvec,
64905 + },
64906 +#endif
64907 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64908 + {
64909 + .ctl_name = CTL_UNNUMBERED,
64910 + .procname = "chroot_deny_chmod",
64911 + .data = &grsec_enable_chroot_chmod,
64912 + .maxlen = sizeof(int),
64913 + .mode = 0600,
64914 + .proc_handler = &proc_dointvec,
64915 + },
64916 +#endif
64917 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64918 + {
64919 + .ctl_name = CTL_UNNUMBERED,
64920 + .procname = "chroot_deny_mknod",
64921 + .data = &grsec_enable_chroot_mknod,
64922 + .maxlen = sizeof(int),
64923 + .mode = 0600,
64924 + .proc_handler = &proc_dointvec,
64925 + },
64926 +#endif
64927 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64928 + {
64929 + .ctl_name = CTL_UNNUMBERED,
64930 + .procname = "chroot_restrict_nice",
64931 + .data = &grsec_enable_chroot_nice,
64932 + .maxlen = sizeof(int),
64933 + .mode = 0600,
64934 + .proc_handler = &proc_dointvec,
64935 + },
64936 +#endif
64937 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64938 + {
64939 + .ctl_name = CTL_UNNUMBERED,
64940 + .procname = "chroot_execlog",
64941 + .data = &grsec_enable_chroot_execlog,
64942 + .maxlen = sizeof(int),
64943 + .mode = 0600,
64944 + .proc_handler = &proc_dointvec,
64945 + },
64946 +#endif
64947 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64948 + {
64949 + .ctl_name = CTL_UNNUMBERED,
64950 + .procname = "chroot_caps",
64951 + .data = &grsec_enable_chroot_caps,
64952 + .maxlen = sizeof(int),
64953 + .mode = 0600,
64954 + .proc_handler = &proc_dointvec,
64955 + },
64956 +#endif
64957 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64958 + {
64959 + .ctl_name = CTL_UNNUMBERED,
64960 + .procname = "chroot_deny_sysctl",
64961 + .data = &grsec_enable_chroot_sysctl,
64962 + .maxlen = sizeof(int),
64963 + .mode = 0600,
64964 + .proc_handler = &proc_dointvec,
64965 + },
64966 +#endif
64967 +#ifdef CONFIG_GRKERNSEC_TPE
64968 + {
64969 + .ctl_name = CTL_UNNUMBERED,
64970 + .procname = "tpe",
64971 + .data = &grsec_enable_tpe,
64972 + .maxlen = sizeof(int),
64973 + .mode = 0600,
64974 + .proc_handler = &proc_dointvec,
64975 + },
64976 + {
64977 + .ctl_name = CTL_UNNUMBERED,
64978 + .procname = "tpe_gid",
64979 + .data = &grsec_tpe_gid,
64980 + .maxlen = sizeof(int),
64981 + .mode = 0600,
64982 + .proc_handler = &proc_dointvec,
64983 + },
64984 +#endif
64985 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64986 + {
64987 + .ctl_name = CTL_UNNUMBERED,
64988 + .procname = "tpe_invert",
64989 + .data = &grsec_enable_tpe_invert,
64990 + .maxlen = sizeof(int),
64991 + .mode = 0600,
64992 + .proc_handler = &proc_dointvec,
64993 + },
64994 +#endif
64995 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64996 + {
64997 + .ctl_name = CTL_UNNUMBERED,
64998 + .procname = "tpe_restrict_all",
64999 + .data = &grsec_enable_tpe_all,
65000 + .maxlen = sizeof(int),
65001 + .mode = 0600,
65002 + .proc_handler = &proc_dointvec,
65003 + },
65004 +#endif
65005 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65006 + {
65007 + .ctl_name = CTL_UNNUMBERED,
65008 + .procname = "socket_all",
65009 + .data = &grsec_enable_socket_all,
65010 + .maxlen = sizeof(int),
65011 + .mode = 0600,
65012 + .proc_handler = &proc_dointvec,
65013 + },
65014 + {
65015 + .ctl_name = CTL_UNNUMBERED,
65016 + .procname = "socket_all_gid",
65017 + .data = &grsec_socket_all_gid,
65018 + .maxlen = sizeof(int),
65019 + .mode = 0600,
65020 + .proc_handler = &proc_dointvec,
65021 + },
65022 +#endif
65023 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65024 + {
65025 + .ctl_name = CTL_UNNUMBERED,
65026 + .procname = "socket_client",
65027 + .data = &grsec_enable_socket_client,
65028 + .maxlen = sizeof(int),
65029 + .mode = 0600,
65030 + .proc_handler = &proc_dointvec,
65031 + },
65032 + {
65033 + .ctl_name = CTL_UNNUMBERED,
65034 + .procname = "socket_client_gid",
65035 + .data = &grsec_socket_client_gid,
65036 + .maxlen = sizeof(int),
65037 + .mode = 0600,
65038 + .proc_handler = &proc_dointvec,
65039 + },
65040 +#endif
65041 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65042 + {
65043 + .ctl_name = CTL_UNNUMBERED,
65044 + .procname = "socket_server",
65045 + .data = &grsec_enable_socket_server,
65046 + .maxlen = sizeof(int),
65047 + .mode = 0600,
65048 + .proc_handler = &proc_dointvec,
65049 + },
65050 + {
65051 + .ctl_name = CTL_UNNUMBERED,
65052 + .procname = "socket_server_gid",
65053 + .data = &grsec_socket_server_gid,
65054 + .maxlen = sizeof(int),
65055 + .mode = 0600,
65056 + .proc_handler = &proc_dointvec,
65057 + },
65058 +#endif
65059 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65060 + {
65061 + .ctl_name = CTL_UNNUMBERED,
65062 + .procname = "audit_group",
65063 + .data = &grsec_enable_group,
65064 + .maxlen = sizeof(int),
65065 + .mode = 0600,
65066 + .proc_handler = &proc_dointvec,
65067 + },
65068 + {
65069 + .ctl_name = CTL_UNNUMBERED,
65070 + .procname = "audit_gid",
65071 + .data = &grsec_audit_gid,
65072 + .maxlen = sizeof(int),
65073 + .mode = 0600,
65074 + .proc_handler = &proc_dointvec,
65075 + },
65076 +#endif
65077 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65078 + {
65079 + .ctl_name = CTL_UNNUMBERED,
65080 + .procname = "audit_chdir",
65081 + .data = &grsec_enable_chdir,
65082 + .maxlen = sizeof(int),
65083 + .mode = 0600,
65084 + .proc_handler = &proc_dointvec,
65085 + },
65086 +#endif
65087 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65088 + {
65089 + .ctl_name = CTL_UNNUMBERED,
65090 + .procname = "audit_mount",
65091 + .data = &grsec_enable_mount,
65092 + .maxlen = sizeof(int),
65093 + .mode = 0600,
65094 + .proc_handler = &proc_dointvec,
65095 + },
65096 +#endif
65097 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65098 + {
65099 + .ctl_name = CTL_UNNUMBERED,
65100 + .procname = "audit_textrel",
65101 + .data = &grsec_enable_audit_textrel,
65102 + .maxlen = sizeof(int),
65103 + .mode = 0600,
65104 + .proc_handler = &proc_dointvec,
65105 + },
65106 +#endif
65107 +#ifdef CONFIG_GRKERNSEC_DMESG
65108 + {
65109 + .ctl_name = CTL_UNNUMBERED,
65110 + .procname = "dmesg",
65111 + .data = &grsec_enable_dmesg,
65112 + .maxlen = sizeof(int),
65113 + .mode = 0600,
65114 + .proc_handler = &proc_dointvec,
65115 + },
65116 +#endif
65117 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65118 + {
65119 + .ctl_name = CTL_UNNUMBERED,
65120 + .procname = "chroot_findtask",
65121 + .data = &grsec_enable_chroot_findtask,
65122 + .maxlen = sizeof(int),
65123 + .mode = 0600,
65124 + .proc_handler = &proc_dointvec,
65125 + },
65126 +#endif
65127 +#ifdef CONFIG_GRKERNSEC_RESLOG
65128 + {
65129 + .ctl_name = CTL_UNNUMBERED,
65130 + .procname = "resource_logging",
65131 + .data = &grsec_resource_logging,
65132 + .maxlen = sizeof(int),
65133 + .mode = 0600,
65134 + .proc_handler = &proc_dointvec,
65135 + },
65136 +#endif
65137 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65138 + {
65139 + .ctl_name = CTL_UNNUMBERED,
65140 + .procname = "audit_ptrace",
65141 + .data = &grsec_enable_audit_ptrace,
65142 + .maxlen = sizeof(int),
65143 + .mode = 0600,
65144 + .proc_handler = &proc_dointvec,
65145 + },
65146 +#endif
65147 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65148 + {
65149 + .ctl_name = CTL_UNNUMBERED,
65150 + .procname = "harden_ptrace",
65151 + .data = &grsec_enable_harden_ptrace,
65152 + .maxlen = sizeof(int),
65153 + .mode = 0600,
65154 + .proc_handler = &proc_dointvec,
65155 + },
65156 +#endif
65157 + {
65158 + .ctl_name = CTL_UNNUMBERED,
65159 + .procname = "grsec_lock",
65160 + .data = &grsec_lock,
65161 + .maxlen = sizeof(int),
65162 + .mode = 0600,
65163 + .proc_handler = &proc_dointvec,
65164 + },
65165 +#endif
65166 +#ifdef CONFIG_GRKERNSEC_ROFS
65167 + {
65168 + .ctl_name = CTL_UNNUMBERED,
65169 + .procname = "romount_protect",
65170 + .data = &grsec_enable_rofs,
65171 + .maxlen = sizeof(int),
65172 + .mode = 0600,
65173 + .proc_handler = &proc_dointvec_minmax,
65174 + .extra1 = &one,
65175 + .extra2 = &one,
65176 + },
65177 +#endif
65178 + { .ctl_name = 0 }
65179 +};
65180 +#endif
65181 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65182 new file mode 100644
65183 index 0000000..0dc13c3
65184 --- /dev/null
65185 +++ b/grsecurity/grsec_time.c
65186 @@ -0,0 +1,16 @@
65187 +#include <linux/kernel.h>
65188 +#include <linux/sched.h>
65189 +#include <linux/grinternal.h>
65190 +#include <linux/module.h>
65191 +
65192 +void
65193 +gr_log_timechange(void)
65194 +{
65195 +#ifdef CONFIG_GRKERNSEC_TIME
65196 + if (grsec_enable_time)
65197 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65198 +#endif
65199 + return;
65200 +}
65201 +
65202 +EXPORT_SYMBOL(gr_log_timechange);
65203 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65204 new file mode 100644
65205 index 0000000..07e0dc0
65206 --- /dev/null
65207 +++ b/grsecurity/grsec_tpe.c
65208 @@ -0,0 +1,73 @@
65209 +#include <linux/kernel.h>
65210 +#include <linux/sched.h>
65211 +#include <linux/file.h>
65212 +#include <linux/fs.h>
65213 +#include <linux/grinternal.h>
65214 +
65215 +extern int gr_acl_tpe_check(void);
65216 +
65217 +int
65218 +gr_tpe_allow(const struct file *file)
65219 +{
65220 +#ifdef CONFIG_GRKERNSEC
65221 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65222 + const struct cred *cred = current_cred();
65223 + char *msg = NULL;
65224 + char *msg2 = NULL;
65225 +
65226 + // never restrict root
65227 + if (!cred->uid)
65228 + return 1;
65229 +
65230 + if (grsec_enable_tpe) {
65231 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65232 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65233 + msg = "not being in trusted group";
65234 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65235 + msg = "being in untrusted group";
65236 +#else
65237 + if (in_group_p(grsec_tpe_gid))
65238 + msg = "being in untrusted group";
65239 +#endif
65240 + }
65241 + if (!msg && gr_acl_tpe_check())
65242 + msg = "being in untrusted role";
65243 +
65244 + // not in any affected group/role
65245 + if (!msg)
65246 + goto next_check;
65247 +
65248 + if (inode->i_uid)
65249 + msg2 = "file in non-root-owned directory";
65250 + else if (inode->i_mode & S_IWOTH)
65251 + msg2 = "file in world-writable directory";
65252 + else if (inode->i_mode & S_IWGRP)
65253 + msg2 = "file in group-writable directory";
65254 +
65255 + if (msg && msg2) {
65256 + char fullmsg[70] = {0};
65257 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65258 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65259 + return 0;
65260 + }
65261 + msg = NULL;
65262 +next_check:
65263 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65264 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65265 + return 1;
65266 +
65267 + if (inode->i_uid && (inode->i_uid != cred->uid))
65268 + msg = "directory not owned by user";
65269 + else if (inode->i_mode & S_IWOTH)
65270 + msg = "file in world-writable directory";
65271 + else if (inode->i_mode & S_IWGRP)
65272 + msg = "file in group-writable directory";
65273 +
65274 + if (msg) {
65275 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65276 + return 0;
65277 + }
65278 +#endif
65279 +#endif
65280 + return 1;
65281 +}
65282 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65283 new file mode 100644
65284 index 0000000..9f7b1ac
65285 --- /dev/null
65286 +++ b/grsecurity/grsum.c
65287 @@ -0,0 +1,61 @@
65288 +#include <linux/err.h>
65289 +#include <linux/kernel.h>
65290 +#include <linux/sched.h>
65291 +#include <linux/mm.h>
65292 +#include <linux/scatterlist.h>
65293 +#include <linux/crypto.h>
65294 +#include <linux/gracl.h>
65295 +
65296 +
65297 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65298 +#error "crypto and sha256 must be built into the kernel"
65299 +#endif
65300 +
65301 +int
65302 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65303 +{
65304 + char *p;
65305 + struct crypto_hash *tfm;
65306 + struct hash_desc desc;
65307 + struct scatterlist sg;
65308 + unsigned char temp_sum[GR_SHA_LEN];
65309 + volatile int retval = 0;
65310 + volatile int dummy = 0;
65311 + unsigned int i;
65312 +
65313 + sg_init_table(&sg, 1);
65314 +
65315 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65316 + if (IS_ERR(tfm)) {
65317 + /* should never happen, since sha256 should be built in */
65318 + return 1;
65319 + }
65320 +
65321 + desc.tfm = tfm;
65322 + desc.flags = 0;
65323 +
65324 + crypto_hash_init(&desc);
65325 +
65326 + p = salt;
65327 + sg_set_buf(&sg, p, GR_SALT_LEN);
65328 + crypto_hash_update(&desc, &sg, sg.length);
65329 +
65330 + p = entry->pw;
65331 + sg_set_buf(&sg, p, strlen(p));
65332 +
65333 + crypto_hash_update(&desc, &sg, sg.length);
65334 +
65335 + crypto_hash_final(&desc, temp_sum);
65336 +
65337 + memset(entry->pw, 0, GR_PW_LEN);
65338 +
65339 + for (i = 0; i < GR_SHA_LEN; i++)
65340 + if (sum[i] != temp_sum[i])
65341 + retval = 1;
65342 + else
65343 + dummy = 1; // waste a cycle
65344 +
65345 + crypto_free_hash(tfm);
65346 +
65347 + return retval;
65348 +}
65349 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65350 index 3cd9ccd..fe16d47 100644
65351 --- a/include/acpi/acpi_bus.h
65352 +++ b/include/acpi/acpi_bus.h
65353 @@ -107,7 +107,7 @@ struct acpi_device_ops {
65354 acpi_op_bind bind;
65355 acpi_op_unbind unbind;
65356 acpi_op_notify notify;
65357 -};
65358 +} __no_const;
65359
65360 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65361
65362 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65363 index f4906f6..71feb73 100644
65364 --- a/include/acpi/acpi_drivers.h
65365 +++ b/include/acpi/acpi_drivers.h
65366 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65367 Dock Station
65368 -------------------------------------------------------------------------- */
65369 struct acpi_dock_ops {
65370 - acpi_notify_handler handler;
65371 - acpi_notify_handler uevent;
65372 + const acpi_notify_handler handler;
65373 + const acpi_notify_handler uevent;
65374 };
65375
65376 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65377 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65378 extern int register_dock_notifier(struct notifier_block *nb);
65379 extern void unregister_dock_notifier(struct notifier_block *nb);
65380 extern int register_hotplug_dock_device(acpi_handle handle,
65381 - struct acpi_dock_ops *ops,
65382 + const struct acpi_dock_ops *ops,
65383 void *context);
65384 extern void unregister_hotplug_dock_device(acpi_handle handle);
65385 #else
65386 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65387 {
65388 }
65389 static inline int register_hotplug_dock_device(acpi_handle handle,
65390 - struct acpi_dock_ops *ops,
65391 + const struct acpi_dock_ops *ops,
65392 void *context)
65393 {
65394 return -ENODEV;
65395 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65396 index b7babf0..a9ac9fc 100644
65397 --- a/include/asm-generic/atomic-long.h
65398 +++ b/include/asm-generic/atomic-long.h
65399 @@ -22,6 +22,12 @@
65400
65401 typedef atomic64_t atomic_long_t;
65402
65403 +#ifdef CONFIG_PAX_REFCOUNT
65404 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
65405 +#else
65406 +typedef atomic64_t atomic_long_unchecked_t;
65407 +#endif
65408 +
65409 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65410
65411 static inline long atomic_long_read(atomic_long_t *l)
65412 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65413 return (long)atomic64_read(v);
65414 }
65415
65416 +#ifdef CONFIG_PAX_REFCOUNT
65417 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65418 +{
65419 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65420 +
65421 + return (long)atomic64_read_unchecked(v);
65422 +}
65423 +#endif
65424 +
65425 static inline void atomic_long_set(atomic_long_t *l, long i)
65426 {
65427 atomic64_t *v = (atomic64_t *)l;
65428 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65429 atomic64_set(v, i);
65430 }
65431
65432 +#ifdef CONFIG_PAX_REFCOUNT
65433 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65434 +{
65435 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65436 +
65437 + atomic64_set_unchecked(v, i);
65438 +}
65439 +#endif
65440 +
65441 static inline void atomic_long_inc(atomic_long_t *l)
65442 {
65443 atomic64_t *v = (atomic64_t *)l;
65444 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65445 atomic64_inc(v);
65446 }
65447
65448 +#ifdef CONFIG_PAX_REFCOUNT
65449 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65450 +{
65451 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65452 +
65453 + atomic64_inc_unchecked(v);
65454 +}
65455 +#endif
65456 +
65457 static inline void atomic_long_dec(atomic_long_t *l)
65458 {
65459 atomic64_t *v = (atomic64_t *)l;
65460 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65461 atomic64_dec(v);
65462 }
65463
65464 +#ifdef CONFIG_PAX_REFCOUNT
65465 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65466 +{
65467 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65468 +
65469 + atomic64_dec_unchecked(v);
65470 +}
65471 +#endif
65472 +
65473 static inline void atomic_long_add(long i, atomic_long_t *l)
65474 {
65475 atomic64_t *v = (atomic64_t *)l;
65476 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65477 atomic64_add(i, v);
65478 }
65479
65480 +#ifdef CONFIG_PAX_REFCOUNT
65481 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65482 +{
65483 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65484 +
65485 + atomic64_add_unchecked(i, v);
65486 +}
65487 +#endif
65488 +
65489 static inline void atomic_long_sub(long i, atomic_long_t *l)
65490 {
65491 atomic64_t *v = (atomic64_t *)l;
65492 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65493 return (long)atomic64_inc_return(v);
65494 }
65495
65496 +#ifdef CONFIG_PAX_REFCOUNT
65497 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65498 +{
65499 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65500 +
65501 + return (long)atomic64_inc_return_unchecked(v);
65502 +}
65503 +#endif
65504 +
65505 static inline long atomic_long_dec_return(atomic_long_t *l)
65506 {
65507 atomic64_t *v = (atomic64_t *)l;
65508 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65509
65510 typedef atomic_t atomic_long_t;
65511
65512 +#ifdef CONFIG_PAX_REFCOUNT
65513 +typedef atomic_unchecked_t atomic_long_unchecked_t;
65514 +#else
65515 +typedef atomic_t atomic_long_unchecked_t;
65516 +#endif
65517 +
65518 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65519 static inline long atomic_long_read(atomic_long_t *l)
65520 {
65521 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65522 return (long)atomic_read(v);
65523 }
65524
65525 +#ifdef CONFIG_PAX_REFCOUNT
65526 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65527 +{
65528 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65529 +
65530 + return (long)atomic_read_unchecked(v);
65531 +}
65532 +#endif
65533 +
65534 static inline void atomic_long_set(atomic_long_t *l, long i)
65535 {
65536 atomic_t *v = (atomic_t *)l;
65537 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65538 atomic_set(v, i);
65539 }
65540
65541 +#ifdef CONFIG_PAX_REFCOUNT
65542 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65543 +{
65544 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65545 +
65546 + atomic_set_unchecked(v, i);
65547 +}
65548 +#endif
65549 +
65550 static inline void atomic_long_inc(atomic_long_t *l)
65551 {
65552 atomic_t *v = (atomic_t *)l;
65553 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65554 atomic_inc(v);
65555 }
65556
65557 +#ifdef CONFIG_PAX_REFCOUNT
65558 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65559 +{
65560 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65561 +
65562 + atomic_inc_unchecked(v);
65563 +}
65564 +#endif
65565 +
65566 static inline void atomic_long_dec(atomic_long_t *l)
65567 {
65568 atomic_t *v = (atomic_t *)l;
65569 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65570 atomic_dec(v);
65571 }
65572
65573 +#ifdef CONFIG_PAX_REFCOUNT
65574 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65575 +{
65576 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65577 +
65578 + atomic_dec_unchecked(v);
65579 +}
65580 +#endif
65581 +
65582 static inline void atomic_long_add(long i, atomic_long_t *l)
65583 {
65584 atomic_t *v = (atomic_t *)l;
65585 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65586 atomic_add(i, v);
65587 }
65588
65589 +#ifdef CONFIG_PAX_REFCOUNT
65590 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65591 +{
65592 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65593 +
65594 + atomic_add_unchecked(i, v);
65595 +}
65596 +#endif
65597 +
65598 static inline void atomic_long_sub(long i, atomic_long_t *l)
65599 {
65600 atomic_t *v = (atomic_t *)l;
65601 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65602 return (long)atomic_inc_return(v);
65603 }
65604
65605 +#ifdef CONFIG_PAX_REFCOUNT
65606 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65607 +{
65608 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65609 +
65610 + return (long)atomic_inc_return_unchecked(v);
65611 +}
65612 +#endif
65613 +
65614 static inline long atomic_long_dec_return(atomic_long_t *l)
65615 {
65616 atomic_t *v = (atomic_t *)l;
65617 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65618
65619 #endif /* BITS_PER_LONG == 64 */
65620
65621 +#ifdef CONFIG_PAX_REFCOUNT
65622 +static inline void pax_refcount_needs_these_functions(void)
65623 +{
65624 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65625 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65626 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65627 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65628 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65629 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65630 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65631 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65632 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65633 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65634 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65635 +
65636 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65637 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65638 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65639 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65640 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65641 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65642 +}
65643 +#else
65644 +#define atomic_read_unchecked(v) atomic_read(v)
65645 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65646 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65647 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65648 +#define atomic_inc_unchecked(v) atomic_inc(v)
65649 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65650 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65651 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65652 +#define atomic_dec_unchecked(v) atomic_dec(v)
65653 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65654 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65655 +
65656 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65657 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65658 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65659 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65660 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65661 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65662 +#endif
65663 +
65664 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65665 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65666 index b18ce4f..2ee2843 100644
65667 --- a/include/asm-generic/atomic64.h
65668 +++ b/include/asm-generic/atomic64.h
65669 @@ -16,6 +16,8 @@ typedef struct {
65670 long long counter;
65671 } atomic64_t;
65672
65673 +typedef atomic64_t atomic64_unchecked_t;
65674 +
65675 #define ATOMIC64_INIT(i) { (i) }
65676
65677 extern long long atomic64_read(const atomic64_t *v);
65678 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65679 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65680 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65681
65682 +#define atomic64_read_unchecked(v) atomic64_read(v)
65683 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65684 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65685 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65686 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65687 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65688 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65689 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65690 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65691 +
65692 #endif /* _ASM_GENERIC_ATOMIC64_H */
65693 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65694 index d48ddf0..656a0ac 100644
65695 --- a/include/asm-generic/bug.h
65696 +++ b/include/asm-generic/bug.h
65697 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65698
65699 #else /* !CONFIG_BUG */
65700 #ifndef HAVE_ARCH_BUG
65701 -#define BUG() do {} while(0)
65702 +#define BUG() do { for (;;) ; } while(0)
65703 #endif
65704
65705 #ifndef HAVE_ARCH_BUG_ON
65706 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65707 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65708 #endif
65709
65710 #ifndef HAVE_ARCH_WARN_ON
65711 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65712 index 1bfcfe5..e04c5c9 100644
65713 --- a/include/asm-generic/cache.h
65714 +++ b/include/asm-generic/cache.h
65715 @@ -6,7 +6,7 @@
65716 * cache lines need to provide their own cache.h.
65717 */
65718
65719 -#define L1_CACHE_SHIFT 5
65720 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65721 +#define L1_CACHE_SHIFT 5UL
65722 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65723
65724 #endif /* __ASM_GENERIC_CACHE_H */
65725 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65726 index 6920695..41038bc 100644
65727 --- a/include/asm-generic/dma-mapping-common.h
65728 +++ b/include/asm-generic/dma-mapping-common.h
65729 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65730 enum dma_data_direction dir,
65731 struct dma_attrs *attrs)
65732 {
65733 - struct dma_map_ops *ops = get_dma_ops(dev);
65734 + const struct dma_map_ops *ops = get_dma_ops(dev);
65735 dma_addr_t addr;
65736
65737 kmemcheck_mark_initialized(ptr, size);
65738 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65739 enum dma_data_direction dir,
65740 struct dma_attrs *attrs)
65741 {
65742 - struct dma_map_ops *ops = get_dma_ops(dev);
65743 + const struct dma_map_ops *ops = get_dma_ops(dev);
65744
65745 BUG_ON(!valid_dma_direction(dir));
65746 if (ops->unmap_page)
65747 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65748 int nents, enum dma_data_direction dir,
65749 struct dma_attrs *attrs)
65750 {
65751 - struct dma_map_ops *ops = get_dma_ops(dev);
65752 + const struct dma_map_ops *ops = get_dma_ops(dev);
65753 int i, ents;
65754 struct scatterlist *s;
65755
65756 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65757 int nents, enum dma_data_direction dir,
65758 struct dma_attrs *attrs)
65759 {
65760 - struct dma_map_ops *ops = get_dma_ops(dev);
65761 + const struct dma_map_ops *ops = get_dma_ops(dev);
65762
65763 BUG_ON(!valid_dma_direction(dir));
65764 debug_dma_unmap_sg(dev, sg, nents, dir);
65765 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65766 size_t offset, size_t size,
65767 enum dma_data_direction dir)
65768 {
65769 - struct dma_map_ops *ops = get_dma_ops(dev);
65770 + const struct dma_map_ops *ops = get_dma_ops(dev);
65771 dma_addr_t addr;
65772
65773 kmemcheck_mark_initialized(page_address(page) + offset, size);
65774 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65775 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65776 size_t size, enum dma_data_direction dir)
65777 {
65778 - struct dma_map_ops *ops = get_dma_ops(dev);
65779 + const struct dma_map_ops *ops = get_dma_ops(dev);
65780
65781 BUG_ON(!valid_dma_direction(dir));
65782 if (ops->unmap_page)
65783 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65784 size_t size,
65785 enum dma_data_direction dir)
65786 {
65787 - struct dma_map_ops *ops = get_dma_ops(dev);
65788 + const struct dma_map_ops *ops = get_dma_ops(dev);
65789
65790 BUG_ON(!valid_dma_direction(dir));
65791 if (ops->sync_single_for_cpu)
65792 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65793 dma_addr_t addr, size_t size,
65794 enum dma_data_direction dir)
65795 {
65796 - struct dma_map_ops *ops = get_dma_ops(dev);
65797 + const struct dma_map_ops *ops = get_dma_ops(dev);
65798
65799 BUG_ON(!valid_dma_direction(dir));
65800 if (ops->sync_single_for_device)
65801 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65802 size_t size,
65803 enum dma_data_direction dir)
65804 {
65805 - struct dma_map_ops *ops = get_dma_ops(dev);
65806 + const struct dma_map_ops *ops = get_dma_ops(dev);
65807
65808 BUG_ON(!valid_dma_direction(dir));
65809 if (ops->sync_single_range_for_cpu) {
65810 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65811 size_t size,
65812 enum dma_data_direction dir)
65813 {
65814 - struct dma_map_ops *ops = get_dma_ops(dev);
65815 + const struct dma_map_ops *ops = get_dma_ops(dev);
65816
65817 BUG_ON(!valid_dma_direction(dir));
65818 if (ops->sync_single_range_for_device) {
65819 @@ -155,7 +155,7 @@ static inline void
65820 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65821 int nelems, enum dma_data_direction dir)
65822 {
65823 - struct dma_map_ops *ops = get_dma_ops(dev);
65824 + const struct dma_map_ops *ops = get_dma_ops(dev);
65825
65826 BUG_ON(!valid_dma_direction(dir));
65827 if (ops->sync_sg_for_cpu)
65828 @@ -167,7 +167,7 @@ static inline void
65829 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65830 int nelems, enum dma_data_direction dir)
65831 {
65832 - struct dma_map_ops *ops = get_dma_ops(dev);
65833 + const struct dma_map_ops *ops = get_dma_ops(dev);
65834
65835 BUG_ON(!valid_dma_direction(dir));
65836 if (ops->sync_sg_for_device)
65837 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65838 index 0d68a1e..b74a761 100644
65839 --- a/include/asm-generic/emergency-restart.h
65840 +++ b/include/asm-generic/emergency-restart.h
65841 @@ -1,7 +1,7 @@
65842 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65843 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65844
65845 -static inline void machine_emergency_restart(void)
65846 +static inline __noreturn void machine_emergency_restart(void)
65847 {
65848 machine_restart(NULL);
65849 }
65850 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65851 index 3c2344f..4590a7d 100644
65852 --- a/include/asm-generic/futex.h
65853 +++ b/include/asm-generic/futex.h
65854 @@ -6,7 +6,7 @@
65855 #include <asm/errno.h>
65856
65857 static inline int
65858 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65859 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65860 {
65861 int op = (encoded_op >> 28) & 7;
65862 int cmp = (encoded_op >> 24) & 15;
65863 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65864 }
65865
65866 static inline int
65867 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65868 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65869 {
65870 return -ENOSYS;
65871 }
65872 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65873 index 1ca3efc..e3dc852 100644
65874 --- a/include/asm-generic/int-l64.h
65875 +++ b/include/asm-generic/int-l64.h
65876 @@ -46,6 +46,8 @@ typedef unsigned int u32;
65877 typedef signed long s64;
65878 typedef unsigned long u64;
65879
65880 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65881 +
65882 #define S8_C(x) x
65883 #define U8_C(x) x ## U
65884 #define S16_C(x) x
65885 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65886 index f394147..b6152b9 100644
65887 --- a/include/asm-generic/int-ll64.h
65888 +++ b/include/asm-generic/int-ll64.h
65889 @@ -51,6 +51,8 @@ typedef unsigned int u32;
65890 typedef signed long long s64;
65891 typedef unsigned long long u64;
65892
65893 +typedef unsigned long long intoverflow_t;
65894 +
65895 #define S8_C(x) x
65896 #define U8_C(x) x ## U
65897 #define S16_C(x) x
65898 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65899 index e5f234a..cdb16b3 100644
65900 --- a/include/asm-generic/kmap_types.h
65901 +++ b/include/asm-generic/kmap_types.h
65902 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65903 KMAP_D(16) KM_IRQ_PTE,
65904 KMAP_D(17) KM_NMI,
65905 KMAP_D(18) KM_NMI_PTE,
65906 -KMAP_D(19) KM_TYPE_NR
65907 +KMAP_D(19) KM_CLEARPAGE,
65908 +KMAP_D(20) KM_TYPE_NR
65909 };
65910
65911 #undef KMAP_D
65912 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65913 index 725612b..9cc513a 100644
65914 --- a/include/asm-generic/pgtable-nopmd.h
65915 +++ b/include/asm-generic/pgtable-nopmd.h
65916 @@ -1,14 +1,19 @@
65917 #ifndef _PGTABLE_NOPMD_H
65918 #define _PGTABLE_NOPMD_H
65919
65920 -#ifndef __ASSEMBLY__
65921 -
65922 #include <asm-generic/pgtable-nopud.h>
65923
65924 -struct mm_struct;
65925 -
65926 #define __PAGETABLE_PMD_FOLDED
65927
65928 +#define PMD_SHIFT PUD_SHIFT
65929 +#define PTRS_PER_PMD 1
65930 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65931 +#define PMD_MASK (~(PMD_SIZE-1))
65932 +
65933 +#ifndef __ASSEMBLY__
65934 +
65935 +struct mm_struct;
65936 +
65937 /*
65938 * Having the pmd type consist of a pud gets the size right, and allows
65939 * us to conceptually access the pud entry that this pmd is folded into
65940 @@ -16,11 +21,6 @@ struct mm_struct;
65941 */
65942 typedef struct { pud_t pud; } pmd_t;
65943
65944 -#define PMD_SHIFT PUD_SHIFT
65945 -#define PTRS_PER_PMD 1
65946 -#define PMD_SIZE (1UL << PMD_SHIFT)
65947 -#define PMD_MASK (~(PMD_SIZE-1))
65948 -
65949 /*
65950 * The "pud_xxx()" functions here are trivial for a folded two-level
65951 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65952 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65953 index 810431d..ccc3638 100644
65954 --- a/include/asm-generic/pgtable-nopud.h
65955 +++ b/include/asm-generic/pgtable-nopud.h
65956 @@ -1,10 +1,15 @@
65957 #ifndef _PGTABLE_NOPUD_H
65958 #define _PGTABLE_NOPUD_H
65959
65960 -#ifndef __ASSEMBLY__
65961 -
65962 #define __PAGETABLE_PUD_FOLDED
65963
65964 +#define PUD_SHIFT PGDIR_SHIFT
65965 +#define PTRS_PER_PUD 1
65966 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65967 +#define PUD_MASK (~(PUD_SIZE-1))
65968 +
65969 +#ifndef __ASSEMBLY__
65970 +
65971 /*
65972 * Having the pud type consist of a pgd gets the size right, and allows
65973 * us to conceptually access the pgd entry that this pud is folded into
65974 @@ -12,11 +17,6 @@
65975 */
65976 typedef struct { pgd_t pgd; } pud_t;
65977
65978 -#define PUD_SHIFT PGDIR_SHIFT
65979 -#define PTRS_PER_PUD 1
65980 -#define PUD_SIZE (1UL << PUD_SHIFT)
65981 -#define PUD_MASK (~(PUD_SIZE-1))
65982 -
65983 /*
65984 * The "pgd_xxx()" functions here are trivial for a folded two-level
65985 * setup: the pud is never bad, and a pud always exists (as it's folded
65986 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65987 index e2bd73e..fea8ed3 100644
65988 --- a/include/asm-generic/pgtable.h
65989 +++ b/include/asm-generic/pgtable.h
65990 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65991 unsigned long size);
65992 #endif
65993
65994 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65995 +static inline unsigned long pax_open_kernel(void) { return 0; }
65996 +#endif
65997 +
65998 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65999 +static inline unsigned long pax_close_kernel(void) { return 0; }
66000 +#endif
66001 +
66002 #endif /* !__ASSEMBLY__ */
66003
66004 #endif /* _ASM_GENERIC_PGTABLE_H */
66005 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66006 index b6e818f..21aa58a 100644
66007 --- a/include/asm-generic/vmlinux.lds.h
66008 +++ b/include/asm-generic/vmlinux.lds.h
66009 @@ -199,6 +199,7 @@
66010 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66011 VMLINUX_SYMBOL(__start_rodata) = .; \
66012 *(.rodata) *(.rodata.*) \
66013 + *(.data.read_only) \
66014 *(__vermagic) /* Kernel version magic */ \
66015 *(__markers_strings) /* Markers: strings */ \
66016 *(__tracepoints_strings)/* Tracepoints: strings */ \
66017 @@ -656,22 +657,24 @@
66018 * section in the linker script will go there too. @phdr should have
66019 * a leading colon.
66020 *
66021 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66022 + * Note that this macros defines per_cpu_load as an absolute symbol.
66023 * If there is no need to put the percpu section at a predetermined
66024 * address, use PERCPU().
66025 */
66026 #define PERCPU_VADDR(vaddr, phdr) \
66027 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66028 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66029 + per_cpu_load = .; \
66030 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66031 - LOAD_OFFSET) { \
66032 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66033 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66034 *(.data.percpu.first) \
66035 - *(.data.percpu.page_aligned) \
66036 *(.data.percpu) \
66037 + . = ALIGN(PAGE_SIZE); \
66038 + *(.data.percpu.page_aligned) \
66039 *(.data.percpu.shared_aligned) \
66040 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66041 } phdr \
66042 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66043 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66044
66045 /**
66046 * PERCPU - define output section for percpu area, simple version
66047 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66048 index ebab6a6..351dba1 100644
66049 --- a/include/drm/drmP.h
66050 +++ b/include/drm/drmP.h
66051 @@ -71,6 +71,7 @@
66052 #include <linux/workqueue.h>
66053 #include <linux/poll.h>
66054 #include <asm/pgalloc.h>
66055 +#include <asm/local.h>
66056 #include "drm.h"
66057
66058 #include <linux/idr.h>
66059 @@ -814,7 +815,7 @@ struct drm_driver {
66060 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66061
66062 /* Driver private ops for this object */
66063 - struct vm_operations_struct *gem_vm_ops;
66064 + const struct vm_operations_struct *gem_vm_ops;
66065
66066 int major;
66067 int minor;
66068 @@ -917,7 +918,7 @@ struct drm_device {
66069
66070 /** \name Usage Counters */
66071 /*@{ */
66072 - int open_count; /**< Outstanding files open */
66073 + local_t open_count; /**< Outstanding files open */
66074 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66075 atomic_t vma_count; /**< Outstanding vma areas open */
66076 int buf_use; /**< Buffers in use -- cannot alloc */
66077 @@ -928,7 +929,7 @@ struct drm_device {
66078 /*@{ */
66079 unsigned long counters;
66080 enum drm_stat_type types[15];
66081 - atomic_t counts[15];
66082 + atomic_unchecked_t counts[15];
66083 /*@} */
66084
66085 struct list_head filelist;
66086 @@ -1016,7 +1017,7 @@ struct drm_device {
66087 struct pci_controller *hose;
66088 #endif
66089 struct drm_sg_mem *sg; /**< Scatter gather memory */
66090 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
66091 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
66092 void *dev_private; /**< device private data */
66093 void *mm_private;
66094 struct address_space *dev_mapping;
66095 @@ -1042,11 +1043,11 @@ struct drm_device {
66096 spinlock_t object_name_lock;
66097 struct idr object_name_idr;
66098 atomic_t object_count;
66099 - atomic_t object_memory;
66100 + atomic_unchecked_t object_memory;
66101 atomic_t pin_count;
66102 - atomic_t pin_memory;
66103 + atomic_unchecked_t pin_memory;
66104 atomic_t gtt_count;
66105 - atomic_t gtt_memory;
66106 + atomic_unchecked_t gtt_memory;
66107 uint32_t gtt_total;
66108 uint32_t invalidate_domains; /* domains pending invalidation */
66109 uint32_t flush_domains; /* domains pending flush */
66110 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66111 index b29e201..3413cc9 100644
66112 --- a/include/drm/drm_crtc_helper.h
66113 +++ b/include/drm/drm_crtc_helper.h
66114 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66115
66116 /* reload the current crtc LUT */
66117 void (*load_lut)(struct drm_crtc *crtc);
66118 -};
66119 +} __no_const;
66120
66121 struct drm_encoder_helper_funcs {
66122 void (*dpms)(struct drm_encoder *encoder, int mode);
66123 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66124 struct drm_connector *connector);
66125 /* disable encoder when not in use - more explicit than dpms off */
66126 void (*disable)(struct drm_encoder *encoder);
66127 -};
66128 +} __no_const;
66129
66130 struct drm_connector_helper_funcs {
66131 int (*get_modes)(struct drm_connector *connector);
66132 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66133 index b199170..6f9e64c 100644
66134 --- a/include/drm/ttm/ttm_memory.h
66135 +++ b/include/drm/ttm/ttm_memory.h
66136 @@ -47,7 +47,7 @@
66137
66138 struct ttm_mem_shrink {
66139 int (*do_shrink) (struct ttm_mem_shrink *);
66140 -};
66141 +} __no_const;
66142
66143 /**
66144 * struct ttm_mem_global - Global memory accounting structure.
66145 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66146 index e86dfca..40cc55f 100644
66147 --- a/include/linux/a.out.h
66148 +++ b/include/linux/a.out.h
66149 @@ -39,6 +39,14 @@ enum machine_type {
66150 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66151 };
66152
66153 +/* Constants for the N_FLAGS field */
66154 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66155 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66156 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66157 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66158 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66159 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66160 +
66161 #if !defined (N_MAGIC)
66162 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66163 #endif
66164 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66165 index 817b237..62c10bc 100644
66166 --- a/include/linux/atmdev.h
66167 +++ b/include/linux/atmdev.h
66168 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66169 #endif
66170
66171 struct k_atm_aal_stats {
66172 -#define __HANDLE_ITEM(i) atomic_t i
66173 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66174 __AAL_STAT_ITEMS
66175 #undef __HANDLE_ITEM
66176 };
66177 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66178 index 0f5f578..8c4f884 100644
66179 --- a/include/linux/backlight.h
66180 +++ b/include/linux/backlight.h
66181 @@ -36,18 +36,18 @@ struct backlight_device;
66182 struct fb_info;
66183
66184 struct backlight_ops {
66185 - unsigned int options;
66186 + const unsigned int options;
66187
66188 #define BL_CORE_SUSPENDRESUME (1 << 0)
66189
66190 /* Notify the backlight driver some property has changed */
66191 - int (*update_status)(struct backlight_device *);
66192 + int (* const update_status)(struct backlight_device *);
66193 /* Return the current backlight brightness (accounting for power,
66194 fb_blank etc.) */
66195 - int (*get_brightness)(struct backlight_device *);
66196 + int (* const get_brightness)(struct backlight_device *);
66197 /* Check if given framebuffer device is the one bound to this backlight;
66198 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66199 - int (*check_fb)(struct fb_info *);
66200 + int (* const check_fb)(struct fb_info *);
66201 };
66202
66203 /* This structure defines all the properties of a backlight */
66204 @@ -86,7 +86,7 @@ struct backlight_device {
66205 registered this device has been unloaded, and if class_get_devdata()
66206 points to something in the body of that driver, it is also invalid. */
66207 struct mutex ops_lock;
66208 - struct backlight_ops *ops;
66209 + const struct backlight_ops *ops;
66210
66211 /* The framebuffer notifier block */
66212 struct notifier_block fb_notif;
66213 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66214 }
66215
66216 extern struct backlight_device *backlight_device_register(const char *name,
66217 - struct device *dev, void *devdata, struct backlight_ops *ops);
66218 + struct device *dev, void *devdata, const struct backlight_ops *ops);
66219 extern void backlight_device_unregister(struct backlight_device *bd);
66220 extern void backlight_force_update(struct backlight_device *bd,
66221 enum backlight_update_reason reason);
66222 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66223 index a3d802e..93a2ef4 100644
66224 --- a/include/linux/binfmts.h
66225 +++ b/include/linux/binfmts.h
66226 @@ -18,7 +18,7 @@ struct pt_regs;
66227 #define BINPRM_BUF_SIZE 128
66228
66229 #ifdef __KERNEL__
66230 -#include <linux/list.h>
66231 +#include <linux/sched.h>
66232
66233 #define CORENAME_MAX_SIZE 128
66234
66235 @@ -58,6 +58,7 @@ struct linux_binprm{
66236 unsigned interp_flags;
66237 unsigned interp_data;
66238 unsigned long loader, exec;
66239 + char tcomm[TASK_COMM_LEN];
66240 };
66241
66242 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66243 @@ -83,6 +84,7 @@ struct linux_binfmt {
66244 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66245 int (*load_shlib)(struct file *);
66246 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66247 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66248 unsigned long min_coredump; /* minimal dump size */
66249 int hasvdso;
66250 };
66251 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66252 index 5eb6cb0..a2906d2 100644
66253 --- a/include/linux/blkdev.h
66254 +++ b/include/linux/blkdev.h
66255 @@ -1281,7 +1281,7 @@ struct block_device_operations {
66256 int (*revalidate_disk) (struct gendisk *);
66257 int (*getgeo)(struct block_device *, struct hd_geometry *);
66258 struct module *owner;
66259 -};
66260 +} __do_const;
66261
66262 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66263 unsigned long);
66264 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66265 index 3b73b99..629d21b 100644
66266 --- a/include/linux/blktrace_api.h
66267 +++ b/include/linux/blktrace_api.h
66268 @@ -160,7 +160,7 @@ struct blk_trace {
66269 struct dentry *dir;
66270 struct dentry *dropped_file;
66271 struct dentry *msg_file;
66272 - atomic_t dropped;
66273 + atomic_unchecked_t dropped;
66274 };
66275
66276 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66277 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66278 index 83195fb..0b0f77d 100644
66279 --- a/include/linux/byteorder/little_endian.h
66280 +++ b/include/linux/byteorder/little_endian.h
66281 @@ -42,51 +42,51 @@
66282
66283 static inline __le64 __cpu_to_le64p(const __u64 *p)
66284 {
66285 - return (__force __le64)*p;
66286 + return (__force const __le64)*p;
66287 }
66288 static inline __u64 __le64_to_cpup(const __le64 *p)
66289 {
66290 - return (__force __u64)*p;
66291 + return (__force const __u64)*p;
66292 }
66293 static inline __le32 __cpu_to_le32p(const __u32 *p)
66294 {
66295 - return (__force __le32)*p;
66296 + return (__force const __le32)*p;
66297 }
66298 static inline __u32 __le32_to_cpup(const __le32 *p)
66299 {
66300 - return (__force __u32)*p;
66301 + return (__force const __u32)*p;
66302 }
66303 static inline __le16 __cpu_to_le16p(const __u16 *p)
66304 {
66305 - return (__force __le16)*p;
66306 + return (__force const __le16)*p;
66307 }
66308 static inline __u16 __le16_to_cpup(const __le16 *p)
66309 {
66310 - return (__force __u16)*p;
66311 + return (__force const __u16)*p;
66312 }
66313 static inline __be64 __cpu_to_be64p(const __u64 *p)
66314 {
66315 - return (__force __be64)__swab64p(p);
66316 + return (__force const __be64)__swab64p(p);
66317 }
66318 static inline __u64 __be64_to_cpup(const __be64 *p)
66319 {
66320 - return __swab64p((__u64 *)p);
66321 + return __swab64p((const __u64 *)p);
66322 }
66323 static inline __be32 __cpu_to_be32p(const __u32 *p)
66324 {
66325 - return (__force __be32)__swab32p(p);
66326 + return (__force const __be32)__swab32p(p);
66327 }
66328 static inline __u32 __be32_to_cpup(const __be32 *p)
66329 {
66330 - return __swab32p((__u32 *)p);
66331 + return __swab32p((const __u32 *)p);
66332 }
66333 static inline __be16 __cpu_to_be16p(const __u16 *p)
66334 {
66335 - return (__force __be16)__swab16p(p);
66336 + return (__force const __be16)__swab16p(p);
66337 }
66338 static inline __u16 __be16_to_cpup(const __be16 *p)
66339 {
66340 - return __swab16p((__u16 *)p);
66341 + return __swab16p((const __u16 *)p);
66342 }
66343 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66344 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66345 diff --git a/include/linux/cache.h b/include/linux/cache.h
66346 index 97e2488..e7576b9 100644
66347 --- a/include/linux/cache.h
66348 +++ b/include/linux/cache.h
66349 @@ -16,6 +16,10 @@
66350 #define __read_mostly
66351 #endif
66352
66353 +#ifndef __read_only
66354 +#define __read_only __read_mostly
66355 +#endif
66356 +
66357 #ifndef ____cacheline_aligned
66358 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66359 #endif
66360 diff --git a/include/linux/capability.h b/include/linux/capability.h
66361 index c8f2a5f7..1618a5c 100644
66362 --- a/include/linux/capability.h
66363 +++ b/include/linux/capability.h
66364 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66365 (security_real_capable_noaudit((t), (cap)) == 0)
66366
66367 extern int capable(int cap);
66368 +int capable_nolog(int cap);
66369
66370 /* audit system wants to get cap info from files as well */
66371 struct dentry;
66372 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66373 index 450fa59..86019fb 100644
66374 --- a/include/linux/compiler-gcc4.h
66375 +++ b/include/linux/compiler-gcc4.h
66376 @@ -36,4 +36,16 @@
66377 the kernel context */
66378 #define __cold __attribute__((__cold__))
66379
66380 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66381 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66382 +#define __bos0(ptr) __bos((ptr), 0)
66383 +#define __bos1(ptr) __bos((ptr), 1)
66384 +
66385 +#if __GNUC_MINOR__ >= 5
66386 +#ifdef CONSTIFY_PLUGIN
66387 +#define __no_const __attribute__((no_const))
66388 +#define __do_const __attribute__((do_const))
66389 +#endif
66390 +#endif
66391 +
66392 #endif
66393 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66394 index 04fb513..fd6477b 100644
66395 --- a/include/linux/compiler.h
66396 +++ b/include/linux/compiler.h
66397 @@ -5,11 +5,14 @@
66398
66399 #ifdef __CHECKER__
66400 # define __user __attribute__((noderef, address_space(1)))
66401 +# define __force_user __force __user
66402 # define __kernel /* default address space */
66403 +# define __force_kernel __force __kernel
66404 # define __safe __attribute__((safe))
66405 # define __force __attribute__((force))
66406 # define __nocast __attribute__((nocast))
66407 # define __iomem __attribute__((noderef, address_space(2)))
66408 +# define __force_iomem __force __iomem
66409 # define __acquires(x) __attribute__((context(x,0,1)))
66410 # define __releases(x) __attribute__((context(x,1,0)))
66411 # define __acquire(x) __context__(x,1)
66412 @@ -17,13 +20,34 @@
66413 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66414 extern void __chk_user_ptr(const volatile void __user *);
66415 extern void __chk_io_ptr(const volatile void __iomem *);
66416 +#elif defined(CHECKER_PLUGIN)
66417 +//# define __user
66418 +//# define __force_user
66419 +//# define __kernel
66420 +//# define __force_kernel
66421 +# define __safe
66422 +# define __force
66423 +# define __nocast
66424 +# define __iomem
66425 +# define __force_iomem
66426 +# define __chk_user_ptr(x) (void)0
66427 +# define __chk_io_ptr(x) (void)0
66428 +# define __builtin_warning(x, y...) (1)
66429 +# define __acquires(x)
66430 +# define __releases(x)
66431 +# define __acquire(x) (void)0
66432 +# define __release(x) (void)0
66433 +# define __cond_lock(x,c) (c)
66434 #else
66435 # define __user
66436 +# define __force_user
66437 # define __kernel
66438 +# define __force_kernel
66439 # define __safe
66440 # define __force
66441 # define __nocast
66442 # define __iomem
66443 +# define __force_iomem
66444 # define __chk_user_ptr(x) (void)0
66445 # define __chk_io_ptr(x) (void)0
66446 # define __builtin_warning(x, y...) (1)
66447 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66448 # define __attribute_const__ /* unimplemented */
66449 #endif
66450
66451 +#ifndef __no_const
66452 +# define __no_const
66453 +#endif
66454 +
66455 +#ifndef __do_const
66456 +# define __do_const
66457 +#endif
66458 +
66459 /*
66460 * Tell gcc if a function is cold. The compiler will assume any path
66461 * directly leading to the call is unlikely.
66462 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66463 #define __cold
66464 #endif
66465
66466 +#ifndef __alloc_size
66467 +#define __alloc_size(...)
66468 +#endif
66469 +
66470 +#ifndef __bos
66471 +#define __bos(ptr, arg)
66472 +#endif
66473 +
66474 +#ifndef __bos0
66475 +#define __bos0(ptr)
66476 +#endif
66477 +
66478 +#ifndef __bos1
66479 +#define __bos1(ptr)
66480 +#endif
66481 +
66482 /* Simple shorthand for a section definition */
66483 #ifndef __section
66484 # define __section(S) __attribute__ ((__section__(#S)))
66485 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66486 * use is to mediate communication between process-level code and irq/NMI
66487 * handlers, all running on the same CPU.
66488 */
66489 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66490 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66491 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66492
66493 #endif /* __LINUX_COMPILER_H */
66494 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66495 index fd92988..a3164bd 100644
66496 --- a/include/linux/crypto.h
66497 +++ b/include/linux/crypto.h
66498 @@ -394,7 +394,7 @@ struct cipher_tfm {
66499 const u8 *key, unsigned int keylen);
66500 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66501 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66502 -};
66503 +} __no_const;
66504
66505 struct hash_tfm {
66506 int (*init)(struct hash_desc *desc);
66507 @@ -415,13 +415,13 @@ struct compress_tfm {
66508 int (*cot_decompress)(struct crypto_tfm *tfm,
66509 const u8 *src, unsigned int slen,
66510 u8 *dst, unsigned int *dlen);
66511 -};
66512 +} __no_const;
66513
66514 struct rng_tfm {
66515 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66516 unsigned int dlen);
66517 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66518 -};
66519 +} __no_const;
66520
66521 #define crt_ablkcipher crt_u.ablkcipher
66522 #define crt_aead crt_u.aead
66523 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66524 index 30b93b2..cd7a8db 100644
66525 --- a/include/linux/dcache.h
66526 +++ b/include/linux/dcache.h
66527 @@ -119,6 +119,8 @@ struct dentry {
66528 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66529 };
66530
66531 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66532 +
66533 /*
66534 * dentry->d_lock spinlock nesting subclasses:
66535 *
66536 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66537 index 3e9bd6a..f4e1aa0 100644
66538 --- a/include/linux/decompress/mm.h
66539 +++ b/include/linux/decompress/mm.h
66540 @@ -78,7 +78,7 @@ static void free(void *where)
66541 * warnings when not needed (indeed large_malloc / large_free are not
66542 * needed by inflate */
66543
66544 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66545 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66546 #define free(a) kfree(a)
66547
66548 #define large_malloc(a) vmalloc(a)
66549 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66550 index 91b7618..92a93d32 100644
66551 --- a/include/linux/dma-mapping.h
66552 +++ b/include/linux/dma-mapping.h
66553 @@ -16,51 +16,51 @@ enum dma_data_direction {
66554 };
66555
66556 struct dma_map_ops {
66557 - void* (*alloc_coherent)(struct device *dev, size_t size,
66558 + void* (* const alloc_coherent)(struct device *dev, size_t size,
66559 dma_addr_t *dma_handle, gfp_t gfp);
66560 - void (*free_coherent)(struct device *dev, size_t size,
66561 + void (* const free_coherent)(struct device *dev, size_t size,
66562 void *vaddr, dma_addr_t dma_handle);
66563 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
66564 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66565 unsigned long offset, size_t size,
66566 enum dma_data_direction dir,
66567 struct dma_attrs *attrs);
66568 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66569 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66570 size_t size, enum dma_data_direction dir,
66571 struct dma_attrs *attrs);
66572 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
66573 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66574 int nents, enum dma_data_direction dir,
66575 struct dma_attrs *attrs);
66576 - void (*unmap_sg)(struct device *dev,
66577 + void (* const unmap_sg)(struct device *dev,
66578 struct scatterlist *sg, int nents,
66579 enum dma_data_direction dir,
66580 struct dma_attrs *attrs);
66581 - void (*sync_single_for_cpu)(struct device *dev,
66582 + void (* const sync_single_for_cpu)(struct device *dev,
66583 dma_addr_t dma_handle, size_t size,
66584 enum dma_data_direction dir);
66585 - void (*sync_single_for_device)(struct device *dev,
66586 + void (* const sync_single_for_device)(struct device *dev,
66587 dma_addr_t dma_handle, size_t size,
66588 enum dma_data_direction dir);
66589 - void (*sync_single_range_for_cpu)(struct device *dev,
66590 + void (* const sync_single_range_for_cpu)(struct device *dev,
66591 dma_addr_t dma_handle,
66592 unsigned long offset,
66593 size_t size,
66594 enum dma_data_direction dir);
66595 - void (*sync_single_range_for_device)(struct device *dev,
66596 + void (* const sync_single_range_for_device)(struct device *dev,
66597 dma_addr_t dma_handle,
66598 unsigned long offset,
66599 size_t size,
66600 enum dma_data_direction dir);
66601 - void (*sync_sg_for_cpu)(struct device *dev,
66602 + void (* const sync_sg_for_cpu)(struct device *dev,
66603 struct scatterlist *sg, int nents,
66604 enum dma_data_direction dir);
66605 - void (*sync_sg_for_device)(struct device *dev,
66606 + void (* const sync_sg_for_device)(struct device *dev,
66607 struct scatterlist *sg, int nents,
66608 enum dma_data_direction dir);
66609 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66610 - int (*dma_supported)(struct device *dev, u64 mask);
66611 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66612 + int (* const dma_supported)(struct device *dev, u64 mask);
66613 int (*set_dma_mask)(struct device *dev, u64 mask);
66614 int is_phys;
66615 -};
66616 +} __do_const;
66617
66618 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66619
66620 diff --git a/include/linux/dst.h b/include/linux/dst.h
66621 index e26fed8..b976d9f 100644
66622 --- a/include/linux/dst.h
66623 +++ b/include/linux/dst.h
66624 @@ -380,7 +380,7 @@ struct dst_node
66625 struct thread_pool *pool;
66626
66627 /* Transaction IDs live here */
66628 - atomic_long_t gen;
66629 + atomic_long_unchecked_t gen;
66630
66631 /*
66632 * How frequently and how many times transaction
66633 diff --git a/include/linux/elf.h b/include/linux/elf.h
66634 index 90a4ed0..d652617 100644
66635 --- a/include/linux/elf.h
66636 +++ b/include/linux/elf.h
66637 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66638 #define PT_GNU_EH_FRAME 0x6474e550
66639
66640 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66641 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66642 +
66643 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66644 +
66645 +/* Constants for the e_flags field */
66646 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66647 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66648 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66649 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66650 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66651 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66652
66653 /* These constants define the different elf file types */
66654 #define ET_NONE 0
66655 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66656 #define DT_DEBUG 21
66657 #define DT_TEXTREL 22
66658 #define DT_JMPREL 23
66659 +#define DT_FLAGS 30
66660 + #define DF_TEXTREL 0x00000004
66661 #define DT_ENCODING 32
66662 #define OLD_DT_LOOS 0x60000000
66663 #define DT_LOOS 0x6000000d
66664 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66665 #define PF_W 0x2
66666 #define PF_X 0x1
66667
66668 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66669 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66670 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66671 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66672 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66673 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66674 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66675 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66676 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66677 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66678 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66679 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66680 +
66681 typedef struct elf32_phdr{
66682 Elf32_Word p_type;
66683 Elf32_Off p_offset;
66684 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66685 #define EI_OSABI 7
66686 #define EI_PAD 8
66687
66688 +#define EI_PAX 14
66689 +
66690 #define ELFMAG0 0x7f /* EI_MAG */
66691 #define ELFMAG1 'E'
66692 #define ELFMAG2 'L'
66693 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66694 #define elf_phdr elf32_phdr
66695 #define elf_note elf32_note
66696 #define elf_addr_t Elf32_Off
66697 +#define elf_dyn Elf32_Dyn
66698
66699 #else
66700
66701 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66702 #define elf_phdr elf64_phdr
66703 #define elf_note elf64_note
66704 #define elf_addr_t Elf64_Off
66705 +#define elf_dyn Elf64_Dyn
66706
66707 #endif
66708
66709 diff --git a/include/linux/fs.h b/include/linux/fs.h
66710 index 1b9a47a..6fe2934 100644
66711 --- a/include/linux/fs.h
66712 +++ b/include/linux/fs.h
66713 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66714 unsigned long, unsigned long);
66715
66716 struct address_space_operations {
66717 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66718 - int (*readpage)(struct file *, struct page *);
66719 - void (*sync_page)(struct page *);
66720 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66721 + int (* const readpage)(struct file *, struct page *);
66722 + void (* const sync_page)(struct page *);
66723
66724 /* Write back some dirty pages from this mapping. */
66725 - int (*writepages)(struct address_space *, struct writeback_control *);
66726 + int (* const writepages)(struct address_space *, struct writeback_control *);
66727
66728 /* Set a page dirty. Return true if this dirtied it */
66729 - int (*set_page_dirty)(struct page *page);
66730 + int (* const set_page_dirty)(struct page *page);
66731
66732 - int (*readpages)(struct file *filp, struct address_space *mapping,
66733 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66734 struct list_head *pages, unsigned nr_pages);
66735
66736 - int (*write_begin)(struct file *, struct address_space *mapping,
66737 + int (* const write_begin)(struct file *, struct address_space *mapping,
66738 loff_t pos, unsigned len, unsigned flags,
66739 struct page **pagep, void **fsdata);
66740 - int (*write_end)(struct file *, struct address_space *mapping,
66741 + int (* const write_end)(struct file *, struct address_space *mapping,
66742 loff_t pos, unsigned len, unsigned copied,
66743 struct page *page, void *fsdata);
66744
66745 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66746 - sector_t (*bmap)(struct address_space *, sector_t);
66747 - void (*invalidatepage) (struct page *, unsigned long);
66748 - int (*releasepage) (struct page *, gfp_t);
66749 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66750 + sector_t (* const bmap)(struct address_space *, sector_t);
66751 + void (* const invalidatepage) (struct page *, unsigned long);
66752 + int (* const releasepage) (struct page *, gfp_t);
66753 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66754 loff_t offset, unsigned long nr_segs);
66755 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66756 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66757 void **, unsigned long *);
66758 /* migrate the contents of a page to the specified target */
66759 - int (*migratepage) (struct address_space *,
66760 + int (* const migratepage) (struct address_space *,
66761 struct page *, struct page *);
66762 - int (*launder_page) (struct page *);
66763 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66764 + int (* const launder_page) (struct page *);
66765 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66766 unsigned long);
66767 - int (*error_remove_page)(struct address_space *, struct page *);
66768 + int (* const error_remove_page)(struct address_space *, struct page *);
66769 };
66770
66771 /*
66772 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66773 typedef struct files_struct *fl_owner_t;
66774
66775 struct file_lock_operations {
66776 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66777 - void (*fl_release_private)(struct file_lock *);
66778 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66779 + void (* const fl_release_private)(struct file_lock *);
66780 };
66781
66782 struct lock_manager_operations {
66783 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66784 - void (*fl_notify)(struct file_lock *); /* unblock callback */
66785 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66786 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66787 - void (*fl_release_private)(struct file_lock *);
66788 - void (*fl_break)(struct file_lock *);
66789 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
66790 - int (*fl_change)(struct file_lock **, int);
66791 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66792 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
66793 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66794 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66795 + void (* const fl_release_private)(struct file_lock *);
66796 + void (* const fl_break)(struct file_lock *);
66797 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66798 + int (* const fl_change)(struct file_lock **, int);
66799 };
66800
66801 struct lock_manager {
66802 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66803 unsigned int fi_flags; /* Flags as passed from user */
66804 unsigned int fi_extents_mapped; /* Number of mapped extents */
66805 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66806 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66807 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66808 * array */
66809 };
66810 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66811 @@ -1512,7 +1512,8 @@ struct file_operations {
66812 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66813 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66814 int (*setlease)(struct file *, long, struct file_lock **);
66815 -};
66816 +} __do_const;
66817 +typedef struct file_operations __no_const file_operations_no_const;
66818
66819 struct inode_operations {
66820 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66821 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66822 unsigned long, loff_t *);
66823
66824 struct super_operations {
66825 - struct inode *(*alloc_inode)(struct super_block *sb);
66826 - void (*destroy_inode)(struct inode *);
66827 + struct inode *(* const alloc_inode)(struct super_block *sb);
66828 + void (* const destroy_inode)(struct inode *);
66829
66830 - void (*dirty_inode) (struct inode *);
66831 - int (*write_inode) (struct inode *, int);
66832 - void (*drop_inode) (struct inode *);
66833 - void (*delete_inode) (struct inode *);
66834 - void (*put_super) (struct super_block *);
66835 - void (*write_super) (struct super_block *);
66836 - int (*sync_fs)(struct super_block *sb, int wait);
66837 - int (*freeze_fs) (struct super_block *);
66838 - int (*unfreeze_fs) (struct super_block *);
66839 - int (*statfs) (struct dentry *, struct kstatfs *);
66840 - int (*remount_fs) (struct super_block *, int *, char *);
66841 - void (*clear_inode) (struct inode *);
66842 - void (*umount_begin) (struct super_block *);
66843 + void (* const dirty_inode) (struct inode *);
66844 + int (* const write_inode) (struct inode *, int);
66845 + void (* const drop_inode) (struct inode *);
66846 + void (* const delete_inode) (struct inode *);
66847 + void (* const put_super) (struct super_block *);
66848 + void (* const write_super) (struct super_block *);
66849 + int (* const sync_fs)(struct super_block *sb, int wait);
66850 + int (* const freeze_fs) (struct super_block *);
66851 + int (* const unfreeze_fs) (struct super_block *);
66852 + int (* const statfs) (struct dentry *, struct kstatfs *);
66853 + int (* const remount_fs) (struct super_block *, int *, char *);
66854 + void (* const clear_inode) (struct inode *);
66855 + void (* const umount_begin) (struct super_block *);
66856
66857 - int (*show_options)(struct seq_file *, struct vfsmount *);
66858 - int (*show_stats)(struct seq_file *, struct vfsmount *);
66859 + int (* const show_options)(struct seq_file *, struct vfsmount *);
66860 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
66861 #ifdef CONFIG_QUOTA
66862 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66863 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66864 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66865 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66866 #endif
66867 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66868 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66869 };
66870
66871 /*
66872 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66873 index 78a05bf..2a7d3e1 100644
66874 --- a/include/linux/fs_struct.h
66875 +++ b/include/linux/fs_struct.h
66876 @@ -4,7 +4,7 @@
66877 #include <linux/path.h>
66878
66879 struct fs_struct {
66880 - int users;
66881 + atomic_t users;
66882 rwlock_t lock;
66883 int umask;
66884 int in_exec;
66885 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66886 index 7be0c6f..2f63a2b 100644
66887 --- a/include/linux/fscache-cache.h
66888 +++ b/include/linux/fscache-cache.h
66889 @@ -116,7 +116,7 @@ struct fscache_operation {
66890 #endif
66891 };
66892
66893 -extern atomic_t fscache_op_debug_id;
66894 +extern atomic_unchecked_t fscache_op_debug_id;
66895 extern const struct slow_work_ops fscache_op_slow_work_ops;
66896
66897 extern void fscache_enqueue_operation(struct fscache_operation *);
66898 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66899 fscache_operation_release_t release)
66900 {
66901 atomic_set(&op->usage, 1);
66902 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66903 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66904 op->release = release;
66905 INIT_LIST_HEAD(&op->pend_link);
66906 fscache_set_op_state(op, "Init");
66907 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66908 index 4d6f47b..00bcedb 100644
66909 --- a/include/linux/fsnotify_backend.h
66910 +++ b/include/linux/fsnotify_backend.h
66911 @@ -86,6 +86,7 @@ struct fsnotify_ops {
66912 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66913 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66914 };
66915 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66916
66917 /*
66918 * A group is a "thing" that wants to receive notification about filesystem
66919 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66920 index 4ec5e67..42f1eb9 100644
66921 --- a/include/linux/ftrace_event.h
66922 +++ b/include/linux/ftrace_event.h
66923 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66924 int filter_type);
66925 extern int trace_define_common_fields(struct ftrace_event_call *call);
66926
66927 -#define is_signed_type(type) (((type)(-1)) < 0)
66928 +#define is_signed_type(type) (((type)(-1)) < (type)1)
66929
66930 int trace_set_clr_event(const char *system, const char *event, int set);
66931
66932 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66933 index 297df45..b6a74ff 100644
66934 --- a/include/linux/genhd.h
66935 +++ b/include/linux/genhd.h
66936 @@ -161,7 +161,7 @@ struct gendisk {
66937
66938 struct timer_rand_state *random;
66939
66940 - atomic_t sync_io; /* RAID */
66941 + atomic_unchecked_t sync_io; /* RAID */
66942 struct work_struct async_notify;
66943 #ifdef CONFIG_BLK_DEV_INTEGRITY
66944 struct blk_integrity *integrity;
66945 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66946 new file mode 100644
66947 index 0000000..af663cf
66948 --- /dev/null
66949 +++ b/include/linux/gracl.h
66950 @@ -0,0 +1,319 @@
66951 +#ifndef GR_ACL_H
66952 +#define GR_ACL_H
66953 +
66954 +#include <linux/grdefs.h>
66955 +#include <linux/resource.h>
66956 +#include <linux/capability.h>
66957 +#include <linux/dcache.h>
66958 +#include <asm/resource.h>
66959 +
66960 +/* Major status information */
66961 +
66962 +#define GR_VERSION "grsecurity 2.9"
66963 +#define GRSECURITY_VERSION 0x2900
66964 +
66965 +enum {
66966 + GR_SHUTDOWN = 0,
66967 + GR_ENABLE = 1,
66968 + GR_SPROLE = 2,
66969 + GR_RELOAD = 3,
66970 + GR_SEGVMOD = 4,
66971 + GR_STATUS = 5,
66972 + GR_UNSPROLE = 6,
66973 + GR_PASSSET = 7,
66974 + GR_SPROLEPAM = 8,
66975 +};
66976 +
66977 +/* Password setup definitions
66978 + * kernel/grhash.c */
66979 +enum {
66980 + GR_PW_LEN = 128,
66981 + GR_SALT_LEN = 16,
66982 + GR_SHA_LEN = 32,
66983 +};
66984 +
66985 +enum {
66986 + GR_SPROLE_LEN = 64,
66987 +};
66988 +
66989 +enum {
66990 + GR_NO_GLOB = 0,
66991 + GR_REG_GLOB,
66992 + GR_CREATE_GLOB
66993 +};
66994 +
66995 +#define GR_NLIMITS 32
66996 +
66997 +/* Begin Data Structures */
66998 +
66999 +struct sprole_pw {
67000 + unsigned char *rolename;
67001 + unsigned char salt[GR_SALT_LEN];
67002 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67003 +};
67004 +
67005 +struct name_entry {
67006 + __u32 key;
67007 + ino_t inode;
67008 + dev_t device;
67009 + char *name;
67010 + __u16 len;
67011 + __u8 deleted;
67012 + struct name_entry *prev;
67013 + struct name_entry *next;
67014 +};
67015 +
67016 +struct inodev_entry {
67017 + struct name_entry *nentry;
67018 + struct inodev_entry *prev;
67019 + struct inodev_entry *next;
67020 +};
67021 +
67022 +struct acl_role_db {
67023 + struct acl_role_label **r_hash;
67024 + __u32 r_size;
67025 +};
67026 +
67027 +struct inodev_db {
67028 + struct inodev_entry **i_hash;
67029 + __u32 i_size;
67030 +};
67031 +
67032 +struct name_db {
67033 + struct name_entry **n_hash;
67034 + __u32 n_size;
67035 +};
67036 +
67037 +struct crash_uid {
67038 + uid_t uid;
67039 + unsigned long expires;
67040 +};
67041 +
67042 +struct gr_hash_struct {
67043 + void **table;
67044 + void **nametable;
67045 + void *first;
67046 + __u32 table_size;
67047 + __u32 used_size;
67048 + int type;
67049 +};
67050 +
67051 +/* Userspace Grsecurity ACL data structures */
67052 +
67053 +struct acl_subject_label {
67054 + char *filename;
67055 + ino_t inode;
67056 + dev_t device;
67057 + __u32 mode;
67058 + kernel_cap_t cap_mask;
67059 + kernel_cap_t cap_lower;
67060 + kernel_cap_t cap_invert_audit;
67061 +
67062 + struct rlimit res[GR_NLIMITS];
67063 + __u32 resmask;
67064 +
67065 + __u8 user_trans_type;
67066 + __u8 group_trans_type;
67067 + uid_t *user_transitions;
67068 + gid_t *group_transitions;
67069 + __u16 user_trans_num;
67070 + __u16 group_trans_num;
67071 +
67072 + __u32 sock_families[2];
67073 + __u32 ip_proto[8];
67074 + __u32 ip_type;
67075 + struct acl_ip_label **ips;
67076 + __u32 ip_num;
67077 + __u32 inaddr_any_override;
67078 +
67079 + __u32 crashes;
67080 + unsigned long expires;
67081 +
67082 + struct acl_subject_label *parent_subject;
67083 + struct gr_hash_struct *hash;
67084 + struct acl_subject_label *prev;
67085 + struct acl_subject_label *next;
67086 +
67087 + struct acl_object_label **obj_hash;
67088 + __u32 obj_hash_size;
67089 + __u16 pax_flags;
67090 +};
67091 +
67092 +struct role_allowed_ip {
67093 + __u32 addr;
67094 + __u32 netmask;
67095 +
67096 + struct role_allowed_ip *prev;
67097 + struct role_allowed_ip *next;
67098 +};
67099 +
67100 +struct role_transition {
67101 + char *rolename;
67102 +
67103 + struct role_transition *prev;
67104 + struct role_transition *next;
67105 +};
67106 +
67107 +struct acl_role_label {
67108 + char *rolename;
67109 + uid_t uidgid;
67110 + __u16 roletype;
67111 +
67112 + __u16 auth_attempts;
67113 + unsigned long expires;
67114 +
67115 + struct acl_subject_label *root_label;
67116 + struct gr_hash_struct *hash;
67117 +
67118 + struct acl_role_label *prev;
67119 + struct acl_role_label *next;
67120 +
67121 + struct role_transition *transitions;
67122 + struct role_allowed_ip *allowed_ips;
67123 + uid_t *domain_children;
67124 + __u16 domain_child_num;
67125 +
67126 + mode_t umask;
67127 +
67128 + struct acl_subject_label **subj_hash;
67129 + __u32 subj_hash_size;
67130 +};
67131 +
67132 +struct user_acl_role_db {
67133 + struct acl_role_label **r_table;
67134 + __u32 num_pointers; /* Number of allocations to track */
67135 + __u32 num_roles; /* Number of roles */
67136 + __u32 num_domain_children; /* Number of domain children */
67137 + __u32 num_subjects; /* Number of subjects */
67138 + __u32 num_objects; /* Number of objects */
67139 +};
67140 +
67141 +struct acl_object_label {
67142 + char *filename;
67143 + ino_t inode;
67144 + dev_t device;
67145 + __u32 mode;
67146 +
67147 + struct acl_subject_label *nested;
67148 + struct acl_object_label *globbed;
67149 +
67150 + /* next two structures not used */
67151 +
67152 + struct acl_object_label *prev;
67153 + struct acl_object_label *next;
67154 +};
67155 +
67156 +struct acl_ip_label {
67157 + char *iface;
67158 + __u32 addr;
67159 + __u32 netmask;
67160 + __u16 low, high;
67161 + __u8 mode;
67162 + __u32 type;
67163 + __u32 proto[8];
67164 +
67165 + /* next two structures not used */
67166 +
67167 + struct acl_ip_label *prev;
67168 + struct acl_ip_label *next;
67169 +};
67170 +
67171 +struct gr_arg {
67172 + struct user_acl_role_db role_db;
67173 + unsigned char pw[GR_PW_LEN];
67174 + unsigned char salt[GR_SALT_LEN];
67175 + unsigned char sum[GR_SHA_LEN];
67176 + unsigned char sp_role[GR_SPROLE_LEN];
67177 + struct sprole_pw *sprole_pws;
67178 + dev_t segv_device;
67179 + ino_t segv_inode;
67180 + uid_t segv_uid;
67181 + __u16 num_sprole_pws;
67182 + __u16 mode;
67183 +};
67184 +
67185 +struct gr_arg_wrapper {
67186 + struct gr_arg *arg;
67187 + __u32 version;
67188 + __u32 size;
67189 +};
67190 +
67191 +struct subject_map {
67192 + struct acl_subject_label *user;
67193 + struct acl_subject_label *kernel;
67194 + struct subject_map *prev;
67195 + struct subject_map *next;
67196 +};
67197 +
67198 +struct acl_subj_map_db {
67199 + struct subject_map **s_hash;
67200 + __u32 s_size;
67201 +};
67202 +
67203 +/* End Data Structures Section */
67204 +
67205 +/* Hash functions generated by empirical testing by Brad Spengler
67206 + Makes good use of the low bits of the inode. Generally 0-1 times
67207 + in loop for successful match. 0-3 for unsuccessful match.
67208 + Shift/add algorithm with modulus of table size and an XOR*/
67209 +
67210 +static __inline__ unsigned int
67211 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67212 +{
67213 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67214 +}
67215 +
67216 + static __inline__ unsigned int
67217 +shash(const struct acl_subject_label *userp, const unsigned int sz)
67218 +{
67219 + return ((const unsigned long)userp % sz);
67220 +}
67221 +
67222 +static __inline__ unsigned int
67223 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67224 +{
67225 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67226 +}
67227 +
67228 +static __inline__ unsigned int
67229 +nhash(const char *name, const __u16 len, const unsigned int sz)
67230 +{
67231 + return full_name_hash((const unsigned char *)name, len) % sz;
67232 +}
67233 +
67234 +#define FOR_EACH_ROLE_START(role) \
67235 + role = role_list; \
67236 + while (role) {
67237 +
67238 +#define FOR_EACH_ROLE_END(role) \
67239 + role = role->prev; \
67240 + }
67241 +
67242 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67243 + subj = NULL; \
67244 + iter = 0; \
67245 + while (iter < role->subj_hash_size) { \
67246 + if (subj == NULL) \
67247 + subj = role->subj_hash[iter]; \
67248 + if (subj == NULL) { \
67249 + iter++; \
67250 + continue; \
67251 + }
67252 +
67253 +#define FOR_EACH_SUBJECT_END(subj,iter) \
67254 + subj = subj->next; \
67255 + if (subj == NULL) \
67256 + iter++; \
67257 + }
67258 +
67259 +
67260 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67261 + subj = role->hash->first; \
67262 + while (subj != NULL) {
67263 +
67264 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67265 + subj = subj->next; \
67266 + }
67267 +
67268 +#endif
67269 +
67270 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67271 new file mode 100644
67272 index 0000000..323ecf2
67273 --- /dev/null
67274 +++ b/include/linux/gralloc.h
67275 @@ -0,0 +1,9 @@
67276 +#ifndef __GRALLOC_H
67277 +#define __GRALLOC_H
67278 +
67279 +void acl_free_all(void);
67280 +int acl_alloc_stack_init(unsigned long size);
67281 +void *acl_alloc(unsigned long len);
67282 +void *acl_alloc_num(unsigned long num, unsigned long len);
67283 +
67284 +#endif
67285 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67286 new file mode 100644
67287 index 0000000..70d6cd5
67288 --- /dev/null
67289 +++ b/include/linux/grdefs.h
67290 @@ -0,0 +1,140 @@
67291 +#ifndef GRDEFS_H
67292 +#define GRDEFS_H
67293 +
67294 +/* Begin grsecurity status declarations */
67295 +
67296 +enum {
67297 + GR_READY = 0x01,
67298 + GR_STATUS_INIT = 0x00 // disabled state
67299 +};
67300 +
67301 +/* Begin ACL declarations */
67302 +
67303 +/* Role flags */
67304 +
67305 +enum {
67306 + GR_ROLE_USER = 0x0001,
67307 + GR_ROLE_GROUP = 0x0002,
67308 + GR_ROLE_DEFAULT = 0x0004,
67309 + GR_ROLE_SPECIAL = 0x0008,
67310 + GR_ROLE_AUTH = 0x0010,
67311 + GR_ROLE_NOPW = 0x0020,
67312 + GR_ROLE_GOD = 0x0040,
67313 + GR_ROLE_LEARN = 0x0080,
67314 + GR_ROLE_TPE = 0x0100,
67315 + GR_ROLE_DOMAIN = 0x0200,
67316 + GR_ROLE_PAM = 0x0400,
67317 + GR_ROLE_PERSIST = 0x800
67318 +};
67319 +
67320 +/* ACL Subject and Object mode flags */
67321 +enum {
67322 + GR_DELETED = 0x80000000
67323 +};
67324 +
67325 +/* ACL Object-only mode flags */
67326 +enum {
67327 + GR_READ = 0x00000001,
67328 + GR_APPEND = 0x00000002,
67329 + GR_WRITE = 0x00000004,
67330 + GR_EXEC = 0x00000008,
67331 + GR_FIND = 0x00000010,
67332 + GR_INHERIT = 0x00000020,
67333 + GR_SETID = 0x00000040,
67334 + GR_CREATE = 0x00000080,
67335 + GR_DELETE = 0x00000100,
67336 + GR_LINK = 0x00000200,
67337 + GR_AUDIT_READ = 0x00000400,
67338 + GR_AUDIT_APPEND = 0x00000800,
67339 + GR_AUDIT_WRITE = 0x00001000,
67340 + GR_AUDIT_EXEC = 0x00002000,
67341 + GR_AUDIT_FIND = 0x00004000,
67342 + GR_AUDIT_INHERIT= 0x00008000,
67343 + GR_AUDIT_SETID = 0x00010000,
67344 + GR_AUDIT_CREATE = 0x00020000,
67345 + GR_AUDIT_DELETE = 0x00040000,
67346 + GR_AUDIT_LINK = 0x00080000,
67347 + GR_PTRACERD = 0x00100000,
67348 + GR_NOPTRACE = 0x00200000,
67349 + GR_SUPPRESS = 0x00400000,
67350 + GR_NOLEARN = 0x00800000,
67351 + GR_INIT_TRANSFER= 0x01000000
67352 +};
67353 +
67354 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67355 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67356 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67357 +
67358 +/* ACL subject-only mode flags */
67359 +enum {
67360 + GR_KILL = 0x00000001,
67361 + GR_VIEW = 0x00000002,
67362 + GR_PROTECTED = 0x00000004,
67363 + GR_LEARN = 0x00000008,
67364 + GR_OVERRIDE = 0x00000010,
67365 + /* just a placeholder, this mode is only used in userspace */
67366 + GR_DUMMY = 0x00000020,
67367 + GR_PROTSHM = 0x00000040,
67368 + GR_KILLPROC = 0x00000080,
67369 + GR_KILLIPPROC = 0x00000100,
67370 + /* just a placeholder, this mode is only used in userspace */
67371 + GR_NOTROJAN = 0x00000200,
67372 + GR_PROTPROCFD = 0x00000400,
67373 + GR_PROCACCT = 0x00000800,
67374 + GR_RELAXPTRACE = 0x00001000,
67375 + GR_NESTED = 0x00002000,
67376 + GR_INHERITLEARN = 0x00004000,
67377 + GR_PROCFIND = 0x00008000,
67378 + GR_POVERRIDE = 0x00010000,
67379 + GR_KERNELAUTH = 0x00020000,
67380 + GR_ATSECURE = 0x00040000,
67381 + GR_SHMEXEC = 0x00080000
67382 +};
67383 +
67384 +enum {
67385 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67386 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67387 + GR_PAX_ENABLE_MPROTECT = 0x0004,
67388 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
67389 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67390 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67391 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67392 + GR_PAX_DISABLE_MPROTECT = 0x0400,
67393 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
67394 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67395 +};
67396 +
67397 +enum {
67398 + GR_ID_USER = 0x01,
67399 + GR_ID_GROUP = 0x02,
67400 +};
67401 +
67402 +enum {
67403 + GR_ID_ALLOW = 0x01,
67404 + GR_ID_DENY = 0x02,
67405 +};
67406 +
67407 +#define GR_CRASH_RES 31
67408 +#define GR_UIDTABLE_MAX 500
67409 +
67410 +/* begin resource learning section */
67411 +enum {
67412 + GR_RLIM_CPU_BUMP = 60,
67413 + GR_RLIM_FSIZE_BUMP = 50000,
67414 + GR_RLIM_DATA_BUMP = 10000,
67415 + GR_RLIM_STACK_BUMP = 1000,
67416 + GR_RLIM_CORE_BUMP = 10000,
67417 + GR_RLIM_RSS_BUMP = 500000,
67418 + GR_RLIM_NPROC_BUMP = 1,
67419 + GR_RLIM_NOFILE_BUMP = 5,
67420 + GR_RLIM_MEMLOCK_BUMP = 50000,
67421 + GR_RLIM_AS_BUMP = 500000,
67422 + GR_RLIM_LOCKS_BUMP = 2,
67423 + GR_RLIM_SIGPENDING_BUMP = 5,
67424 + GR_RLIM_MSGQUEUE_BUMP = 10000,
67425 + GR_RLIM_NICE_BUMP = 1,
67426 + GR_RLIM_RTPRIO_BUMP = 1,
67427 + GR_RLIM_RTTIME_BUMP = 1000000
67428 +};
67429 +
67430 +#endif
67431 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67432 new file mode 100644
67433 index 0000000..3826b91
67434 --- /dev/null
67435 +++ b/include/linux/grinternal.h
67436 @@ -0,0 +1,219 @@
67437 +#ifndef __GRINTERNAL_H
67438 +#define __GRINTERNAL_H
67439 +
67440 +#ifdef CONFIG_GRKERNSEC
67441 +
67442 +#include <linux/fs.h>
67443 +#include <linux/mnt_namespace.h>
67444 +#include <linux/nsproxy.h>
67445 +#include <linux/gracl.h>
67446 +#include <linux/grdefs.h>
67447 +#include <linux/grmsg.h>
67448 +
67449 +void gr_add_learn_entry(const char *fmt, ...)
67450 + __attribute__ ((format (printf, 1, 2)));
67451 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67452 + const struct vfsmount *mnt);
67453 +__u32 gr_check_create(const struct dentry *new_dentry,
67454 + const struct dentry *parent,
67455 + const struct vfsmount *mnt, const __u32 mode);
67456 +int gr_check_protected_task(const struct task_struct *task);
67457 +__u32 to_gr_audit(const __u32 reqmode);
67458 +int gr_set_acls(const int type);
67459 +int gr_apply_subject_to_task(struct task_struct *task);
67460 +int gr_acl_is_enabled(void);
67461 +char gr_roletype_to_char(void);
67462 +
67463 +void gr_handle_alertkill(struct task_struct *task);
67464 +char *gr_to_filename(const struct dentry *dentry,
67465 + const struct vfsmount *mnt);
67466 +char *gr_to_filename1(const struct dentry *dentry,
67467 + const struct vfsmount *mnt);
67468 +char *gr_to_filename2(const struct dentry *dentry,
67469 + const struct vfsmount *mnt);
67470 +char *gr_to_filename3(const struct dentry *dentry,
67471 + const struct vfsmount *mnt);
67472 +
67473 +extern int grsec_enable_ptrace_readexec;
67474 +extern int grsec_enable_harden_ptrace;
67475 +extern int grsec_enable_link;
67476 +extern int grsec_enable_fifo;
67477 +extern int grsec_enable_shm;
67478 +extern int grsec_enable_execlog;
67479 +extern int grsec_enable_signal;
67480 +extern int grsec_enable_audit_ptrace;
67481 +extern int grsec_enable_forkfail;
67482 +extern int grsec_enable_time;
67483 +extern int grsec_enable_rofs;
67484 +extern int grsec_enable_chroot_shmat;
67485 +extern int grsec_enable_chroot_mount;
67486 +extern int grsec_enable_chroot_double;
67487 +extern int grsec_enable_chroot_pivot;
67488 +extern int grsec_enable_chroot_chdir;
67489 +extern int grsec_enable_chroot_chmod;
67490 +extern int grsec_enable_chroot_mknod;
67491 +extern int grsec_enable_chroot_fchdir;
67492 +extern int grsec_enable_chroot_nice;
67493 +extern int grsec_enable_chroot_execlog;
67494 +extern int grsec_enable_chroot_caps;
67495 +extern int grsec_enable_chroot_sysctl;
67496 +extern int grsec_enable_chroot_unix;
67497 +extern int grsec_enable_tpe;
67498 +extern int grsec_tpe_gid;
67499 +extern int grsec_enable_tpe_all;
67500 +extern int grsec_enable_tpe_invert;
67501 +extern int grsec_enable_socket_all;
67502 +extern int grsec_socket_all_gid;
67503 +extern int grsec_enable_socket_client;
67504 +extern int grsec_socket_client_gid;
67505 +extern int grsec_enable_socket_server;
67506 +extern int grsec_socket_server_gid;
67507 +extern int grsec_audit_gid;
67508 +extern int grsec_enable_group;
67509 +extern int grsec_enable_audit_textrel;
67510 +extern int grsec_enable_log_rwxmaps;
67511 +extern int grsec_enable_mount;
67512 +extern int grsec_enable_chdir;
67513 +extern int grsec_resource_logging;
67514 +extern int grsec_enable_blackhole;
67515 +extern int grsec_lastack_retries;
67516 +extern int grsec_enable_brute;
67517 +extern int grsec_lock;
67518 +
67519 +extern spinlock_t grsec_alert_lock;
67520 +extern unsigned long grsec_alert_wtime;
67521 +extern unsigned long grsec_alert_fyet;
67522 +
67523 +extern spinlock_t grsec_audit_lock;
67524 +
67525 +extern rwlock_t grsec_exec_file_lock;
67526 +
67527 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67528 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67529 + (tsk)->exec_file->f_vfsmnt) : "/")
67530 +
67531 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67532 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67533 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67534 +
67535 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67536 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
67537 + (tsk)->exec_file->f_vfsmnt) : "/")
67538 +
67539 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67540 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67541 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67542 +
67543 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67544 +
67545 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67546 +
67547 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67548 + (task)->pid, (cred)->uid, \
67549 + (cred)->euid, (cred)->gid, (cred)->egid, \
67550 + gr_parent_task_fullpath(task), \
67551 + (task)->real_parent->comm, (task)->real_parent->pid, \
67552 + (pcred)->uid, (pcred)->euid, \
67553 + (pcred)->gid, (pcred)->egid
67554 +
67555 +#define GR_CHROOT_CAPS {{ \
67556 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67557 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67558 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67559 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67560 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67561 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67562 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
67563 +
67564 +#define security_learn(normal_msg,args...) \
67565 +({ \
67566 + read_lock(&grsec_exec_file_lock); \
67567 + gr_add_learn_entry(normal_msg "\n", ## args); \
67568 + read_unlock(&grsec_exec_file_lock); \
67569 +})
67570 +
67571 +enum {
67572 + GR_DO_AUDIT,
67573 + GR_DONT_AUDIT,
67574 + GR_DONT_AUDIT_GOOD
67575 +};
67576 +
67577 +enum {
67578 + GR_TTYSNIFF,
67579 + GR_RBAC,
67580 + GR_RBAC_STR,
67581 + GR_STR_RBAC,
67582 + GR_RBAC_MODE2,
67583 + GR_RBAC_MODE3,
67584 + GR_FILENAME,
67585 + GR_SYSCTL_HIDDEN,
67586 + GR_NOARGS,
67587 + GR_ONE_INT,
67588 + GR_ONE_INT_TWO_STR,
67589 + GR_ONE_STR,
67590 + GR_STR_INT,
67591 + GR_TWO_STR_INT,
67592 + GR_TWO_INT,
67593 + GR_TWO_U64,
67594 + GR_THREE_INT,
67595 + GR_FIVE_INT_TWO_STR,
67596 + GR_TWO_STR,
67597 + GR_THREE_STR,
67598 + GR_FOUR_STR,
67599 + GR_STR_FILENAME,
67600 + GR_FILENAME_STR,
67601 + GR_FILENAME_TWO_INT,
67602 + GR_FILENAME_TWO_INT_STR,
67603 + GR_TEXTREL,
67604 + GR_PTRACE,
67605 + GR_RESOURCE,
67606 + GR_CAP,
67607 + GR_SIG,
67608 + GR_SIG2,
67609 + GR_CRASH1,
67610 + GR_CRASH2,
67611 + GR_PSACCT,
67612 + GR_RWXMAP
67613 +};
67614 +
67615 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67616 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67617 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67618 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67619 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67620 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67621 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67622 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67623 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67624 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67625 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67626 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67627 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67628 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67629 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67630 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67631 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67632 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67633 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67634 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67635 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67636 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67637 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67638 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67639 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67640 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67641 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67642 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67643 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67644 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67645 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67646 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67647 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67648 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67649 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67650 +
67651 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67652 +
67653 +#endif
67654 +
67655 +#endif
67656 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67657 new file mode 100644
67658 index 0000000..f885406
67659 --- /dev/null
67660 +++ b/include/linux/grmsg.h
67661 @@ -0,0 +1,109 @@
67662 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67663 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67664 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67665 +#define GR_STOPMOD_MSG "denied modification of module state by "
67666 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67667 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67668 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67669 +#define GR_IOPL_MSG "denied use of iopl() by "
67670 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67671 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67672 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67673 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67674 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67675 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67676 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67677 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67678 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67679 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67680 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67681 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67682 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67683 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67684 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67685 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67686 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67687 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67688 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67689 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67690 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67691 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67692 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67693 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67694 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67695 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67696 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67697 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67698 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67699 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67700 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67701 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67702 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67703 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67704 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67705 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67706 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67707 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67708 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67709 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67710 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67711 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67712 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67713 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67714 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67715 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67716 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67717 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67718 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67719 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67720 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67721 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67722 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67723 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67724 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67725 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67726 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67727 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67728 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67729 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67730 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67731 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67732 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67733 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67734 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67735 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67736 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67737 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67738 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67739 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67740 +#define GR_TIME_MSG "time set by "
67741 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67742 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67743 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67744 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67745 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67746 +#define GR_BIND_MSG "denied bind() by "
67747 +#define GR_CONNECT_MSG "denied connect() by "
67748 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67749 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67750 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67751 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67752 +#define GR_CAP_ACL_MSG "use of %s denied for "
67753 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67754 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67755 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67756 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67757 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67758 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67759 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67760 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67761 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67762 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67763 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67764 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67765 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67766 +#define GR_VM86_MSG "denied use of vm86 by "
67767 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67768 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67769 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67770 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67771 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67772 new file mode 100644
67773 index 0000000..c1793ae
67774 --- /dev/null
67775 +++ b/include/linux/grsecurity.h
67776 @@ -0,0 +1,219 @@
67777 +#ifndef GR_SECURITY_H
67778 +#define GR_SECURITY_H
67779 +#include <linux/fs.h>
67780 +#include <linux/fs_struct.h>
67781 +#include <linux/binfmts.h>
67782 +#include <linux/gracl.h>
67783 +#include <linux/compat.h>
67784 +
67785 +/* notify of brain-dead configs */
67786 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67787 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67788 +#endif
67789 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67790 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67791 +#endif
67792 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67793 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67794 +#endif
67795 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67796 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
67797 +#endif
67798 +
67799 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67800 +void gr_handle_brute_check(void);
67801 +void gr_handle_kernel_exploit(void);
67802 +int gr_process_user_ban(void);
67803 +
67804 +char gr_roletype_to_char(void);
67805 +
67806 +int gr_acl_enable_at_secure(void);
67807 +
67808 +int gr_check_user_change(int real, int effective, int fs);
67809 +int gr_check_group_change(int real, int effective, int fs);
67810 +
67811 +void gr_del_task_from_ip_table(struct task_struct *p);
67812 +
67813 +int gr_pid_is_chrooted(struct task_struct *p);
67814 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67815 +int gr_handle_chroot_nice(void);
67816 +int gr_handle_chroot_sysctl(const int op);
67817 +int gr_handle_chroot_setpriority(struct task_struct *p,
67818 + const int niceval);
67819 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67820 +int gr_handle_chroot_chroot(const struct dentry *dentry,
67821 + const struct vfsmount *mnt);
67822 +void gr_handle_chroot_chdir(struct path *path);
67823 +int gr_handle_chroot_chmod(const struct dentry *dentry,
67824 + const struct vfsmount *mnt, const int mode);
67825 +int gr_handle_chroot_mknod(const struct dentry *dentry,
67826 + const struct vfsmount *mnt, const int mode);
67827 +int gr_handle_chroot_mount(const struct dentry *dentry,
67828 + const struct vfsmount *mnt,
67829 + const char *dev_name);
67830 +int gr_handle_chroot_pivot(void);
67831 +int gr_handle_chroot_unix(const pid_t pid);
67832 +
67833 +int gr_handle_rawio(const struct inode *inode);
67834 +
67835 +void gr_handle_ioperm(void);
67836 +void gr_handle_iopl(void);
67837 +
67838 +umode_t gr_acl_umask(void);
67839 +
67840 +int gr_tpe_allow(const struct file *file);
67841 +
67842 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67843 +void gr_clear_chroot_entries(struct task_struct *task);
67844 +
67845 +void gr_log_forkfail(const int retval);
67846 +void gr_log_timechange(void);
67847 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67848 +void gr_log_chdir(const struct dentry *dentry,
67849 + const struct vfsmount *mnt);
67850 +void gr_log_chroot_exec(const struct dentry *dentry,
67851 + const struct vfsmount *mnt);
67852 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67853 +#ifdef CONFIG_COMPAT
67854 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67855 +#endif
67856 +void gr_log_remount(const char *devname, const int retval);
67857 +void gr_log_unmount(const char *devname, const int retval);
67858 +void gr_log_mount(const char *from, const char *to, const int retval);
67859 +void gr_log_textrel(struct vm_area_struct *vma);
67860 +void gr_log_rwxmmap(struct file *file);
67861 +void gr_log_rwxmprotect(struct file *file);
67862 +
67863 +int gr_handle_follow_link(const struct inode *parent,
67864 + const struct inode *inode,
67865 + const struct dentry *dentry,
67866 + const struct vfsmount *mnt);
67867 +int gr_handle_fifo(const struct dentry *dentry,
67868 + const struct vfsmount *mnt,
67869 + const struct dentry *dir, const int flag,
67870 + const int acc_mode);
67871 +int gr_handle_hardlink(const struct dentry *dentry,
67872 + const struct vfsmount *mnt,
67873 + struct inode *inode,
67874 + const int mode, const char *to);
67875 +
67876 +int gr_is_capable(const int cap);
67877 +int gr_is_capable_nolog(const int cap);
67878 +void gr_learn_resource(const struct task_struct *task, const int limit,
67879 + const unsigned long wanted, const int gt);
67880 +void gr_copy_label(struct task_struct *tsk);
67881 +void gr_handle_crash(struct task_struct *task, const int sig);
67882 +int gr_handle_signal(const struct task_struct *p, const int sig);
67883 +int gr_check_crash_uid(const uid_t uid);
67884 +int gr_check_protected_task(const struct task_struct *task);
67885 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67886 +int gr_acl_handle_mmap(const struct file *file,
67887 + const unsigned long prot);
67888 +int gr_acl_handle_mprotect(const struct file *file,
67889 + const unsigned long prot);
67890 +int gr_check_hidden_task(const struct task_struct *tsk);
67891 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67892 + const struct vfsmount *mnt);
67893 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
67894 + const struct vfsmount *mnt);
67895 +__u32 gr_acl_handle_access(const struct dentry *dentry,
67896 + const struct vfsmount *mnt, const int fmode);
67897 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67898 + const struct vfsmount *mnt, umode_t *mode);
67899 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
67900 + const struct vfsmount *mnt);
67901 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67902 + const struct vfsmount *mnt);
67903 +int gr_handle_ptrace(struct task_struct *task, const long request);
67904 +int gr_handle_proc_ptrace(struct task_struct *task);
67905 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
67906 + const struct vfsmount *mnt);
67907 +int gr_check_crash_exec(const struct file *filp);
67908 +int gr_acl_is_enabled(void);
67909 +void gr_set_kernel_label(struct task_struct *task);
67910 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
67911 + const gid_t gid);
67912 +int gr_set_proc_label(const struct dentry *dentry,
67913 + const struct vfsmount *mnt,
67914 + const int unsafe_flags);
67915 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67916 + const struct vfsmount *mnt);
67917 +__u32 gr_acl_handle_open(const struct dentry *dentry,
67918 + const struct vfsmount *mnt, int acc_mode);
67919 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
67920 + const struct dentry *p_dentry,
67921 + const struct vfsmount *p_mnt,
67922 + int open_flags, int acc_mode, const int imode);
67923 +void gr_handle_create(const struct dentry *dentry,
67924 + const struct vfsmount *mnt);
67925 +void gr_handle_proc_create(const struct dentry *dentry,
67926 + const struct inode *inode);
67927 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67928 + const struct dentry *parent_dentry,
67929 + const struct vfsmount *parent_mnt,
67930 + const int mode);
67931 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67932 + const struct dentry *parent_dentry,
67933 + const struct vfsmount *parent_mnt);
67934 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67935 + const struct vfsmount *mnt);
67936 +void gr_handle_delete(const ino_t ino, const dev_t dev);
67937 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67938 + const struct vfsmount *mnt);
67939 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67940 + const struct dentry *parent_dentry,
67941 + const struct vfsmount *parent_mnt,
67942 + const char *from);
67943 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67944 + const struct dentry *parent_dentry,
67945 + const struct vfsmount *parent_mnt,
67946 + const struct dentry *old_dentry,
67947 + const struct vfsmount *old_mnt, const char *to);
67948 +int gr_acl_handle_rename(struct dentry *new_dentry,
67949 + struct dentry *parent_dentry,
67950 + const struct vfsmount *parent_mnt,
67951 + struct dentry *old_dentry,
67952 + struct inode *old_parent_inode,
67953 + struct vfsmount *old_mnt, const char *newname);
67954 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67955 + struct dentry *old_dentry,
67956 + struct dentry *new_dentry,
67957 + struct vfsmount *mnt, const __u8 replace);
67958 +__u32 gr_check_link(const struct dentry *new_dentry,
67959 + const struct dentry *parent_dentry,
67960 + const struct vfsmount *parent_mnt,
67961 + const struct dentry *old_dentry,
67962 + const struct vfsmount *old_mnt);
67963 +int gr_acl_handle_filldir(const struct file *file, const char *name,
67964 + const unsigned int namelen, const ino_t ino);
67965 +
67966 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
67967 + const struct vfsmount *mnt);
67968 +void gr_acl_handle_exit(void);
67969 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
67970 +int gr_acl_handle_procpidmem(const struct task_struct *task);
67971 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67972 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67973 +void gr_audit_ptrace(struct task_struct *task);
67974 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67975 +
67976 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67977 +
67978 +#ifdef CONFIG_GRKERNSEC
67979 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67980 +void gr_handle_vm86(void);
67981 +void gr_handle_mem_readwrite(u64 from, u64 to);
67982 +
67983 +void gr_log_badprocpid(const char *entry);
67984 +
67985 +extern int grsec_enable_dmesg;
67986 +extern int grsec_disable_privio;
67987 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67988 +extern int grsec_enable_chroot_findtask;
67989 +#endif
67990 +#ifdef CONFIG_GRKERNSEC_SETXID
67991 +extern int grsec_enable_setxid;
67992 +#endif
67993 +#endif
67994 +
67995 +#endif
67996 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67997 index 6a87154..a3ce57b 100644
67998 --- a/include/linux/hdpu_features.h
67999 +++ b/include/linux/hdpu_features.h
68000 @@ -3,7 +3,7 @@
68001 struct cpustate_t {
68002 spinlock_t lock;
68003 int excl;
68004 - int open_count;
68005 + atomic_t open_count;
68006 unsigned char cached_val;
68007 int inited;
68008 unsigned long *set_addr;
68009 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68010 index 211ff44..00ab6d7 100644
68011 --- a/include/linux/highmem.h
68012 +++ b/include/linux/highmem.h
68013 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68014 kunmap_atomic(kaddr, KM_USER0);
68015 }
68016
68017 +static inline void sanitize_highpage(struct page *page)
68018 +{
68019 + void *kaddr;
68020 + unsigned long flags;
68021 +
68022 + local_irq_save(flags);
68023 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
68024 + clear_page(kaddr);
68025 + kunmap_atomic(kaddr, KM_CLEARPAGE);
68026 + local_irq_restore(flags);
68027 +}
68028 +
68029 static inline void zero_user_segments(struct page *page,
68030 unsigned start1, unsigned end1,
68031 unsigned start2, unsigned end2)
68032 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68033 index 7b40cda..24eb44e 100644
68034 --- a/include/linux/i2c.h
68035 +++ b/include/linux/i2c.h
68036 @@ -325,6 +325,7 @@ struct i2c_algorithm {
68037 /* To determine what the adapter supports */
68038 u32 (*functionality) (struct i2c_adapter *);
68039 };
68040 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68041
68042 /*
68043 * i2c_adapter is the structure used to identify a physical i2c bus along
68044 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68045 index 4c4e57d..f3c5303 100644
68046 --- a/include/linux/i2o.h
68047 +++ b/include/linux/i2o.h
68048 @@ -564,7 +564,7 @@ struct i2o_controller {
68049 struct i2o_device *exec; /* Executive */
68050 #if BITS_PER_LONG == 64
68051 spinlock_t context_list_lock; /* lock for context_list */
68052 - atomic_t context_list_counter; /* needed for unique contexts */
68053 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68054 struct list_head context_list; /* list of context id's
68055 and pointers */
68056 #endif
68057 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68058 index 21a6f5d..dc42eab 100644
68059 --- a/include/linux/init_task.h
68060 +++ b/include/linux/init_task.h
68061 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
68062 #define INIT_IDS
68063 #endif
68064
68065 +#ifdef CONFIG_X86
68066 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68067 +#else
68068 +#define INIT_TASK_THREAD_INFO
68069 +#endif
68070 +
68071 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68072 /*
68073 * Because of the reduced scope of CAP_SETPCAP when filesystem
68074 @@ -156,6 +162,7 @@ extern struct cred init_cred;
68075 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68076 .comm = "swapper", \
68077 .thread = INIT_THREAD, \
68078 + INIT_TASK_THREAD_INFO \
68079 .fs = &init_fs, \
68080 .files = &init_files, \
68081 .signal = &init_signals, \
68082 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68083 index 4f0a72a..a849599 100644
68084 --- a/include/linux/intel-iommu.h
68085 +++ b/include/linux/intel-iommu.h
68086 @@ -296,7 +296,7 @@ struct iommu_flush {
68087 u8 fm, u64 type);
68088 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68089 unsigned int size_order, u64 type);
68090 -};
68091 +} __no_const;
68092
68093 enum {
68094 SR_DMAR_FECTL_REG,
68095 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68096 index c739150..be577b5 100644
68097 --- a/include/linux/interrupt.h
68098 +++ b/include/linux/interrupt.h
68099 @@ -369,7 +369,7 @@ enum
68100 /* map softirq index to softirq name. update 'softirq_to_name' in
68101 * kernel/softirq.c when adding a new softirq.
68102 */
68103 -extern char *softirq_to_name[NR_SOFTIRQS];
68104 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68105
68106 /* softirq mask and active fields moved to irq_cpustat_t in
68107 * asm/hardirq.h to get better cache usage. KAO
68108 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68109
68110 struct softirq_action
68111 {
68112 - void (*action)(struct softirq_action *);
68113 + void (*action)(void);
68114 };
68115
68116 asmlinkage void do_softirq(void);
68117 asmlinkage void __do_softirq(void);
68118 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68119 +extern void open_softirq(int nr, void (*action)(void));
68120 extern void softirq_init(void);
68121 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68122 extern void raise_softirq_irqoff(unsigned int nr);
68123 diff --git a/include/linux/irq.h b/include/linux/irq.h
68124 index 9e5f45a..025865b 100644
68125 --- a/include/linux/irq.h
68126 +++ b/include/linux/irq.h
68127 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68128 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68129 bool boot)
68130 {
68131 +#ifdef CONFIG_CPUMASK_OFFSTACK
68132 gfp_t gfp = GFP_ATOMIC;
68133
68134 if (boot)
68135 gfp = GFP_NOWAIT;
68136
68137 -#ifdef CONFIG_CPUMASK_OFFSTACK
68138 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68139 return false;
68140
68141 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68142 index 7922742..27306a2 100644
68143 --- a/include/linux/kallsyms.h
68144 +++ b/include/linux/kallsyms.h
68145 @@ -15,7 +15,8 @@
68146
68147 struct module;
68148
68149 -#ifdef CONFIG_KALLSYMS
68150 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68151 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68152 /* Lookup the address for a symbol. Returns 0 if not found. */
68153 unsigned long kallsyms_lookup_name(const char *name);
68154
68155 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68156 /* Stupid that this does nothing, but I didn't create this mess. */
68157 #define __print_symbol(fmt, addr)
68158 #endif /*CONFIG_KALLSYMS*/
68159 +#else /* when included by kallsyms.c, vsnprintf.c, or
68160 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68161 +extern void __print_symbol(const char *fmt, unsigned long address);
68162 +extern int sprint_symbol(char *buffer, unsigned long address);
68163 +const char *kallsyms_lookup(unsigned long addr,
68164 + unsigned long *symbolsize,
68165 + unsigned long *offset,
68166 + char **modname, char *namebuf);
68167 +#endif
68168
68169 /* This macro allows us to keep printk typechecking */
68170 static void __check_printsym_format(const char *fmt, ...)
68171 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68172 index 6adcc29..13369e8 100644
68173 --- a/include/linux/kgdb.h
68174 +++ b/include/linux/kgdb.h
68175 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68176
68177 extern int kgdb_connected;
68178
68179 -extern atomic_t kgdb_setting_breakpoint;
68180 -extern atomic_t kgdb_cpu_doing_single_step;
68181 +extern atomic_unchecked_t kgdb_setting_breakpoint;
68182 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68183
68184 extern struct task_struct *kgdb_usethread;
68185 extern struct task_struct *kgdb_contthread;
68186 @@ -235,7 +235,7 @@ struct kgdb_arch {
68187 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68188 void (*remove_all_hw_break)(void);
68189 void (*correct_hw_break)(void);
68190 -};
68191 +} __do_const;
68192
68193 /**
68194 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68195 @@ -257,14 +257,14 @@ struct kgdb_io {
68196 int (*init) (void);
68197 void (*pre_exception) (void);
68198 void (*post_exception) (void);
68199 -};
68200 +} __do_const;
68201
68202 -extern struct kgdb_arch arch_kgdb_ops;
68203 +extern const struct kgdb_arch arch_kgdb_ops;
68204
68205 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68206
68207 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68208 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68209 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68210 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68211
68212 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68213 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68214 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68215 index 384ca8b..83dd97d 100644
68216 --- a/include/linux/kmod.h
68217 +++ b/include/linux/kmod.h
68218 @@ -31,6 +31,8 @@
68219 * usually useless though. */
68220 extern int __request_module(bool wait, const char *name, ...) \
68221 __attribute__((format(printf, 2, 3)));
68222 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68223 + __attribute__((format(printf, 3, 4)));
68224 #define request_module(mod...) __request_module(true, mod)
68225 #define request_module_nowait(mod...) __request_module(false, mod)
68226 #define try_then_request_module(x, mod...) \
68227 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68228 index 58ae8e0..3950d3c 100644
68229 --- a/include/linux/kobject.h
68230 +++ b/include/linux/kobject.h
68231 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68232
68233 struct kobj_type {
68234 void (*release)(struct kobject *kobj);
68235 - struct sysfs_ops *sysfs_ops;
68236 + const struct sysfs_ops *sysfs_ops;
68237 struct attribute **default_attrs;
68238 };
68239
68240 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
68241 };
68242
68243 struct kset_uevent_ops {
68244 - int (*filter)(struct kset *kset, struct kobject *kobj);
68245 - const char *(*name)(struct kset *kset, struct kobject *kobj);
68246 - int (*uevent)(struct kset *kset, struct kobject *kobj,
68247 + int (* const filter)(struct kset *kset, struct kobject *kobj);
68248 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
68249 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
68250 struct kobj_uevent_env *env);
68251 };
68252
68253 @@ -132,7 +132,7 @@ struct kobj_attribute {
68254 const char *buf, size_t count);
68255 };
68256
68257 -extern struct sysfs_ops kobj_sysfs_ops;
68258 +extern const struct sysfs_ops kobj_sysfs_ops;
68259
68260 /**
68261 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68262 @@ -155,14 +155,14 @@ struct kset {
68263 struct list_head list;
68264 spinlock_t list_lock;
68265 struct kobject kobj;
68266 - struct kset_uevent_ops *uevent_ops;
68267 + const struct kset_uevent_ops *uevent_ops;
68268 };
68269
68270 extern void kset_init(struct kset *kset);
68271 extern int __must_check kset_register(struct kset *kset);
68272 extern void kset_unregister(struct kset *kset);
68273 extern struct kset * __must_check kset_create_and_add(const char *name,
68274 - struct kset_uevent_ops *u,
68275 + const struct kset_uevent_ops *u,
68276 struct kobject *parent_kobj);
68277
68278 static inline struct kset *to_kset(struct kobject *kobj)
68279 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68280 index c728a50..752d821 100644
68281 --- a/include/linux/kvm_host.h
68282 +++ b/include/linux/kvm_host.h
68283 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68284 void vcpu_load(struct kvm_vcpu *vcpu);
68285 void vcpu_put(struct kvm_vcpu *vcpu);
68286
68287 -int kvm_init(void *opaque, unsigned int vcpu_size,
68288 +int kvm_init(const void *opaque, unsigned int vcpu_size,
68289 struct module *module);
68290 void kvm_exit(void);
68291
68292 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68293 struct kvm_guest_debug *dbg);
68294 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68295
68296 -int kvm_arch_init(void *opaque);
68297 +int kvm_arch_init(const void *opaque);
68298 void kvm_arch_exit(void);
68299
68300 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68301 diff --git a/include/linux/libata.h b/include/linux/libata.h
68302 index a069916..223edde 100644
68303 --- a/include/linux/libata.h
68304 +++ b/include/linux/libata.h
68305 @@ -525,11 +525,11 @@ struct ata_ioports {
68306
68307 struct ata_host {
68308 spinlock_t lock;
68309 - struct device *dev;
68310 + struct device *dev;
68311 void __iomem * const *iomap;
68312 unsigned int n_ports;
68313 void *private_data;
68314 - struct ata_port_operations *ops;
68315 + const struct ata_port_operations *ops;
68316 unsigned long flags;
68317 #ifdef CONFIG_ATA_ACPI
68318 acpi_handle acpi_handle;
68319 @@ -710,7 +710,7 @@ struct ata_link {
68320
68321 struct ata_port {
68322 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68323 - struct ata_port_operations *ops;
68324 + const struct ata_port_operations *ops;
68325 spinlock_t *lock;
68326 /* Flags owned by the EH context. Only EH should touch these once the
68327 port is active */
68328 @@ -884,7 +884,7 @@ struct ata_port_operations {
68329 * fields must be pointers.
68330 */
68331 const struct ata_port_operations *inherits;
68332 -};
68333 +} __do_const;
68334
68335 struct ata_port_info {
68336 unsigned long flags;
68337 @@ -892,7 +892,7 @@ struct ata_port_info {
68338 unsigned long pio_mask;
68339 unsigned long mwdma_mask;
68340 unsigned long udma_mask;
68341 - struct ata_port_operations *port_ops;
68342 + const struct ata_port_operations *port_ops;
68343 void *private_data;
68344 };
68345
68346 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68347 extern const unsigned long sata_deb_timing_hotplug[];
68348 extern const unsigned long sata_deb_timing_long[];
68349
68350 -extern struct ata_port_operations ata_dummy_port_ops;
68351 +extern const struct ata_port_operations ata_dummy_port_ops;
68352 extern const struct ata_port_info ata_dummy_port_info;
68353
68354 static inline const unsigned long *
68355 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68356 struct scsi_host_template *sht);
68357 extern void ata_host_detach(struct ata_host *host);
68358 extern void ata_host_init(struct ata_host *, struct device *,
68359 - unsigned long, struct ata_port_operations *);
68360 + unsigned long, const struct ata_port_operations *);
68361 extern int ata_scsi_detect(struct scsi_host_template *sht);
68362 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68363 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68364 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68365 index fbc48f8..0886e57 100644
68366 --- a/include/linux/lockd/bind.h
68367 +++ b/include/linux/lockd/bind.h
68368 @@ -23,13 +23,13 @@ struct svc_rqst;
68369 * This is the set of functions for lockd->nfsd communication
68370 */
68371 struct nlmsvc_binding {
68372 - __be32 (*fopen)(struct svc_rqst *,
68373 + __be32 (* const fopen)(struct svc_rqst *,
68374 struct nfs_fh *,
68375 struct file **);
68376 - void (*fclose)(struct file *);
68377 + void (* const fclose)(struct file *);
68378 };
68379
68380 -extern struct nlmsvc_binding * nlmsvc_ops;
68381 +extern const struct nlmsvc_binding * nlmsvc_ops;
68382
68383 /*
68384 * Similar to nfs_client_initdata, but without the NFS-specific
68385 diff --git a/include/linux/mca.h b/include/linux/mca.h
68386 index 3797270..7765ede 100644
68387 --- a/include/linux/mca.h
68388 +++ b/include/linux/mca.h
68389 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68390 int region);
68391 void * (*mca_transform_memory)(struct mca_device *,
68392 void *memory);
68393 -};
68394 +} __no_const;
68395
68396 struct mca_bus {
68397 u64 default_dma_mask;
68398 diff --git a/include/linux/memory.h b/include/linux/memory.h
68399 index 37fa19b..b597c85 100644
68400 --- a/include/linux/memory.h
68401 +++ b/include/linux/memory.h
68402 @@ -108,7 +108,7 @@ struct memory_accessor {
68403 size_t count);
68404 ssize_t (*write)(struct memory_accessor *, const char *buf,
68405 off_t offset, size_t count);
68406 -};
68407 +} __no_const;
68408
68409 /*
68410 * Kernel text modification mutex, used for code patching. Users of this lock
68411 diff --git a/include/linux/mm.h b/include/linux/mm.h
68412 index 11e5be6..1ff2423 100644
68413 --- a/include/linux/mm.h
68414 +++ b/include/linux/mm.h
68415 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68416
68417 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68418 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68419 +
68420 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68421 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68422 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68423 +#else
68424 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68425 +#endif
68426 +
68427 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68428 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68429
68430 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68431 int set_page_dirty_lock(struct page *page);
68432 int clear_page_dirty_for_io(struct page *page);
68433
68434 -/* Is the vma a continuation of the stack vma above it? */
68435 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68436 -{
68437 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68438 -}
68439 -
68440 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68441 unsigned long old_addr, struct vm_area_struct *new_vma,
68442 unsigned long new_addr, unsigned long len);
68443 @@ -890,6 +891,8 @@ struct shrinker {
68444 extern void register_shrinker(struct shrinker *);
68445 extern void unregister_shrinker(struct shrinker *);
68446
68447 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
68448 +
68449 int vma_wants_writenotify(struct vm_area_struct *vma);
68450
68451 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68452 @@ -1162,6 +1165,7 @@ out:
68453 }
68454
68455 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68456 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68457
68458 extern unsigned long do_brk(unsigned long, unsigned long);
68459
68460 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68461 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68462 struct vm_area_struct **pprev);
68463
68464 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68465 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68466 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68467 +
68468 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68469 NULL if none. Assume start_addr < end_addr. */
68470 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68471 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68472 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68473 }
68474
68475 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
68476 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68477 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68478 unsigned long pfn, unsigned long size, pgprot_t);
68479 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68480 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68481 extern int sysctl_memory_failure_early_kill;
68482 extern int sysctl_memory_failure_recovery;
68483 -extern atomic_long_t mce_bad_pages;
68484 +extern atomic_long_unchecked_t mce_bad_pages;
68485 +
68486 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68487 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68488 +#else
68489 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68490 +#endif
68491
68492 #endif /* __KERNEL__ */
68493 #endif /* _LINUX_MM_H */
68494 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68495 index 9d12ed5..6d9707a 100644
68496 --- a/include/linux/mm_types.h
68497 +++ b/include/linux/mm_types.h
68498 @@ -186,6 +186,8 @@ struct vm_area_struct {
68499 #ifdef CONFIG_NUMA
68500 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68501 #endif
68502 +
68503 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68504 };
68505
68506 struct core_thread {
68507 @@ -287,6 +289,24 @@ struct mm_struct {
68508 #ifdef CONFIG_MMU_NOTIFIER
68509 struct mmu_notifier_mm *mmu_notifier_mm;
68510 #endif
68511 +
68512 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68513 + unsigned long pax_flags;
68514 +#endif
68515 +
68516 +#ifdef CONFIG_PAX_DLRESOLVE
68517 + unsigned long call_dl_resolve;
68518 +#endif
68519 +
68520 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68521 + unsigned long call_syscall;
68522 +#endif
68523 +
68524 +#ifdef CONFIG_PAX_ASLR
68525 + unsigned long delta_mmap; /* randomized offset */
68526 + unsigned long delta_stack; /* randomized offset */
68527 +#endif
68528 +
68529 };
68530
68531 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68532 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68533 index 4e02ee2..afb159e 100644
68534 --- a/include/linux/mmu_notifier.h
68535 +++ b/include/linux/mmu_notifier.h
68536 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68537 */
68538 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68539 ({ \
68540 - pte_t __pte; \
68541 + pte_t ___pte; \
68542 struct vm_area_struct *___vma = __vma; \
68543 unsigned long ___address = __address; \
68544 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68545 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68546 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68547 - __pte; \
68548 + ___pte; \
68549 })
68550
68551 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68552 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68553 index 6c31a2a..4b0e930 100644
68554 --- a/include/linux/mmzone.h
68555 +++ b/include/linux/mmzone.h
68556 @@ -350,7 +350,7 @@ struct zone {
68557 unsigned long flags; /* zone flags, see below */
68558
68559 /* Zone statistics */
68560 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68561 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68562
68563 /*
68564 * prev_priority holds the scanning priority for this zone. It is
68565 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68566 index f58e9d8..3503935 100644
68567 --- a/include/linux/mod_devicetable.h
68568 +++ b/include/linux/mod_devicetable.h
68569 @@ -12,7 +12,7 @@
68570 typedef unsigned long kernel_ulong_t;
68571 #endif
68572
68573 -#define PCI_ANY_ID (~0)
68574 +#define PCI_ANY_ID ((__u16)~0)
68575
68576 struct pci_device_id {
68577 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68578 @@ -131,7 +131,7 @@ struct usb_device_id {
68579 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68580 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68581
68582 -#define HID_ANY_ID (~0)
68583 +#define HID_ANY_ID (~0U)
68584
68585 struct hid_device_id {
68586 __u16 bus;
68587 diff --git a/include/linux/module.h b/include/linux/module.h
68588 index 482efc8..642032b 100644
68589 --- a/include/linux/module.h
68590 +++ b/include/linux/module.h
68591 @@ -16,6 +16,7 @@
68592 #include <linux/kobject.h>
68593 #include <linux/moduleparam.h>
68594 #include <linux/tracepoint.h>
68595 +#include <linux/fs.h>
68596
68597 #include <asm/local.h>
68598 #include <asm/module.h>
68599 @@ -287,16 +288,16 @@ struct module
68600 int (*init)(void);
68601
68602 /* If this is non-NULL, vfree after init() returns */
68603 - void *module_init;
68604 + void *module_init_rx, *module_init_rw;
68605
68606 /* Here is the actual code + data, vfree'd on unload. */
68607 - void *module_core;
68608 + void *module_core_rx, *module_core_rw;
68609
68610 /* Here are the sizes of the init and core sections */
68611 - unsigned int init_size, core_size;
68612 + unsigned int init_size_rw, core_size_rw;
68613
68614 /* The size of the executable code in each section. */
68615 - unsigned int init_text_size, core_text_size;
68616 + unsigned int init_size_rx, core_size_rx;
68617
68618 /* Arch-specific module values */
68619 struct mod_arch_specific arch;
68620 @@ -345,6 +346,10 @@ struct module
68621 #ifdef CONFIG_EVENT_TRACING
68622 struct ftrace_event_call *trace_events;
68623 unsigned int num_trace_events;
68624 + struct file_operations trace_id;
68625 + struct file_operations trace_enable;
68626 + struct file_operations trace_format;
68627 + struct file_operations trace_filter;
68628 #endif
68629 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68630 unsigned long *ftrace_callsites;
68631 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68632 bool is_module_address(unsigned long addr);
68633 bool is_module_text_address(unsigned long addr);
68634
68635 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68636 +{
68637 +
68638 +#ifdef CONFIG_PAX_KERNEXEC
68639 + if (ktla_ktva(addr) >= (unsigned long)start &&
68640 + ktla_ktva(addr) < (unsigned long)start + size)
68641 + return 1;
68642 +#endif
68643 +
68644 + return ((void *)addr >= start && (void *)addr < start + size);
68645 +}
68646 +
68647 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68648 +{
68649 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68650 +}
68651 +
68652 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68653 +{
68654 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68655 +}
68656 +
68657 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68658 +{
68659 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68660 +}
68661 +
68662 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68663 +{
68664 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68665 +}
68666 +
68667 static inline int within_module_core(unsigned long addr, struct module *mod)
68668 {
68669 - return (unsigned long)mod->module_core <= addr &&
68670 - addr < (unsigned long)mod->module_core + mod->core_size;
68671 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68672 }
68673
68674 static inline int within_module_init(unsigned long addr, struct module *mod)
68675 {
68676 - return (unsigned long)mod->module_init <= addr &&
68677 - addr < (unsigned long)mod->module_init + mod->init_size;
68678 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68679 }
68680
68681 /* Search for module by name: must hold module_mutex. */
68682 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68683 index c1f40c2..682ca53 100644
68684 --- a/include/linux/moduleloader.h
68685 +++ b/include/linux/moduleloader.h
68686 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68687 sections. Returns NULL on failure. */
68688 void *module_alloc(unsigned long size);
68689
68690 +#ifdef CONFIG_PAX_KERNEXEC
68691 +void *module_alloc_exec(unsigned long size);
68692 +#else
68693 +#define module_alloc_exec(x) module_alloc(x)
68694 +#endif
68695 +
68696 /* Free memory returned from module_alloc. */
68697 void module_free(struct module *mod, void *module_region);
68698
68699 +#ifdef CONFIG_PAX_KERNEXEC
68700 +void module_free_exec(struct module *mod, void *module_region);
68701 +#else
68702 +#define module_free_exec(x, y) module_free((x), (y))
68703 +#endif
68704 +
68705 /* Apply the given relocation to the (simplified) ELF. Return -error
68706 or 0. */
68707 int apply_relocate(Elf_Shdr *sechdrs,
68708 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68709 index 82a9124..8a5f622 100644
68710 --- a/include/linux/moduleparam.h
68711 +++ b/include/linux/moduleparam.h
68712 @@ -132,7 +132,7 @@ struct kparam_array
68713
68714 /* Actually copy string: maxlen param is usually sizeof(string). */
68715 #define module_param_string(name, string, len, perm) \
68716 - static const struct kparam_string __param_string_##name \
68717 + static const struct kparam_string __param_string_##name __used \
68718 = { len, string }; \
68719 __module_param_call(MODULE_PARAM_PREFIX, name, \
68720 param_set_copystring, param_get_string, \
68721 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68722
68723 /* Comma-separated array: *nump is set to number they actually specified. */
68724 #define module_param_array_named(name, array, type, nump, perm) \
68725 - static const struct kparam_array __param_arr_##name \
68726 + static const struct kparam_array __param_arr_##name __used \
68727 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68728 sizeof(array[0]), array }; \
68729 __module_param_call(MODULE_PARAM_PREFIX, name, \
68730 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68731 index 878cab4..c92cb3e 100644
68732 --- a/include/linux/mutex.h
68733 +++ b/include/linux/mutex.h
68734 @@ -51,7 +51,7 @@ struct mutex {
68735 spinlock_t wait_lock;
68736 struct list_head wait_list;
68737 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68738 - struct thread_info *owner;
68739 + struct task_struct *owner;
68740 #endif
68741 #ifdef CONFIG_DEBUG_MUTEXES
68742 const char *name;
68743 diff --git a/include/linux/namei.h b/include/linux/namei.h
68744 index ec0f607..d19e675 100644
68745 --- a/include/linux/namei.h
68746 +++ b/include/linux/namei.h
68747 @@ -22,7 +22,7 @@ struct nameidata {
68748 unsigned int flags;
68749 int last_type;
68750 unsigned depth;
68751 - char *saved_names[MAX_NESTED_LINKS + 1];
68752 + const char *saved_names[MAX_NESTED_LINKS + 1];
68753
68754 /* Intent data */
68755 union {
68756 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68757 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68758 extern void unlock_rename(struct dentry *, struct dentry *);
68759
68760 -static inline void nd_set_link(struct nameidata *nd, char *path)
68761 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68762 {
68763 nd->saved_names[nd->depth] = path;
68764 }
68765
68766 -static inline char *nd_get_link(struct nameidata *nd)
68767 +static inline const char *nd_get_link(const struct nameidata *nd)
68768 {
68769 return nd->saved_names[nd->depth];
68770 }
68771 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68772 index 9d7e8f7..04428c5 100644
68773 --- a/include/linux/netdevice.h
68774 +++ b/include/linux/netdevice.h
68775 @@ -637,6 +637,7 @@ struct net_device_ops {
68776 u16 xid);
68777 #endif
68778 };
68779 +typedef struct net_device_ops __no_const net_device_ops_no_const;
68780
68781 /*
68782 * The DEVICE structure.
68783 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68784 new file mode 100644
68785 index 0000000..33f4af8
68786 --- /dev/null
68787 +++ b/include/linux/netfilter/xt_gradm.h
68788 @@ -0,0 +1,9 @@
68789 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
68790 +#define _LINUX_NETFILTER_XT_GRADM_H 1
68791 +
68792 +struct xt_gradm_mtinfo {
68793 + __u16 flags;
68794 + __u16 invflags;
68795 +};
68796 +
68797 +#endif
68798 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68799 index b359c4a..c08b334 100644
68800 --- a/include/linux/nodemask.h
68801 +++ b/include/linux/nodemask.h
68802 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68803
68804 #define any_online_node(mask) \
68805 ({ \
68806 - int node; \
68807 - for_each_node_mask(node, (mask)) \
68808 - if (node_online(node)) \
68809 + int __node; \
68810 + for_each_node_mask(__node, (mask)) \
68811 + if (node_online(__node)) \
68812 break; \
68813 - node; \
68814 + __node; \
68815 })
68816
68817 #define num_online_nodes() num_node_state(N_ONLINE)
68818 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68819 index 5171639..7cf4235 100644
68820 --- a/include/linux/oprofile.h
68821 +++ b/include/linux/oprofile.h
68822 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68823 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68824 char const * name, ulong * val);
68825
68826 -/** Create a file for read-only access to an atomic_t. */
68827 +/** Create a file for read-only access to an atomic_unchecked_t. */
68828 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68829 - char const * name, atomic_t * val);
68830 + char const * name, atomic_unchecked_t * val);
68831
68832 /** create a directory */
68833 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68834 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68835 index 3c62ed4..8924c7c 100644
68836 --- a/include/linux/pagemap.h
68837 +++ b/include/linux/pagemap.h
68838 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68839 if (((unsigned long)uaddr & PAGE_MASK) !=
68840 ((unsigned long)end & PAGE_MASK))
68841 ret = __get_user(c, end);
68842 + (void)c;
68843 }
68844 + (void)c;
68845 return ret;
68846 }
68847
68848 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68849 index 81c9689..a567a55 100644
68850 --- a/include/linux/perf_event.h
68851 +++ b/include/linux/perf_event.h
68852 @@ -476,7 +476,7 @@ struct hw_perf_event {
68853 struct hrtimer hrtimer;
68854 };
68855 };
68856 - atomic64_t prev_count;
68857 + atomic64_unchecked_t prev_count;
68858 u64 sample_period;
68859 u64 last_period;
68860 atomic64_t period_left;
68861 @@ -557,7 +557,7 @@ struct perf_event {
68862 const struct pmu *pmu;
68863
68864 enum perf_event_active_state state;
68865 - atomic64_t count;
68866 + atomic64_unchecked_t count;
68867
68868 /*
68869 * These are the total time in nanoseconds that the event
68870 @@ -595,8 +595,8 @@ struct perf_event {
68871 * These accumulate total time (in nanoseconds) that children
68872 * events have been enabled and running, respectively.
68873 */
68874 - atomic64_t child_total_time_enabled;
68875 - atomic64_t child_total_time_running;
68876 + atomic64_unchecked_t child_total_time_enabled;
68877 + atomic64_unchecked_t child_total_time_running;
68878
68879 /*
68880 * Protect attach/detach and child_list:
68881 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68882 index b43a9e0..b77d869 100644
68883 --- a/include/linux/pipe_fs_i.h
68884 +++ b/include/linux/pipe_fs_i.h
68885 @@ -46,9 +46,9 @@ struct pipe_inode_info {
68886 wait_queue_head_t wait;
68887 unsigned int nrbufs, curbuf;
68888 struct page *tmp_page;
68889 - unsigned int readers;
68890 - unsigned int writers;
68891 - unsigned int waiting_writers;
68892 + atomic_t readers;
68893 + atomic_t writers;
68894 + atomic_t waiting_writers;
68895 unsigned int r_counter;
68896 unsigned int w_counter;
68897 struct fasync_struct *fasync_readers;
68898 diff --git a/include/linux/poison.h b/include/linux/poison.h
68899 index 34066ff..e95d744 100644
68900 --- a/include/linux/poison.h
68901 +++ b/include/linux/poison.h
68902 @@ -19,8 +19,8 @@
68903 * under normal circumstances, used to verify that nobody uses
68904 * non-initialized list entries.
68905 */
68906 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68907 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68908 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68909 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68910
68911 /********** include/linux/timer.h **********/
68912 /*
68913 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68914 index 4f71bf4..cd2f68e 100644
68915 --- a/include/linux/posix-timers.h
68916 +++ b/include/linux/posix-timers.h
68917 @@ -82,7 +82,8 @@ struct k_clock {
68918 #define TIMER_RETRY 1
68919 void (*timer_get) (struct k_itimer * timr,
68920 struct itimerspec * cur_setting);
68921 -};
68922 +} __do_const;
68923 +typedef struct k_clock __no_const k_clock_no_const;
68924
68925 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
68926
68927 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68928 index 72b1a10..13303a9 100644
68929 --- a/include/linux/preempt.h
68930 +++ b/include/linux/preempt.h
68931 @@ -110,7 +110,7 @@ struct preempt_ops {
68932 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68933 void (*sched_out)(struct preempt_notifier *notifier,
68934 struct task_struct *next);
68935 -};
68936 +} __no_const;
68937
68938 /**
68939 * preempt_notifier - key for installing preemption notifiers
68940 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68941 index 379eaed..1bf73e3 100644
68942 --- a/include/linux/proc_fs.h
68943 +++ b/include/linux/proc_fs.h
68944 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68945 return proc_create_data(name, mode, parent, proc_fops, NULL);
68946 }
68947
68948 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68949 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68950 +{
68951 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68952 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68953 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68954 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68955 +#else
68956 + return proc_create_data(name, mode, parent, proc_fops, NULL);
68957 +#endif
68958 +}
68959 +
68960 +
68961 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68962 mode_t mode, struct proc_dir_entry *base,
68963 read_proc_t *read_proc, void * data)
68964 @@ -256,7 +269,7 @@ union proc_op {
68965 int (*proc_show)(struct seq_file *m,
68966 struct pid_namespace *ns, struct pid *pid,
68967 struct task_struct *task);
68968 -};
68969 +} __no_const;
68970
68971 struct ctl_table_header;
68972 struct ctl_table;
68973 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68974 index 7456d7d..6c1cfc9 100644
68975 --- a/include/linux/ptrace.h
68976 +++ b/include/linux/ptrace.h
68977 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68978 extern void exit_ptrace(struct task_struct *tracer);
68979 #define PTRACE_MODE_READ 1
68980 #define PTRACE_MODE_ATTACH 2
68981 -/* Returns 0 on success, -errno on denial. */
68982 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68983 /* Returns true on success, false on denial. */
68984 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68985 +/* Returns true on success, false on denial. */
68986 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68987
68988 static inline int ptrace_reparented(struct task_struct *child)
68989 {
68990 diff --git a/include/linux/random.h b/include/linux/random.h
68991 index 2948046..3262567 100644
68992 --- a/include/linux/random.h
68993 +++ b/include/linux/random.h
68994 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68995 u32 random32(void);
68996 void srandom32(u32 seed);
68997
68998 +static inline unsigned long pax_get_random_long(void)
68999 +{
69000 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69001 +}
69002 +
69003 #endif /* __KERNEL___ */
69004
69005 #endif /* _LINUX_RANDOM_H */
69006 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69007 index 988e55f..17cb4ef 100644
69008 --- a/include/linux/reboot.h
69009 +++ b/include/linux/reboot.h
69010 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69011 * Architecture-specific implementations of sys_reboot commands.
69012 */
69013
69014 -extern void machine_restart(char *cmd);
69015 -extern void machine_halt(void);
69016 -extern void machine_power_off(void);
69017 +extern void machine_restart(char *cmd) __noreturn;
69018 +extern void machine_halt(void) __noreturn;
69019 +extern void machine_power_off(void) __noreturn;
69020
69021 extern void machine_shutdown(void);
69022 struct pt_regs;
69023 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69024 */
69025
69026 extern void kernel_restart_prepare(char *cmd);
69027 -extern void kernel_restart(char *cmd);
69028 -extern void kernel_halt(void);
69029 -extern void kernel_power_off(void);
69030 +extern void kernel_restart(char *cmd) __noreturn;
69031 +extern void kernel_halt(void) __noreturn;
69032 +extern void kernel_power_off(void) __noreturn;
69033
69034 void ctrl_alt_del(void);
69035
69036 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69037 * Emergency restart, callable from an interrupt handler.
69038 */
69039
69040 -extern void emergency_restart(void);
69041 +extern void emergency_restart(void) __noreturn;
69042 #include <asm/emergency-restart.h>
69043
69044 #endif
69045 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69046 index dd31e7b..5b03c5c 100644
69047 --- a/include/linux/reiserfs_fs.h
69048 +++ b/include/linux/reiserfs_fs.h
69049 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69050 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69051
69052 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69053 -#define get_generation(s) atomic_read (&fs_generation(s))
69054 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69055 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69056 #define __fs_changed(gen,s) (gen != get_generation (s))
69057 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69058 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69059 */
69060
69061 struct item_operations {
69062 - int (*bytes_number) (struct item_head * ih, int block_size);
69063 - void (*decrement_key) (struct cpu_key *);
69064 - int (*is_left_mergeable) (struct reiserfs_key * ih,
69065 + int (* const bytes_number) (struct item_head * ih, int block_size);
69066 + void (* const decrement_key) (struct cpu_key *);
69067 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
69068 unsigned long bsize);
69069 - void (*print_item) (struct item_head *, char *item);
69070 - void (*check_item) (struct item_head *, char *item);
69071 + void (* const print_item) (struct item_head *, char *item);
69072 + void (* const check_item) (struct item_head *, char *item);
69073
69074 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69075 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69076 int is_affected, int insert_size);
69077 - int (*check_left) (struct virtual_item * vi, int free,
69078 + int (* const check_left) (struct virtual_item * vi, int free,
69079 int start_skip, int end_skip);
69080 - int (*check_right) (struct virtual_item * vi, int free);
69081 - int (*part_size) (struct virtual_item * vi, int from, int to);
69082 - int (*unit_num) (struct virtual_item * vi);
69083 - void (*print_vi) (struct virtual_item * vi);
69084 + int (* const check_right) (struct virtual_item * vi, int free);
69085 + int (* const part_size) (struct virtual_item * vi, int from, int to);
69086 + int (* const unit_num) (struct virtual_item * vi);
69087 + void (* const print_vi) (struct virtual_item * vi);
69088 };
69089
69090 -extern struct item_operations *item_ops[TYPE_ANY + 1];
69091 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69092
69093 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69094 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69095 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69096 index dab68bb..0688727 100644
69097 --- a/include/linux/reiserfs_fs_sb.h
69098 +++ b/include/linux/reiserfs_fs_sb.h
69099 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69100 /* Comment? -Hans */
69101 wait_queue_head_t s_wait;
69102 /* To be obsoleted soon by per buffer seals.. -Hans */
69103 - atomic_t s_generation_counter; // increased by one every time the
69104 + atomic_unchecked_t s_generation_counter; // increased by one every time the
69105 // tree gets re-balanced
69106 unsigned long s_properties; /* File system properties. Currently holds
69107 on-disk FS format */
69108 diff --git a/include/linux/relay.h b/include/linux/relay.h
69109 index 14a86bc..17d0700 100644
69110 --- a/include/linux/relay.h
69111 +++ b/include/linux/relay.h
69112 @@ -159,7 +159,7 @@ struct rchan_callbacks
69113 * The callback should return 0 if successful, negative if not.
69114 */
69115 int (*remove_buf_file)(struct dentry *dentry);
69116 -};
69117 +} __no_const;
69118
69119 /*
69120 * CONFIG_RELAY kernel API, kernel/relay.c
69121 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69122 index 3392c59..a746428 100644
69123 --- a/include/linux/rfkill.h
69124 +++ b/include/linux/rfkill.h
69125 @@ -144,6 +144,7 @@ struct rfkill_ops {
69126 void (*query)(struct rfkill *rfkill, void *data);
69127 int (*set_block)(void *data, bool blocked);
69128 };
69129 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69130
69131 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69132 /**
69133 diff --git a/include/linux/sched.h b/include/linux/sched.h
69134 index 71849bf..2ef383dc3 100644
69135 --- a/include/linux/sched.h
69136 +++ b/include/linux/sched.h
69137 @@ -101,6 +101,7 @@ struct bio;
69138 struct fs_struct;
69139 struct bts_context;
69140 struct perf_event_context;
69141 +struct linux_binprm;
69142
69143 /*
69144 * List of flags we want to share for kernel threads,
69145 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69146 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69147 asmlinkage void __schedule(void);
69148 asmlinkage void schedule(void);
69149 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69150 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69151
69152 struct nsproxy;
69153 struct user_namespace;
69154 @@ -371,9 +372,12 @@ struct user_namespace;
69155 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69156
69157 extern int sysctl_max_map_count;
69158 +extern unsigned long sysctl_heap_stack_gap;
69159
69160 #include <linux/aio.h>
69161
69162 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69163 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69164 extern unsigned long
69165 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69166 unsigned long, unsigned long);
69167 @@ -666,6 +670,16 @@ struct signal_struct {
69168 struct tty_audit_buf *tty_audit_buf;
69169 #endif
69170
69171 +#ifdef CONFIG_GRKERNSEC
69172 + u32 curr_ip;
69173 + u32 saved_ip;
69174 + u32 gr_saddr;
69175 + u32 gr_daddr;
69176 + u16 gr_sport;
69177 + u16 gr_dport;
69178 + u8 used_accept:1;
69179 +#endif
69180 +
69181 int oom_adj; /* OOM kill score adjustment (bit shift) */
69182 };
69183
69184 @@ -723,6 +737,11 @@ struct user_struct {
69185 struct key *session_keyring; /* UID's default session keyring */
69186 #endif
69187
69188 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69189 + unsigned int banned;
69190 + unsigned long ban_expires;
69191 +#endif
69192 +
69193 /* Hash table maintenance information */
69194 struct hlist_node uidhash_node;
69195 uid_t uid;
69196 @@ -1328,8 +1347,8 @@ struct task_struct {
69197 struct list_head thread_group;
69198
69199 struct completion *vfork_done; /* for vfork() */
69200 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69201 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69202 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69203 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69204
69205 cputime_t utime, stime, utimescaled, stimescaled;
69206 cputime_t gtime;
69207 @@ -1343,16 +1362,6 @@ struct task_struct {
69208 struct task_cputime cputime_expires;
69209 struct list_head cpu_timers[3];
69210
69211 -/* process credentials */
69212 - const struct cred *real_cred; /* objective and real subjective task
69213 - * credentials (COW) */
69214 - const struct cred *cred; /* effective (overridable) subjective task
69215 - * credentials (COW) */
69216 - struct mutex cred_guard_mutex; /* guard against foreign influences on
69217 - * credential calculations
69218 - * (notably. ptrace) */
69219 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69220 -
69221 char comm[TASK_COMM_LEN]; /* executable name excluding path
69222 - access with [gs]et_task_comm (which lock
69223 it with task_lock())
69224 @@ -1369,6 +1378,10 @@ struct task_struct {
69225 #endif
69226 /* CPU-specific state of this task */
69227 struct thread_struct thread;
69228 +/* thread_info moved to task_struct */
69229 +#ifdef CONFIG_X86
69230 + struct thread_info tinfo;
69231 +#endif
69232 /* filesystem information */
69233 struct fs_struct *fs;
69234 /* open file information */
69235 @@ -1436,6 +1449,15 @@ struct task_struct {
69236 int hardirq_context;
69237 int softirq_context;
69238 #endif
69239 +
69240 +/* process credentials */
69241 + const struct cred *real_cred; /* objective and real subjective task
69242 + * credentials (COW) */
69243 + struct mutex cred_guard_mutex; /* guard against foreign influences on
69244 + * credential calculations
69245 + * (notably. ptrace) */
69246 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69247 +
69248 #ifdef CONFIG_LOCKDEP
69249 # define MAX_LOCK_DEPTH 48UL
69250 u64 curr_chain_key;
69251 @@ -1456,6 +1478,9 @@ struct task_struct {
69252
69253 struct backing_dev_info *backing_dev_info;
69254
69255 + const struct cred *cred; /* effective (overridable) subjective task
69256 + * credentials (COW) */
69257 +
69258 struct io_context *io_context;
69259
69260 unsigned long ptrace_message;
69261 @@ -1519,6 +1544,27 @@ struct task_struct {
69262 unsigned long default_timer_slack_ns;
69263
69264 struct list_head *scm_work_list;
69265 +
69266 +#ifdef CONFIG_GRKERNSEC
69267 + /* grsecurity */
69268 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69269 + u64 exec_id;
69270 +#endif
69271 +#ifdef CONFIG_GRKERNSEC_SETXID
69272 + const struct cred *delayed_cred;
69273 +#endif
69274 + struct dentry *gr_chroot_dentry;
69275 + struct acl_subject_label *acl;
69276 + struct acl_role_label *role;
69277 + struct file *exec_file;
69278 + u16 acl_role_id;
69279 + /* is this the task that authenticated to the special role */
69280 + u8 acl_sp_role;
69281 + u8 is_writable;
69282 + u8 brute;
69283 + u8 gr_is_chrooted;
69284 +#endif
69285 +
69286 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69287 /* Index of current stored adress in ret_stack */
69288 int curr_ret_stack;
69289 @@ -1542,6 +1588,57 @@ struct task_struct {
69290 #endif /* CONFIG_TRACING */
69291 };
69292
69293 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69294 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69295 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69296 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69297 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69298 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69299 +
69300 +#ifdef CONFIG_PAX_SOFTMODE
69301 +extern int pax_softmode;
69302 +#endif
69303 +
69304 +extern int pax_check_flags(unsigned long *);
69305 +
69306 +/* if tsk != current then task_lock must be held on it */
69307 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69308 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
69309 +{
69310 + if (likely(tsk->mm))
69311 + return tsk->mm->pax_flags;
69312 + else
69313 + return 0UL;
69314 +}
69315 +
69316 +/* if tsk != current then task_lock must be held on it */
69317 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69318 +{
69319 + if (likely(tsk->mm)) {
69320 + tsk->mm->pax_flags = flags;
69321 + return 0;
69322 + }
69323 + return -EINVAL;
69324 +}
69325 +#endif
69326 +
69327 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69328 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
69329 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69330 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69331 +#endif
69332 +
69333 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69334 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69335 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
69336 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69337 +
69338 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69339 +extern void pax_track_stack(void);
69340 +#else
69341 +static inline void pax_track_stack(void) {}
69342 +#endif
69343 +
69344 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69345 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69346
69347 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69348 #define PF_DUMPCORE 0x00000200 /* dumped core */
69349 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69350 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69351 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69352 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69353 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69354 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69355 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69356 @@ -1978,7 +2075,9 @@ void yield(void);
69357 extern struct exec_domain default_exec_domain;
69358
69359 union thread_union {
69360 +#ifndef CONFIG_X86
69361 struct thread_info thread_info;
69362 +#endif
69363 unsigned long stack[THREAD_SIZE/sizeof(long)];
69364 };
69365
69366 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69367 */
69368
69369 extern struct task_struct *find_task_by_vpid(pid_t nr);
69370 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69371 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69372 struct pid_namespace *ns);
69373
69374 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69375 extern void exit_itimers(struct signal_struct *);
69376 extern void flush_itimer_signals(void);
69377
69378 -extern NORET_TYPE void do_group_exit(int);
69379 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69380
69381 extern void daemonize(const char *, ...);
69382 extern int allow_signal(int);
69383 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69384
69385 #endif
69386
69387 -static inline int object_is_on_stack(void *obj)
69388 +static inline int object_starts_on_stack(void *obj)
69389 {
69390 - void *stack = task_stack_page(current);
69391 + const void *stack = task_stack_page(current);
69392
69393 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69394 }
69395
69396 +#ifdef CONFIG_PAX_USERCOPY
69397 +extern int object_is_on_stack(const void *obj, unsigned long len);
69398 +#endif
69399 +
69400 extern void thread_info_cache_init(void);
69401
69402 #ifdef CONFIG_DEBUG_STACK_USAGE
69403 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69404 index 1ee2c05..81b7ec4 100644
69405 --- a/include/linux/screen_info.h
69406 +++ b/include/linux/screen_info.h
69407 @@ -42,7 +42,8 @@ struct screen_info {
69408 __u16 pages; /* 0x32 */
69409 __u16 vesa_attributes; /* 0x34 */
69410 __u32 capabilities; /* 0x36 */
69411 - __u8 _reserved[6]; /* 0x3a */
69412 + __u16 vesapm_size; /* 0x3a */
69413 + __u8 _reserved[4]; /* 0x3c */
69414 } __attribute__((packed));
69415
69416 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69417 diff --git a/include/linux/security.h b/include/linux/security.h
69418 index d40d23f..d739b08 100644
69419 --- a/include/linux/security.h
69420 +++ b/include/linux/security.h
69421 @@ -34,6 +34,7 @@
69422 #include <linux/key.h>
69423 #include <linux/xfrm.h>
69424 #include <linux/gfp.h>
69425 +#include <linux/grsecurity.h>
69426 #include <net/flow.h>
69427
69428 /* Maximum number of letters for an LSM name string */
69429 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69430 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69431 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69432 extern int cap_task_setnice(struct task_struct *p, int nice);
69433 -extern int cap_syslog(int type);
69434 +extern int cap_syslog(int type, bool from_file);
69435 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69436
69437 struct msghdr;
69438 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69439 * logging to the console.
69440 * See the syslog(2) manual page for an explanation of the @type values.
69441 * @type contains the type of action.
69442 + * @from_file indicates the context of action (if it came from /proc).
69443 * Return 0 if permission is granted.
69444 * @settime:
69445 * Check permission to change the system time.
69446 @@ -1445,7 +1447,7 @@ struct security_operations {
69447 int (*sysctl) (struct ctl_table *table, int op);
69448 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69449 int (*quota_on) (struct dentry *dentry);
69450 - int (*syslog) (int type);
69451 + int (*syslog) (int type, bool from_file);
69452 int (*settime) (struct timespec *ts, struct timezone *tz);
69453 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69454
69455 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69456 int security_sysctl(struct ctl_table *table, int op);
69457 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69458 int security_quota_on(struct dentry *dentry);
69459 -int security_syslog(int type);
69460 +int security_syslog(int type, bool from_file);
69461 int security_settime(struct timespec *ts, struct timezone *tz);
69462 int security_vm_enough_memory(long pages);
69463 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69464 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69465 return 0;
69466 }
69467
69468 -static inline int security_syslog(int type)
69469 +static inline int security_syslog(int type, bool from_file)
69470 {
69471 - return cap_syslog(type);
69472 + return cap_syslog(type, from_file);
69473 }
69474
69475 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69476 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69477 index 8366d8f..cc5f9d6 100644
69478 --- a/include/linux/seq_file.h
69479 +++ b/include/linux/seq_file.h
69480 @@ -23,6 +23,9 @@ struct seq_file {
69481 u64 version;
69482 struct mutex lock;
69483 const struct seq_operations *op;
69484 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69485 + u64 exec_id;
69486 +#endif
69487 void *private;
69488 };
69489
69490 @@ -32,6 +35,7 @@ struct seq_operations {
69491 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69492 int (*show) (struct seq_file *m, void *v);
69493 };
69494 +typedef struct seq_operations __no_const seq_operations_no_const;
69495
69496 #define SEQ_SKIP 1
69497
69498 diff --git a/include/linux/shm.h b/include/linux/shm.h
69499 index eca6235..c7417ed 100644
69500 --- a/include/linux/shm.h
69501 +++ b/include/linux/shm.h
69502 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69503 pid_t shm_cprid;
69504 pid_t shm_lprid;
69505 struct user_struct *mlock_user;
69506 +#ifdef CONFIG_GRKERNSEC
69507 + time_t shm_createtime;
69508 + pid_t shm_lapid;
69509 +#endif
69510 };
69511
69512 /* shm_mode upper byte flags */
69513 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69514 index bcdd660..6e12e11 100644
69515 --- a/include/linux/skbuff.h
69516 +++ b/include/linux/skbuff.h
69517 @@ -14,6 +14,7 @@
69518 #ifndef _LINUX_SKBUFF_H
69519 #define _LINUX_SKBUFF_H
69520
69521 +#include <linux/const.h>
69522 #include <linux/kernel.h>
69523 #include <linux/kmemcheck.h>
69524 #include <linux/compiler.h>
69525 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69526 */
69527 static inline int skb_queue_empty(const struct sk_buff_head *list)
69528 {
69529 - return list->next == (struct sk_buff *)list;
69530 + return list->next == (const struct sk_buff *)list;
69531 }
69532
69533 /**
69534 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69535 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69536 const struct sk_buff *skb)
69537 {
69538 - return (skb->next == (struct sk_buff *) list);
69539 + return (skb->next == (const struct sk_buff *) list);
69540 }
69541
69542 /**
69543 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69544 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69545 const struct sk_buff *skb)
69546 {
69547 - return (skb->prev == (struct sk_buff *) list);
69548 + return (skb->prev == (const struct sk_buff *) list);
69549 }
69550
69551 /**
69552 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69553 * headroom, you should not reduce this.
69554 */
69555 #ifndef NET_SKB_PAD
69556 -#define NET_SKB_PAD 32
69557 +#define NET_SKB_PAD (_AC(32,UL))
69558 #endif
69559
69560 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69561 diff --git a/include/linux/slab.h b/include/linux/slab.h
69562 index 2da8372..a3be824 100644
69563 --- a/include/linux/slab.h
69564 +++ b/include/linux/slab.h
69565 @@ -11,12 +11,20 @@
69566
69567 #include <linux/gfp.h>
69568 #include <linux/types.h>
69569 +#include <linux/err.h>
69570
69571 /*
69572 * Flags to pass to kmem_cache_create().
69573 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69574 */
69575 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69576 +
69577 +#ifdef CONFIG_PAX_USERCOPY
69578 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69579 +#else
69580 +#define SLAB_USERCOPY 0x00000000UL
69581 +#endif
69582 +
69583 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69584 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69585 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69586 @@ -82,10 +90,13 @@
69587 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69588 * Both make kfree a no-op.
69589 */
69590 -#define ZERO_SIZE_PTR ((void *)16)
69591 +#define ZERO_SIZE_PTR \
69592 +({ \
69593 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69594 + (void *)(-MAX_ERRNO-1L); \
69595 +})
69596
69597 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69598 - (unsigned long)ZERO_SIZE_PTR)
69599 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69600
69601 /*
69602 * struct kmem_cache related prototypes
69603 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69604 void kfree(const void *);
69605 void kzfree(const void *);
69606 size_t ksize(const void *);
69607 +void check_object_size(const void *ptr, unsigned long n, bool to);
69608
69609 /*
69610 * Allocator specific definitions. These are mainly used to establish optimized
69611 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69612
69613 void __init kmem_cache_init_late(void);
69614
69615 +#define kmalloc(x, y) \
69616 +({ \
69617 + void *___retval; \
69618 + intoverflow_t ___x = (intoverflow_t)x; \
69619 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69620 + ___retval = NULL; \
69621 + else \
69622 + ___retval = kmalloc((size_t)___x, (y)); \
69623 + ___retval; \
69624 +})
69625 +
69626 +#define kmalloc_node(x, y, z) \
69627 +({ \
69628 + void *___retval; \
69629 + intoverflow_t ___x = (intoverflow_t)x; \
69630 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69631 + ___retval = NULL; \
69632 + else \
69633 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69634 + ___retval; \
69635 +})
69636 +
69637 +#define kzalloc(x, y) \
69638 +({ \
69639 + void *___retval; \
69640 + intoverflow_t ___x = (intoverflow_t)x; \
69641 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69642 + ___retval = NULL; \
69643 + else \
69644 + ___retval = kzalloc((size_t)___x, (y)); \
69645 + ___retval; \
69646 +})
69647 +
69648 #endif /* _LINUX_SLAB_H */
69649 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69650 index 850d057..d9dfe3c 100644
69651 --- a/include/linux/slab_def.h
69652 +++ b/include/linux/slab_def.h
69653 @@ -69,10 +69,10 @@ struct kmem_cache {
69654 unsigned long node_allocs;
69655 unsigned long node_frees;
69656 unsigned long node_overflow;
69657 - atomic_t allochit;
69658 - atomic_t allocmiss;
69659 - atomic_t freehit;
69660 - atomic_t freemiss;
69661 + atomic_unchecked_t allochit;
69662 + atomic_unchecked_t allocmiss;
69663 + atomic_unchecked_t freehit;
69664 + atomic_unchecked_t freemiss;
69665
69666 /*
69667 * If debugging is enabled, then the allocator can add additional
69668 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69669 index 5ad70a6..57f9f65 100644
69670 --- a/include/linux/slub_def.h
69671 +++ b/include/linux/slub_def.h
69672 @@ -86,7 +86,7 @@ struct kmem_cache {
69673 struct kmem_cache_order_objects max;
69674 struct kmem_cache_order_objects min;
69675 gfp_t allocflags; /* gfp flags to use on each alloc */
69676 - int refcount; /* Refcount for slab cache destroy */
69677 + atomic_t refcount; /* Refcount for slab cache destroy */
69678 void (*ctor)(void *);
69679 int inuse; /* Offset to metadata */
69680 int align; /* Alignment */
69681 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69682 #endif
69683
69684 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69685 -void *__kmalloc(size_t size, gfp_t flags);
69686 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69687
69688 #ifdef CONFIG_KMEMTRACE
69689 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69690 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69691 index 67ad11f..0bbd8af 100644
69692 --- a/include/linux/sonet.h
69693 +++ b/include/linux/sonet.h
69694 @@ -61,7 +61,7 @@ struct sonet_stats {
69695 #include <asm/atomic.h>
69696
69697 struct k_sonet_stats {
69698 -#define __HANDLE_ITEM(i) atomic_t i
69699 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69700 __SONET_ITEMS
69701 #undef __HANDLE_ITEM
69702 };
69703 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69704 index 6f52b4d..5500323 100644
69705 --- a/include/linux/sunrpc/cache.h
69706 +++ b/include/linux/sunrpc/cache.h
69707 @@ -125,7 +125,7 @@ struct cache_detail {
69708 */
69709 struct cache_req {
69710 struct cache_deferred_req *(*defer)(struct cache_req *req);
69711 -};
69712 +} __no_const;
69713 /* this must be embedded in a deferred_request that is being
69714 * delayed awaiting cache-fill
69715 */
69716 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69717 index 8ed9642..101ceab 100644
69718 --- a/include/linux/sunrpc/clnt.h
69719 +++ b/include/linux/sunrpc/clnt.h
69720 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69721 {
69722 switch (sap->sa_family) {
69723 case AF_INET:
69724 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69725 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69726 case AF_INET6:
69727 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69728 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69729 }
69730 return 0;
69731 }
69732 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69733 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69734 const struct sockaddr *src)
69735 {
69736 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69737 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69738 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69739
69740 dsin->sin_family = ssin->sin_family;
69741 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69742 if (sa->sa_family != AF_INET6)
69743 return 0;
69744
69745 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69746 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69747 }
69748
69749 #endif /* __KERNEL__ */
69750 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69751 index c14fe86..393245e 100644
69752 --- a/include/linux/sunrpc/svc_rdma.h
69753 +++ b/include/linux/sunrpc/svc_rdma.h
69754 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69755 extern unsigned int svcrdma_max_requests;
69756 extern unsigned int svcrdma_max_req_size;
69757
69758 -extern atomic_t rdma_stat_recv;
69759 -extern atomic_t rdma_stat_read;
69760 -extern atomic_t rdma_stat_write;
69761 -extern atomic_t rdma_stat_sq_starve;
69762 -extern atomic_t rdma_stat_rq_starve;
69763 -extern atomic_t rdma_stat_rq_poll;
69764 -extern atomic_t rdma_stat_rq_prod;
69765 -extern atomic_t rdma_stat_sq_poll;
69766 -extern atomic_t rdma_stat_sq_prod;
69767 +extern atomic_unchecked_t rdma_stat_recv;
69768 +extern atomic_unchecked_t rdma_stat_read;
69769 +extern atomic_unchecked_t rdma_stat_write;
69770 +extern atomic_unchecked_t rdma_stat_sq_starve;
69771 +extern atomic_unchecked_t rdma_stat_rq_starve;
69772 +extern atomic_unchecked_t rdma_stat_rq_poll;
69773 +extern atomic_unchecked_t rdma_stat_rq_prod;
69774 +extern atomic_unchecked_t rdma_stat_sq_poll;
69775 +extern atomic_unchecked_t rdma_stat_sq_prod;
69776
69777 #define RPCRDMA_VERSION 1
69778
69779 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69780 index 5e781d8..1e62818 100644
69781 --- a/include/linux/suspend.h
69782 +++ b/include/linux/suspend.h
69783 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69784 * which require special recovery actions in that situation.
69785 */
69786 struct platform_suspend_ops {
69787 - int (*valid)(suspend_state_t state);
69788 - int (*begin)(suspend_state_t state);
69789 - int (*prepare)(void);
69790 - int (*prepare_late)(void);
69791 - int (*enter)(suspend_state_t state);
69792 - void (*wake)(void);
69793 - void (*finish)(void);
69794 - void (*end)(void);
69795 - void (*recover)(void);
69796 + int (* const valid)(suspend_state_t state);
69797 + int (* const begin)(suspend_state_t state);
69798 + int (* const prepare)(void);
69799 + int (* const prepare_late)(void);
69800 + int (* const enter)(suspend_state_t state);
69801 + void (* const wake)(void);
69802 + void (* const finish)(void);
69803 + void (* const end)(void);
69804 + void (* const recover)(void);
69805 };
69806
69807 #ifdef CONFIG_SUSPEND
69808 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
69809 * suspend_set_ops - set platform dependent suspend operations
69810 * @ops: The new suspend operations to set.
69811 */
69812 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
69813 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69814 extern int suspend_valid_only_mem(suspend_state_t state);
69815
69816 /**
69817 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69818 #else /* !CONFIG_SUSPEND */
69819 #define suspend_valid_only_mem NULL
69820
69821 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69822 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69823 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69824 #endif /* !CONFIG_SUSPEND */
69825
69826 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69827 * platforms which require special recovery actions in that situation.
69828 */
69829 struct platform_hibernation_ops {
69830 - int (*begin)(void);
69831 - void (*end)(void);
69832 - int (*pre_snapshot)(void);
69833 - void (*finish)(void);
69834 - int (*prepare)(void);
69835 - int (*enter)(void);
69836 - void (*leave)(void);
69837 - int (*pre_restore)(void);
69838 - void (*restore_cleanup)(void);
69839 - void (*recover)(void);
69840 + int (* const begin)(void);
69841 + void (* const end)(void);
69842 + int (* const pre_snapshot)(void);
69843 + void (* const finish)(void);
69844 + int (* const prepare)(void);
69845 + int (* const enter)(void);
69846 + void (* const leave)(void);
69847 + int (* const pre_restore)(void);
69848 + void (* const restore_cleanup)(void);
69849 + void (* const recover)(void);
69850 };
69851
69852 #ifdef CONFIG_HIBERNATION
69853 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69854 extern void swsusp_unset_page_free(struct page *);
69855 extern unsigned long get_safe_page(gfp_t gfp_mask);
69856
69857 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69858 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69859 extern int hibernate(void);
69860 extern bool system_entering_hibernation(void);
69861 #else /* CONFIG_HIBERNATION */
69862 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69863 static inline void swsusp_set_page_free(struct page *p) {}
69864 static inline void swsusp_unset_page_free(struct page *p) {}
69865
69866 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69867 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69868 static inline int hibernate(void) { return -ENOSYS; }
69869 static inline bool system_entering_hibernation(void) { return false; }
69870 #endif /* CONFIG_HIBERNATION */
69871 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69872 index 0eb6942..a805cb6 100644
69873 --- a/include/linux/sysctl.h
69874 +++ b/include/linux/sysctl.h
69875 @@ -164,7 +164,11 @@ enum
69876 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69877 };
69878
69879 -
69880 +#ifdef CONFIG_PAX_SOFTMODE
69881 +enum {
69882 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69883 +};
69884 +#endif
69885
69886 /* CTL_VM names: */
69887 enum
69888 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69889
69890 extern int proc_dostring(struct ctl_table *, int,
69891 void __user *, size_t *, loff_t *);
69892 +extern int proc_dostring_modpriv(struct ctl_table *, int,
69893 + void __user *, size_t *, loff_t *);
69894 extern int proc_dointvec(struct ctl_table *, int,
69895 void __user *, size_t *, loff_t *);
69896 extern int proc_dointvec_minmax(struct ctl_table *, int,
69897 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69898
69899 extern ctl_handler sysctl_data;
69900 extern ctl_handler sysctl_string;
69901 +extern ctl_handler sysctl_string_modpriv;
69902 extern ctl_handler sysctl_intvec;
69903 extern ctl_handler sysctl_jiffies;
69904 extern ctl_handler sysctl_ms_jiffies;
69905 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69906 index 9d68fed..71f02cc 100644
69907 --- a/include/linux/sysfs.h
69908 +++ b/include/linux/sysfs.h
69909 @@ -75,8 +75,8 @@ struct bin_attribute {
69910 };
69911
69912 struct sysfs_ops {
69913 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
69914 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69915 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69916 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69917 };
69918
69919 struct sysfs_dirent;
69920 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69921 new file mode 100644
69922 index 0000000..3891139
69923 --- /dev/null
69924 +++ b/include/linux/syslog.h
69925 @@ -0,0 +1,52 @@
69926 +/* Syslog internals
69927 + *
69928 + * Copyright 2010 Canonical, Ltd.
69929 + * Author: Kees Cook <kees.cook@canonical.com>
69930 + *
69931 + * This program is free software; you can redistribute it and/or modify
69932 + * it under the terms of the GNU General Public License as published by
69933 + * the Free Software Foundation; either version 2, or (at your option)
69934 + * any later version.
69935 + *
69936 + * This program is distributed in the hope that it will be useful,
69937 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
69938 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69939 + * GNU General Public License for more details.
69940 + *
69941 + * You should have received a copy of the GNU General Public License
69942 + * along with this program; see the file COPYING. If not, write to
69943 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69944 + */
69945 +
69946 +#ifndef _LINUX_SYSLOG_H
69947 +#define _LINUX_SYSLOG_H
69948 +
69949 +/* Close the log. Currently a NOP. */
69950 +#define SYSLOG_ACTION_CLOSE 0
69951 +/* Open the log. Currently a NOP. */
69952 +#define SYSLOG_ACTION_OPEN 1
69953 +/* Read from the log. */
69954 +#define SYSLOG_ACTION_READ 2
69955 +/* Read all messages remaining in the ring buffer. */
69956 +#define SYSLOG_ACTION_READ_ALL 3
69957 +/* Read and clear all messages remaining in the ring buffer */
69958 +#define SYSLOG_ACTION_READ_CLEAR 4
69959 +/* Clear ring buffer. */
69960 +#define SYSLOG_ACTION_CLEAR 5
69961 +/* Disable printk's to console */
69962 +#define SYSLOG_ACTION_CONSOLE_OFF 6
69963 +/* Enable printk's to console */
69964 +#define SYSLOG_ACTION_CONSOLE_ON 7
69965 +/* Set level of messages printed to console */
69966 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69967 +/* Return number of unread characters in the log buffer */
69968 +#define SYSLOG_ACTION_SIZE_UNREAD 9
69969 +/* Return size of the log buffer */
69970 +#define SYSLOG_ACTION_SIZE_BUFFER 10
69971 +
69972 +#define SYSLOG_FROM_CALL 0
69973 +#define SYSLOG_FROM_FILE 1
69974 +
69975 +int do_syslog(int type, char __user *buf, int count, bool from_file);
69976 +
69977 +#endif /* _LINUX_SYSLOG_H */
69978 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69979 index a8cc4e1..98d3b85 100644
69980 --- a/include/linux/thread_info.h
69981 +++ b/include/linux/thread_info.h
69982 @@ -23,7 +23,7 @@ struct restart_block {
69983 };
69984 /* For futex_wait and futex_wait_requeue_pi */
69985 struct {
69986 - u32 *uaddr;
69987 + u32 __user *uaddr;
69988 u32 val;
69989 u32 flags;
69990 u32 bitset;
69991 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
69992 index 1eb44a9..f582df3 100644
69993 --- a/include/linux/tracehook.h
69994 +++ b/include/linux/tracehook.h
69995 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
69996 /*
69997 * ptrace report for syscall entry and exit looks identical.
69998 */
69999 -static inline void ptrace_report_syscall(struct pt_regs *regs)
70000 +static inline int ptrace_report_syscall(struct pt_regs *regs)
70001 {
70002 int ptrace = task_ptrace(current);
70003
70004 if (!(ptrace & PT_PTRACED))
70005 - return;
70006 + return 0;
70007
70008 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70009
70010 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70011 send_sig(current->exit_code, current, 1);
70012 current->exit_code = 0;
70013 }
70014 +
70015 + return fatal_signal_pending(current);
70016 }
70017
70018 /**
70019 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70020 static inline __must_check int tracehook_report_syscall_entry(
70021 struct pt_regs *regs)
70022 {
70023 - ptrace_report_syscall(regs);
70024 - return 0;
70025 + return ptrace_report_syscall(regs);
70026 }
70027
70028 /**
70029 diff --git a/include/linux/tty.h b/include/linux/tty.h
70030 index e9c57e9..ee6d489 100644
70031 --- a/include/linux/tty.h
70032 +++ b/include/linux/tty.h
70033 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70034 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70035 extern void tty_ldisc_enable(struct tty_struct *tty);
70036
70037 -
70038 /* n_tty.c */
70039 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70040
70041 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70042 index 0c4ee9b..9f7c426 100644
70043 --- a/include/linux/tty_ldisc.h
70044 +++ b/include/linux/tty_ldisc.h
70045 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70046
70047 struct module *owner;
70048
70049 - int refcount;
70050 + atomic_t refcount;
70051 };
70052
70053 struct tty_ldisc {
70054 diff --git a/include/linux/types.h b/include/linux/types.h
70055 index c42724f..d190eee 100644
70056 --- a/include/linux/types.h
70057 +++ b/include/linux/types.h
70058 @@ -191,10 +191,26 @@ typedef struct {
70059 volatile int counter;
70060 } atomic_t;
70061
70062 +#ifdef CONFIG_PAX_REFCOUNT
70063 +typedef struct {
70064 + volatile int counter;
70065 +} atomic_unchecked_t;
70066 +#else
70067 +typedef atomic_t atomic_unchecked_t;
70068 +#endif
70069 +
70070 #ifdef CONFIG_64BIT
70071 typedef struct {
70072 volatile long counter;
70073 } atomic64_t;
70074 +
70075 +#ifdef CONFIG_PAX_REFCOUNT
70076 +typedef struct {
70077 + volatile long counter;
70078 +} atomic64_unchecked_t;
70079 +#else
70080 +typedef atomic64_t atomic64_unchecked_t;
70081 +#endif
70082 #endif
70083
70084 struct ustat {
70085 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70086 index 6b58367..53a3e8e 100644
70087 --- a/include/linux/uaccess.h
70088 +++ b/include/linux/uaccess.h
70089 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70090 long ret; \
70091 mm_segment_t old_fs = get_fs(); \
70092 \
70093 - set_fs(KERNEL_DS); \
70094 pagefault_disable(); \
70095 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70096 - pagefault_enable(); \
70097 + set_fs(KERNEL_DS); \
70098 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70099 set_fs(old_fs); \
70100 + pagefault_enable(); \
70101 ret; \
70102 })
70103
70104 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70105 * Safely read from address @src to the buffer at @dst. If a kernel fault
70106 * happens, handle that and return -EFAULT.
70107 */
70108 -extern long probe_kernel_read(void *dst, void *src, size_t size);
70109 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
70110
70111 /*
70112 * probe_kernel_write(): safely attempt to write to a location
70113 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70114 * Safely write to address @dst from the buffer at @src. If a kernel fault
70115 * happens, handle that and return -EFAULT.
70116 */
70117 -extern long probe_kernel_write(void *dst, void *src, size_t size);
70118 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
70119
70120 #endif /* __LINUX_UACCESS_H__ */
70121 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70122 index 99c1b4d..bb94261 100644
70123 --- a/include/linux/unaligned/access_ok.h
70124 +++ b/include/linux/unaligned/access_ok.h
70125 @@ -6,32 +6,32 @@
70126
70127 static inline u16 get_unaligned_le16(const void *p)
70128 {
70129 - return le16_to_cpup((__le16 *)p);
70130 + return le16_to_cpup((const __le16 *)p);
70131 }
70132
70133 static inline u32 get_unaligned_le32(const void *p)
70134 {
70135 - return le32_to_cpup((__le32 *)p);
70136 + return le32_to_cpup((const __le32 *)p);
70137 }
70138
70139 static inline u64 get_unaligned_le64(const void *p)
70140 {
70141 - return le64_to_cpup((__le64 *)p);
70142 + return le64_to_cpup((const __le64 *)p);
70143 }
70144
70145 static inline u16 get_unaligned_be16(const void *p)
70146 {
70147 - return be16_to_cpup((__be16 *)p);
70148 + return be16_to_cpup((const __be16 *)p);
70149 }
70150
70151 static inline u32 get_unaligned_be32(const void *p)
70152 {
70153 - return be32_to_cpup((__be32 *)p);
70154 + return be32_to_cpup((const __be32 *)p);
70155 }
70156
70157 static inline u64 get_unaligned_be64(const void *p)
70158 {
70159 - return be64_to_cpup((__be64 *)p);
70160 + return be64_to_cpup((const __be64 *)p);
70161 }
70162
70163 static inline void put_unaligned_le16(u16 val, void *p)
70164 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70165 index 79b9837..b5a56f9 100644
70166 --- a/include/linux/vermagic.h
70167 +++ b/include/linux/vermagic.h
70168 @@ -26,9 +26,35 @@
70169 #define MODULE_ARCH_VERMAGIC ""
70170 #endif
70171
70172 +#ifdef CONFIG_PAX_REFCOUNT
70173 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
70174 +#else
70175 +#define MODULE_PAX_REFCOUNT ""
70176 +#endif
70177 +
70178 +#ifdef CONSTIFY_PLUGIN
70179 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70180 +#else
70181 +#define MODULE_CONSTIFY_PLUGIN ""
70182 +#endif
70183 +
70184 +#ifdef STACKLEAK_PLUGIN
70185 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70186 +#else
70187 +#define MODULE_STACKLEAK_PLUGIN ""
70188 +#endif
70189 +
70190 +#ifdef CONFIG_GRKERNSEC
70191 +#define MODULE_GRSEC "GRSEC "
70192 +#else
70193 +#define MODULE_GRSEC ""
70194 +#endif
70195 +
70196 #define VERMAGIC_STRING \
70197 UTS_RELEASE " " \
70198 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70199 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70200 - MODULE_ARCH_VERMAGIC
70201 + MODULE_ARCH_VERMAGIC \
70202 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70203 + MODULE_GRSEC
70204
70205 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70206 index 819a634..462ac12 100644
70207 --- a/include/linux/vmalloc.h
70208 +++ b/include/linux/vmalloc.h
70209 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70210 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70211 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70212 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70213 +
70214 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70215 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70216 +#endif
70217 +
70218 /* bits [20..32] reserved for arch specific ioremap internals */
70219
70220 /*
70221 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70222
70223 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70224
70225 +#define vmalloc(x) \
70226 +({ \
70227 + void *___retval; \
70228 + intoverflow_t ___x = (intoverflow_t)x; \
70229 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70230 + ___retval = NULL; \
70231 + else \
70232 + ___retval = vmalloc((unsigned long)___x); \
70233 + ___retval; \
70234 +})
70235 +
70236 +#define __vmalloc(x, y, z) \
70237 +({ \
70238 + void *___retval; \
70239 + intoverflow_t ___x = (intoverflow_t)x; \
70240 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70241 + ___retval = NULL; \
70242 + else \
70243 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70244 + ___retval; \
70245 +})
70246 +
70247 +#define vmalloc_user(x) \
70248 +({ \
70249 + void *___retval; \
70250 + intoverflow_t ___x = (intoverflow_t)x; \
70251 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70252 + ___retval = NULL; \
70253 + else \
70254 + ___retval = vmalloc_user((unsigned long)___x); \
70255 + ___retval; \
70256 +})
70257 +
70258 +#define vmalloc_exec(x) \
70259 +({ \
70260 + void *___retval; \
70261 + intoverflow_t ___x = (intoverflow_t)x; \
70262 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70263 + ___retval = NULL; \
70264 + else \
70265 + ___retval = vmalloc_exec((unsigned long)___x); \
70266 + ___retval; \
70267 +})
70268 +
70269 +#define vmalloc_node(x, y) \
70270 +({ \
70271 + void *___retval; \
70272 + intoverflow_t ___x = (intoverflow_t)x; \
70273 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70274 + ___retval = NULL; \
70275 + else \
70276 + ___retval = vmalloc_node((unsigned long)___x, (y));\
70277 + ___retval; \
70278 +})
70279 +
70280 +#define vmalloc_32(x) \
70281 +({ \
70282 + void *___retval; \
70283 + intoverflow_t ___x = (intoverflow_t)x; \
70284 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70285 + ___retval = NULL; \
70286 + else \
70287 + ___retval = vmalloc_32((unsigned long)___x); \
70288 + ___retval; \
70289 +})
70290 +
70291 +#define vmalloc_32_user(x) \
70292 +({ \
70293 + void *___retval; \
70294 + intoverflow_t ___x = (intoverflow_t)x; \
70295 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70296 + ___retval = NULL; \
70297 + else \
70298 + ___retval = vmalloc_32_user((unsigned long)___x);\
70299 + ___retval; \
70300 +})
70301 +
70302 #endif /* _LINUX_VMALLOC_H */
70303 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70304 index 13070d6..aa4159a 100644
70305 --- a/include/linux/vmstat.h
70306 +++ b/include/linux/vmstat.h
70307 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70308 /*
70309 * Zone based page accounting with per cpu differentials.
70310 */
70311 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70312 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70313
70314 static inline void zone_page_state_add(long x, struct zone *zone,
70315 enum zone_stat_item item)
70316 {
70317 - atomic_long_add(x, &zone->vm_stat[item]);
70318 - atomic_long_add(x, &vm_stat[item]);
70319 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70320 + atomic_long_add_unchecked(x, &vm_stat[item]);
70321 }
70322
70323 static inline unsigned long global_page_state(enum zone_stat_item item)
70324 {
70325 - long x = atomic_long_read(&vm_stat[item]);
70326 + long x = atomic_long_read_unchecked(&vm_stat[item]);
70327 #ifdef CONFIG_SMP
70328 if (x < 0)
70329 x = 0;
70330 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70331 static inline unsigned long zone_page_state(struct zone *zone,
70332 enum zone_stat_item item)
70333 {
70334 - long x = atomic_long_read(&zone->vm_stat[item]);
70335 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70336 #ifdef CONFIG_SMP
70337 if (x < 0)
70338 x = 0;
70339 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70340 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70341 enum zone_stat_item item)
70342 {
70343 - long x = atomic_long_read(&zone->vm_stat[item]);
70344 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70345
70346 #ifdef CONFIG_SMP
70347 int cpu;
70348 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70349
70350 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70351 {
70352 - atomic_long_inc(&zone->vm_stat[item]);
70353 - atomic_long_inc(&vm_stat[item]);
70354 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
70355 + atomic_long_inc_unchecked(&vm_stat[item]);
70356 }
70357
70358 static inline void __inc_zone_page_state(struct page *page,
70359 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70360
70361 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70362 {
70363 - atomic_long_dec(&zone->vm_stat[item]);
70364 - atomic_long_dec(&vm_stat[item]);
70365 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
70366 + atomic_long_dec_unchecked(&vm_stat[item]);
70367 }
70368
70369 static inline void __dec_zone_page_state(struct page *page,
70370 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70371 index 5c84af8..1a3b6e2 100644
70372 --- a/include/linux/xattr.h
70373 +++ b/include/linux/xattr.h
70374 @@ -33,6 +33,11 @@
70375 #define XATTR_USER_PREFIX "user."
70376 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70377
70378 +/* User namespace */
70379 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70380 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
70381 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70382 +
70383 struct inode;
70384 struct dentry;
70385
70386 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70387 index eed5fcc..5080d24 100644
70388 --- a/include/media/saa7146_vv.h
70389 +++ b/include/media/saa7146_vv.h
70390 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
70391 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70392
70393 /* the extension can override this */
70394 - struct v4l2_ioctl_ops ops;
70395 + v4l2_ioctl_ops_no_const ops;
70396 /* pointer to the saa7146 core ops */
70397 const struct v4l2_ioctl_ops *core_ops;
70398
70399 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70400 index 73c9867..2da8837 100644
70401 --- a/include/media/v4l2-dev.h
70402 +++ b/include/media/v4l2-dev.h
70403 @@ -34,7 +34,7 @@ struct v4l2_device;
70404 #define V4L2_FL_UNREGISTERED (0)
70405
70406 struct v4l2_file_operations {
70407 - struct module *owner;
70408 + struct module * const owner;
70409 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70410 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70411 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70412 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
70413 int (*open) (struct file *);
70414 int (*release) (struct file *);
70415 };
70416 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70417
70418 /*
70419 * Newer version of video_device, handled by videodev2.c
70420 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70421 index 5d5d550..f559ef1 100644
70422 --- a/include/media/v4l2-device.h
70423 +++ b/include/media/v4l2-device.h
70424 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70425 this function returns 0. If the name ends with a digit (e.g. cx18),
70426 then the name will be set to cx18-0 since cx180 looks really odd. */
70427 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70428 - atomic_t *instance);
70429 + atomic_unchecked_t *instance);
70430
70431 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70432 Since the parent disappears this ensures that v4l2_dev doesn't have an
70433 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70434 index 7a4529d..7244290 100644
70435 --- a/include/media/v4l2-ioctl.h
70436 +++ b/include/media/v4l2-ioctl.h
70437 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70438 long (*vidioc_default) (struct file *file, void *fh,
70439 int cmd, void *arg);
70440 };
70441 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70442
70443
70444 /* v4l debugging and diagnostics */
70445 diff --git a/include/net/flow.h b/include/net/flow.h
70446 index 809970b..c3df4f3 100644
70447 --- a/include/net/flow.h
70448 +++ b/include/net/flow.h
70449 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70450 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70451 u8 dir, flow_resolve_t resolver);
70452 extern void flow_cache_flush(void);
70453 -extern atomic_t flow_cache_genid;
70454 +extern atomic_unchecked_t flow_cache_genid;
70455
70456 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70457 {
70458 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70459 index 15e1f8fe..668837c 100644
70460 --- a/include/net/inetpeer.h
70461 +++ b/include/net/inetpeer.h
70462 @@ -24,7 +24,7 @@ struct inet_peer
70463 __u32 dtime; /* the time of last use of not
70464 * referenced entries */
70465 atomic_t refcnt;
70466 - atomic_t rid; /* Frag reception counter */
70467 + atomic_unchecked_t rid; /* Frag reception counter */
70468 __u32 tcp_ts;
70469 unsigned long tcp_ts_stamp;
70470 };
70471 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70472 index 98978e7..2243a3d 100644
70473 --- a/include/net/ip_vs.h
70474 +++ b/include/net/ip_vs.h
70475 @@ -365,7 +365,7 @@ struct ip_vs_conn {
70476 struct ip_vs_conn *control; /* Master control connection */
70477 atomic_t n_control; /* Number of controlled ones */
70478 struct ip_vs_dest *dest; /* real server */
70479 - atomic_t in_pkts; /* incoming packet counter */
70480 + atomic_unchecked_t in_pkts; /* incoming packet counter */
70481
70482 /* packet transmitter for different forwarding methods. If it
70483 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70484 @@ -466,7 +466,7 @@ struct ip_vs_dest {
70485 union nf_inet_addr addr; /* IP address of the server */
70486 __be16 port; /* port number of the server */
70487 volatile unsigned flags; /* dest status flags */
70488 - atomic_t conn_flags; /* flags to copy to conn */
70489 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
70490 atomic_t weight; /* server weight */
70491
70492 atomic_t refcnt; /* reference counter */
70493 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70494 index 69b610a..fe3962c 100644
70495 --- a/include/net/irda/ircomm_core.h
70496 +++ b/include/net/irda/ircomm_core.h
70497 @@ -51,7 +51,7 @@ typedef struct {
70498 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70499 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70500 struct ircomm_info *);
70501 -} call_t;
70502 +} __no_const call_t;
70503
70504 struct ircomm_cb {
70505 irda_queue_t queue;
70506 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70507 index eea2e61..08c692d 100644
70508 --- a/include/net/irda/ircomm_tty.h
70509 +++ b/include/net/irda/ircomm_tty.h
70510 @@ -35,6 +35,7 @@
70511 #include <linux/termios.h>
70512 #include <linux/timer.h>
70513 #include <linux/tty.h> /* struct tty_struct */
70514 +#include <asm/local.h>
70515
70516 #include <net/irda/irias_object.h>
70517 #include <net/irda/ircomm_core.h>
70518 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70519 unsigned short close_delay;
70520 unsigned short closing_wait; /* time to wait before closing */
70521
70522 - int open_count;
70523 - int blocked_open; /* # of blocked opens */
70524 + local_t open_count;
70525 + local_t blocked_open; /* # of blocked opens */
70526
70527 /* Protect concurent access to :
70528 * o self->open_count
70529 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70530 index f82a1e8..82d81e8 100644
70531 --- a/include/net/iucv/af_iucv.h
70532 +++ b/include/net/iucv/af_iucv.h
70533 @@ -87,7 +87,7 @@ struct iucv_sock {
70534 struct iucv_sock_list {
70535 struct hlist_head head;
70536 rwlock_t lock;
70537 - atomic_t autobind_name;
70538 + atomic_unchecked_t autobind_name;
70539 };
70540
70541 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70542 diff --git a/include/net/lapb.h b/include/net/lapb.h
70543 index 96cb5dd..25e8d4f 100644
70544 --- a/include/net/lapb.h
70545 +++ b/include/net/lapb.h
70546 @@ -95,7 +95,7 @@ struct lapb_cb {
70547 struct sk_buff_head write_queue;
70548 struct sk_buff_head ack_queue;
70549 unsigned char window;
70550 - struct lapb_register_struct callbacks;
70551 + struct lapb_register_struct *callbacks;
70552
70553 /* FRMR control information */
70554 struct lapb_frame frmr_data;
70555 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70556 index 3817fda..cdb2343 100644
70557 --- a/include/net/neighbour.h
70558 +++ b/include/net/neighbour.h
70559 @@ -131,7 +131,7 @@ struct neigh_ops
70560 int (*connected_output)(struct sk_buff*);
70561 int (*hh_output)(struct sk_buff*);
70562 int (*queue_xmit)(struct sk_buff*);
70563 -};
70564 +} __do_const;
70565
70566 struct pneigh_entry
70567 {
70568 diff --git a/include/net/netlink.h b/include/net/netlink.h
70569 index c344646..4778c71 100644
70570 --- a/include/net/netlink.h
70571 +++ b/include/net/netlink.h
70572 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70573 {
70574 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70575 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70576 - nlh->nlmsg_len <= remaining);
70577 + nlh->nlmsg_len <= (unsigned int)remaining);
70578 }
70579
70580 /**
70581 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70582 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70583 {
70584 if (mark)
70585 - skb_trim(skb, (unsigned char *) mark - skb->data);
70586 + skb_trim(skb, (const unsigned char *) mark - skb->data);
70587 }
70588
70589 /**
70590 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70591 index 9a4b8b7..e49e077 100644
70592 --- a/include/net/netns/ipv4.h
70593 +++ b/include/net/netns/ipv4.h
70594 @@ -54,7 +54,7 @@ struct netns_ipv4 {
70595 int current_rt_cache_rebuild_count;
70596
70597 struct timer_list rt_secret_timer;
70598 - atomic_t rt_genid;
70599 + atomic_unchecked_t rt_genid;
70600
70601 #ifdef CONFIG_IP_MROUTE
70602 struct sock *mroute_sk;
70603 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70604 index 8a6d529..171f401 100644
70605 --- a/include/net/sctp/sctp.h
70606 +++ b/include/net/sctp/sctp.h
70607 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70608
70609 #else /* SCTP_DEBUG */
70610
70611 -#define SCTP_DEBUG_PRINTK(whatever...)
70612 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70613 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70614 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70615 #define SCTP_ENABLE_DEBUG
70616 #define SCTP_DISABLE_DEBUG
70617 #define SCTP_ASSERT(expr, str, func)
70618 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70619 index d97f689..f3b90ab 100644
70620 --- a/include/net/secure_seq.h
70621 +++ b/include/net/secure_seq.h
70622 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70623 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70624 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70625 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70626 - __be16 dport);
70627 + __be16 dport);
70628 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70629 __be16 sport, __be16 dport);
70630 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70631 - __be16 sport, __be16 dport);
70632 + __be16 sport, __be16 dport);
70633 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70634 - __be16 sport, __be16 dport);
70635 + __be16 sport, __be16 dport);
70636 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70637 - __be16 sport, __be16 dport);
70638 + __be16 sport, __be16 dport);
70639
70640 #endif /* _NET_SECURE_SEQ */
70641 diff --git a/include/net/sock.h b/include/net/sock.h
70642 index 78adf52..99afd29 100644
70643 --- a/include/net/sock.h
70644 +++ b/include/net/sock.h
70645 @@ -272,7 +272,7 @@ struct sock {
70646 rwlock_t sk_callback_lock;
70647 int sk_err,
70648 sk_err_soft;
70649 - atomic_t sk_drops;
70650 + atomic_unchecked_t sk_drops;
70651 unsigned short sk_ack_backlog;
70652 unsigned short sk_max_ack_backlog;
70653 __u32 sk_priority;
70654 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70655 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70656 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70657 #else
70658 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70659 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70660 int inc)
70661 {
70662 }
70663 diff --git a/include/net/tcp.h b/include/net/tcp.h
70664 index 6cfe18b..dd21acb 100644
70665 --- a/include/net/tcp.h
70666 +++ b/include/net/tcp.h
70667 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70668 struct tcp_seq_afinfo {
70669 char *name;
70670 sa_family_t family;
70671 - struct file_operations seq_fops;
70672 - struct seq_operations seq_ops;
70673 + file_operations_no_const seq_fops;
70674 + seq_operations_no_const seq_ops;
70675 };
70676
70677 struct tcp_iter_state {
70678 diff --git a/include/net/udp.h b/include/net/udp.h
70679 index f98abd2..b4b042f 100644
70680 --- a/include/net/udp.h
70681 +++ b/include/net/udp.h
70682 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70683 char *name;
70684 sa_family_t family;
70685 struct udp_table *udp_table;
70686 - struct file_operations seq_fops;
70687 - struct seq_operations seq_ops;
70688 + file_operations_no_const seq_fops;
70689 + seq_operations_no_const seq_ops;
70690 };
70691
70692 struct udp_iter_state {
70693 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70694 index cbb822e..e9c1cbe 100644
70695 --- a/include/rdma/iw_cm.h
70696 +++ b/include/rdma/iw_cm.h
70697 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70698 int backlog);
70699
70700 int (*destroy_listen)(struct iw_cm_id *cm_id);
70701 -};
70702 +} __no_const;
70703
70704 /**
70705 * iw_create_cm_id - Create an IW CM identifier.
70706 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70707 index 09a124b..caa8ca8 100644
70708 --- a/include/scsi/libfc.h
70709 +++ b/include/scsi/libfc.h
70710 @@ -675,6 +675,7 @@ struct libfc_function_template {
70711 */
70712 void (*disc_stop_final) (struct fc_lport *);
70713 };
70714 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70715
70716 /* information used by the discovery layer */
70717 struct fc_disc {
70718 @@ -707,7 +708,7 @@ struct fc_lport {
70719 struct fc_disc disc;
70720
70721 /* Operational Information */
70722 - struct libfc_function_template tt;
70723 + libfc_function_template_no_const tt;
70724 u8 link_up;
70725 u8 qfull;
70726 enum fc_lport_state state;
70727 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70728 index de8e180..f15e0d7 100644
70729 --- a/include/scsi/scsi_device.h
70730 +++ b/include/scsi/scsi_device.h
70731 @@ -156,9 +156,9 @@ struct scsi_device {
70732 unsigned int max_device_blocked; /* what device_blocked counts down from */
70733 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70734
70735 - atomic_t iorequest_cnt;
70736 - atomic_t iodone_cnt;
70737 - atomic_t ioerr_cnt;
70738 + atomic_unchecked_t iorequest_cnt;
70739 + atomic_unchecked_t iodone_cnt;
70740 + atomic_unchecked_t ioerr_cnt;
70741
70742 struct device sdev_gendev,
70743 sdev_dev;
70744 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70745 index fc50bd6..81ba9cb 100644
70746 --- a/include/scsi/scsi_transport_fc.h
70747 +++ b/include/scsi/scsi_transport_fc.h
70748 @@ -708,7 +708,7 @@ struct fc_function_template {
70749 unsigned long show_host_system_hostname:1;
70750
70751 unsigned long disable_target_scan:1;
70752 -};
70753 +} __do_const;
70754
70755
70756 /**
70757 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70758 index 3dae3f7..8440d6f 100644
70759 --- a/include/sound/ac97_codec.h
70760 +++ b/include/sound/ac97_codec.h
70761 @@ -419,15 +419,15 @@
70762 struct snd_ac97;
70763
70764 struct snd_ac97_build_ops {
70765 - int (*build_3d) (struct snd_ac97 *ac97);
70766 - int (*build_specific) (struct snd_ac97 *ac97);
70767 - int (*build_spdif) (struct snd_ac97 *ac97);
70768 - int (*build_post_spdif) (struct snd_ac97 *ac97);
70769 + int (* const build_3d) (struct snd_ac97 *ac97);
70770 + int (* const build_specific) (struct snd_ac97 *ac97);
70771 + int (* const build_spdif) (struct snd_ac97 *ac97);
70772 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
70773 #ifdef CONFIG_PM
70774 - void (*suspend) (struct snd_ac97 *ac97);
70775 - void (*resume) (struct snd_ac97 *ac97);
70776 + void (* const suspend) (struct snd_ac97 *ac97);
70777 + void (* const resume) (struct snd_ac97 *ac97);
70778 #endif
70779 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70780 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70781 };
70782
70783 struct snd_ac97_bus_ops {
70784 @@ -477,7 +477,7 @@ struct snd_ac97_template {
70785
70786 struct snd_ac97 {
70787 /* -- lowlevel (hardware) driver specific -- */
70788 - struct snd_ac97_build_ops * build_ops;
70789 + const struct snd_ac97_build_ops * build_ops;
70790 void *private_data;
70791 void (*private_free) (struct snd_ac97 *ac97);
70792 /* --- */
70793 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70794 index 891cf1a..a94ba2b 100644
70795 --- a/include/sound/ak4xxx-adda.h
70796 +++ b/include/sound/ak4xxx-adda.h
70797 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70798 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70799 unsigned char val);
70800 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70801 -};
70802 +} __no_const;
70803
70804 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70805
70806 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70807 index 8c05e47..2b5df97 100644
70808 --- a/include/sound/hwdep.h
70809 +++ b/include/sound/hwdep.h
70810 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70811 struct snd_hwdep_dsp_status *status);
70812 int (*dsp_load)(struct snd_hwdep *hw,
70813 struct snd_hwdep_dsp_image *image);
70814 -};
70815 +} __no_const;
70816
70817 struct snd_hwdep {
70818 struct snd_card *card;
70819 diff --git a/include/sound/info.h b/include/sound/info.h
70820 index 112e894..6fda5b5 100644
70821 --- a/include/sound/info.h
70822 +++ b/include/sound/info.h
70823 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
70824 struct snd_info_buffer *buffer);
70825 void (*write)(struct snd_info_entry *entry,
70826 struct snd_info_buffer *buffer);
70827 -};
70828 +} __no_const;
70829
70830 struct snd_info_entry_ops {
70831 int (*open)(struct snd_info_entry *entry,
70832 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70833 index de6d981..590a550 100644
70834 --- a/include/sound/pcm.h
70835 +++ b/include/sound/pcm.h
70836 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
70837 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70838 int (*ack)(struct snd_pcm_substream *substream);
70839 };
70840 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70841
70842 /*
70843 *
70844 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70845 index 736eac7..fe8a80f 100644
70846 --- a/include/sound/sb16_csp.h
70847 +++ b/include/sound/sb16_csp.h
70848 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70849 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70850 int (*csp_stop) (struct snd_sb_csp * p);
70851 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70852 -};
70853 +} __no_const;
70854
70855 /*
70856 * CSP private data
70857 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70858 index 444cd6b..3327cc5 100644
70859 --- a/include/sound/ymfpci.h
70860 +++ b/include/sound/ymfpci.h
70861 @@ -358,7 +358,7 @@ struct snd_ymfpci {
70862 spinlock_t reg_lock;
70863 spinlock_t voice_lock;
70864 wait_queue_head_t interrupt_sleep;
70865 - atomic_t interrupt_sleep_count;
70866 + atomic_unchecked_t interrupt_sleep_count;
70867 struct snd_info_entry *proc_entry;
70868 const struct firmware *dsp_microcode;
70869 const struct firmware *controller_microcode;
70870 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70871 index b89f9db..f097b38 100644
70872 --- a/include/trace/events/irq.h
70873 +++ b/include/trace/events/irq.h
70874 @@ -34,7 +34,7 @@
70875 */
70876 TRACE_EVENT(irq_handler_entry,
70877
70878 - TP_PROTO(int irq, struct irqaction *action),
70879 + TP_PROTO(int irq, const struct irqaction *action),
70880
70881 TP_ARGS(irq, action),
70882
70883 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70884 */
70885 TRACE_EVENT(irq_handler_exit,
70886
70887 - TP_PROTO(int irq, struct irqaction *action, int ret),
70888 + TP_PROTO(int irq, const struct irqaction *action, int ret),
70889
70890 TP_ARGS(irq, action, ret),
70891
70892 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70893 */
70894 TRACE_EVENT(softirq_entry,
70895
70896 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70897 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70898
70899 TP_ARGS(h, vec),
70900
70901 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70902 */
70903 TRACE_EVENT(softirq_exit,
70904
70905 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70906 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70907
70908 TP_ARGS(h, vec),
70909
70910 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70911 index 0993a22..32ba2fe 100644
70912 --- a/include/video/uvesafb.h
70913 +++ b/include/video/uvesafb.h
70914 @@ -177,6 +177,7 @@ struct uvesafb_par {
70915 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70916 u8 pmi_setpal; /* PMI for palette changes */
70917 u16 *pmi_base; /* protected mode interface location */
70918 + u8 *pmi_code; /* protected mode code location */
70919 void *pmi_start;
70920 void *pmi_pal;
70921 u8 *vbe_state_orig; /*
70922 diff --git a/init/Kconfig b/init/Kconfig
70923 index d72691b..3996e54 100644
70924 --- a/init/Kconfig
70925 +++ b/init/Kconfig
70926 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70927
70928 config COMPAT_BRK
70929 bool "Disable heap randomization"
70930 - default y
70931 + default n
70932 help
70933 Randomizing heap placement makes heap exploits harder, but it
70934 also breaks ancient binaries (including anything libc5 based).
70935 diff --git a/init/do_mounts.c b/init/do_mounts.c
70936 index bb008d0..4fa3933 100644
70937 --- a/init/do_mounts.c
70938 +++ b/init/do_mounts.c
70939 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70940
70941 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70942 {
70943 - int err = sys_mount(name, "/root", fs, flags, data);
70944 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70945 if (err)
70946 return err;
70947
70948 - sys_chdir("/root");
70949 + sys_chdir((__force const char __user *)"/root");
70950 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70951 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70952 current->fs->pwd.mnt->mnt_sb->s_type->name,
70953 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70954 va_start(args, fmt);
70955 vsprintf(buf, fmt, args);
70956 va_end(args);
70957 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70958 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70959 if (fd >= 0) {
70960 sys_ioctl(fd, FDEJECT, 0);
70961 sys_close(fd);
70962 }
70963 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70964 - fd = sys_open("/dev/console", O_RDWR, 0);
70965 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70966 if (fd >= 0) {
70967 sys_ioctl(fd, TCGETS, (long)&termios);
70968 termios.c_lflag &= ~ICANON;
70969 sys_ioctl(fd, TCSETSF, (long)&termios);
70970 - sys_read(fd, &c, 1);
70971 + sys_read(fd, (char __user *)&c, 1);
70972 termios.c_lflag |= ICANON;
70973 sys_ioctl(fd, TCSETSF, (long)&termios);
70974 sys_close(fd);
70975 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70976 mount_root();
70977 out:
70978 devtmpfs_mount("dev");
70979 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70980 - sys_chroot(".");
70981 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70982 + sys_chroot((__force char __user *)".");
70983 }
70984 diff --git a/init/do_mounts.h b/init/do_mounts.h
70985 index f5b978a..69dbfe8 100644
70986 --- a/init/do_mounts.h
70987 +++ b/init/do_mounts.h
70988 @@ -15,15 +15,15 @@ extern int root_mountflags;
70989
70990 static inline int create_dev(char *name, dev_t dev)
70991 {
70992 - sys_unlink(name);
70993 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70994 + sys_unlink((char __force_user *)name);
70995 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70996 }
70997
70998 #if BITS_PER_LONG == 32
70999 static inline u32 bstat(char *name)
71000 {
71001 struct stat64 stat;
71002 - if (sys_stat64(name, &stat) != 0)
71003 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71004 return 0;
71005 if (!S_ISBLK(stat.st_mode))
71006 return 0;
71007 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71008 static inline u32 bstat(char *name)
71009 {
71010 struct stat stat;
71011 - if (sys_newstat(name, &stat) != 0)
71012 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71013 return 0;
71014 if (!S_ISBLK(stat.st_mode))
71015 return 0;
71016 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71017 index 614241b..4da046b 100644
71018 --- a/init/do_mounts_initrd.c
71019 +++ b/init/do_mounts_initrd.c
71020 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71021 sys_close(old_fd);sys_close(root_fd);
71022 sys_close(0);sys_close(1);sys_close(2);
71023 sys_setsid();
71024 - (void) sys_open("/dev/console",O_RDWR,0);
71025 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71026 (void) sys_dup(0);
71027 (void) sys_dup(0);
71028 return kernel_execve(shell, argv, envp_init);
71029 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71030 create_dev("/dev/root.old", Root_RAM0);
71031 /* mount initrd on rootfs' /root */
71032 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71033 - sys_mkdir("/old", 0700);
71034 - root_fd = sys_open("/", 0, 0);
71035 - old_fd = sys_open("/old", 0, 0);
71036 + sys_mkdir((const char __force_user *)"/old", 0700);
71037 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
71038 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71039 /* move initrd over / and chdir/chroot in initrd root */
71040 - sys_chdir("/root");
71041 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71042 - sys_chroot(".");
71043 + sys_chdir((const char __force_user *)"/root");
71044 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71045 + sys_chroot((const char __force_user *)".");
71046
71047 /*
71048 * In case that a resume from disk is carried out by linuxrc or one of
71049 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71050
71051 /* move initrd to rootfs' /old */
71052 sys_fchdir(old_fd);
71053 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
71054 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71055 /* switch root and cwd back to / of rootfs */
71056 sys_fchdir(root_fd);
71057 - sys_chroot(".");
71058 + sys_chroot((const char __force_user *)".");
71059 sys_close(old_fd);
71060 sys_close(root_fd);
71061
71062 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71063 - sys_chdir("/old");
71064 + sys_chdir((const char __force_user *)"/old");
71065 return;
71066 }
71067
71068 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71069 mount_root();
71070
71071 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71072 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71073 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71074 if (!error)
71075 printk("okay\n");
71076 else {
71077 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
71078 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71079 if (error == -ENOENT)
71080 printk("/initrd does not exist. Ignored.\n");
71081 else
71082 printk("failed\n");
71083 printk(KERN_NOTICE "Unmounting old root\n");
71084 - sys_umount("/old", MNT_DETACH);
71085 + sys_umount((char __force_user *)"/old", MNT_DETACH);
71086 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71087 if (fd < 0) {
71088 error = fd;
71089 @@ -119,11 +119,11 @@ int __init initrd_load(void)
71090 * mounted in the normal path.
71091 */
71092 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71093 - sys_unlink("/initrd.image");
71094 + sys_unlink((const char __force_user *)"/initrd.image");
71095 handle_initrd();
71096 return 1;
71097 }
71098 }
71099 - sys_unlink("/initrd.image");
71100 + sys_unlink((const char __force_user *)"/initrd.image");
71101 return 0;
71102 }
71103 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71104 index 69aebbf..c0bf6a7 100644
71105 --- a/init/do_mounts_md.c
71106 +++ b/init/do_mounts_md.c
71107 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71108 partitioned ? "_d" : "", minor,
71109 md_setup_args[ent].device_names);
71110
71111 - fd = sys_open(name, 0, 0);
71112 + fd = sys_open((char __force_user *)name, 0, 0);
71113 if (fd < 0) {
71114 printk(KERN_ERR "md: open failed - cannot start "
71115 "array %s\n", name);
71116 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71117 * array without it
71118 */
71119 sys_close(fd);
71120 - fd = sys_open(name, 0, 0);
71121 + fd = sys_open((char __force_user *)name, 0, 0);
71122 sys_ioctl(fd, BLKRRPART, 0);
71123 }
71124 sys_close(fd);
71125 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71126
71127 wait_for_device_probe();
71128
71129 - fd = sys_open("/dev/md0", 0, 0);
71130 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71131 if (fd >= 0) {
71132 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71133 sys_close(fd);
71134 diff --git a/init/initramfs.c b/init/initramfs.c
71135 index 1fd59b8..a01b079 100644
71136 --- a/init/initramfs.c
71137 +++ b/init/initramfs.c
71138 @@ -74,7 +74,7 @@ static void __init free_hash(void)
71139 }
71140 }
71141
71142 -static long __init do_utime(char __user *filename, time_t mtime)
71143 +static long __init do_utime(__force char __user *filename, time_t mtime)
71144 {
71145 struct timespec t[2];
71146
71147 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
71148 struct dir_entry *de, *tmp;
71149 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71150 list_del(&de->list);
71151 - do_utime(de->name, de->mtime);
71152 + do_utime((char __force_user *)de->name, de->mtime);
71153 kfree(de->name);
71154 kfree(de);
71155 }
71156 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
71157 if (nlink >= 2) {
71158 char *old = find_link(major, minor, ino, mode, collected);
71159 if (old)
71160 - return (sys_link(old, collected) < 0) ? -1 : 1;
71161 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71162 }
71163 return 0;
71164 }
71165 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71166 {
71167 struct stat st;
71168
71169 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71170 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71171 if (S_ISDIR(st.st_mode))
71172 - sys_rmdir(path);
71173 + sys_rmdir((char __force_user *)path);
71174 else
71175 - sys_unlink(path);
71176 + sys_unlink((char __force_user *)path);
71177 }
71178 }
71179
71180 @@ -305,7 +305,7 @@ static int __init do_name(void)
71181 int openflags = O_WRONLY|O_CREAT;
71182 if (ml != 1)
71183 openflags |= O_TRUNC;
71184 - wfd = sys_open(collected, openflags, mode);
71185 + wfd = sys_open((char __force_user *)collected, openflags, mode);
71186
71187 if (wfd >= 0) {
71188 sys_fchown(wfd, uid, gid);
71189 @@ -317,17 +317,17 @@ static int __init do_name(void)
71190 }
71191 }
71192 } else if (S_ISDIR(mode)) {
71193 - sys_mkdir(collected, mode);
71194 - sys_chown(collected, uid, gid);
71195 - sys_chmod(collected, mode);
71196 + sys_mkdir((char __force_user *)collected, mode);
71197 + sys_chown((char __force_user *)collected, uid, gid);
71198 + sys_chmod((char __force_user *)collected, mode);
71199 dir_add(collected, mtime);
71200 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71201 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71202 if (maybe_link() == 0) {
71203 - sys_mknod(collected, mode, rdev);
71204 - sys_chown(collected, uid, gid);
71205 - sys_chmod(collected, mode);
71206 - do_utime(collected, mtime);
71207 + sys_mknod((char __force_user *)collected, mode, rdev);
71208 + sys_chown((char __force_user *)collected, uid, gid);
71209 + sys_chmod((char __force_user *)collected, mode);
71210 + do_utime((char __force_user *)collected, mtime);
71211 }
71212 }
71213 return 0;
71214 @@ -336,15 +336,15 @@ static int __init do_name(void)
71215 static int __init do_copy(void)
71216 {
71217 if (count >= body_len) {
71218 - sys_write(wfd, victim, body_len);
71219 + sys_write(wfd, (char __force_user *)victim, body_len);
71220 sys_close(wfd);
71221 - do_utime(vcollected, mtime);
71222 + do_utime((char __force_user *)vcollected, mtime);
71223 kfree(vcollected);
71224 eat(body_len);
71225 state = SkipIt;
71226 return 0;
71227 } else {
71228 - sys_write(wfd, victim, count);
71229 + sys_write(wfd, (char __force_user *)victim, count);
71230 body_len -= count;
71231 eat(count);
71232 return 1;
71233 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
71234 {
71235 collected[N_ALIGN(name_len) + body_len] = '\0';
71236 clean_path(collected, 0);
71237 - sys_symlink(collected + N_ALIGN(name_len), collected);
71238 - sys_lchown(collected, uid, gid);
71239 - do_utime(collected, mtime);
71240 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71241 + sys_lchown((char __force_user *)collected, uid, gid);
71242 + do_utime((char __force_user *)collected, mtime);
71243 state = SkipIt;
71244 next_state = Reset;
71245 return 0;
71246 diff --git a/init/main.c b/init/main.c
71247 index 1eb4bd5..fea5bbe 100644
71248 --- a/init/main.c
71249 +++ b/init/main.c
71250 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71251 #ifdef CONFIG_TC
71252 extern void tc_init(void);
71253 #endif
71254 +extern void grsecurity_init(void);
71255
71256 enum system_states system_state __read_mostly;
71257 EXPORT_SYMBOL(system_state);
71258 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71259
71260 __setup("reset_devices", set_reset_devices);
71261
71262 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71263 +extern char pax_enter_kernel_user[];
71264 +extern char pax_exit_kernel_user[];
71265 +extern pgdval_t clone_pgd_mask;
71266 +#endif
71267 +
71268 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71269 +static int __init setup_pax_nouderef(char *str)
71270 +{
71271 +#ifdef CONFIG_X86_32
71272 + unsigned int cpu;
71273 + struct desc_struct *gdt;
71274 +
71275 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71276 + gdt = get_cpu_gdt_table(cpu);
71277 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71278 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71279 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71280 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71281 + }
71282 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71283 +#else
71284 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71285 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71286 + clone_pgd_mask = ~(pgdval_t)0UL;
71287 +#endif
71288 +
71289 + return 0;
71290 +}
71291 +early_param("pax_nouderef", setup_pax_nouderef);
71292 +#endif
71293 +
71294 +#ifdef CONFIG_PAX_SOFTMODE
71295 +int pax_softmode;
71296 +
71297 +static int __init setup_pax_softmode(char *str)
71298 +{
71299 + get_option(&str, &pax_softmode);
71300 + return 1;
71301 +}
71302 +__setup("pax_softmode=", setup_pax_softmode);
71303 +#endif
71304 +
71305 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71306 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71307 static const char *panic_later, *panic_param;
71308 @@ -705,52 +749,53 @@ int initcall_debug;
71309 core_param(initcall_debug, initcall_debug, bool, 0644);
71310
71311 static char msgbuf[64];
71312 -static struct boot_trace_call call;
71313 -static struct boot_trace_ret ret;
71314 +static struct boot_trace_call trace_call;
71315 +static struct boot_trace_ret trace_ret;
71316
71317 int do_one_initcall(initcall_t fn)
71318 {
71319 int count = preempt_count();
71320 ktime_t calltime, delta, rettime;
71321 + const char *msg1 = "", *msg2 = "";
71322
71323 if (initcall_debug) {
71324 - call.caller = task_pid_nr(current);
71325 - printk("calling %pF @ %i\n", fn, call.caller);
71326 + trace_call.caller = task_pid_nr(current);
71327 + printk("calling %pF @ %i\n", fn, trace_call.caller);
71328 calltime = ktime_get();
71329 - trace_boot_call(&call, fn);
71330 + trace_boot_call(&trace_call, fn);
71331 enable_boot_trace();
71332 }
71333
71334 - ret.result = fn();
71335 + trace_ret.result = fn();
71336
71337 if (initcall_debug) {
71338 disable_boot_trace();
71339 rettime = ktime_get();
71340 delta = ktime_sub(rettime, calltime);
71341 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71342 - trace_boot_ret(&ret, fn);
71343 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71344 + trace_boot_ret(&trace_ret, fn);
71345 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71346 - ret.result, ret.duration);
71347 + trace_ret.result, trace_ret.duration);
71348 }
71349
71350 msgbuf[0] = 0;
71351
71352 - if (ret.result && ret.result != -ENODEV && initcall_debug)
71353 - sprintf(msgbuf, "error code %d ", ret.result);
71354 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71355 + sprintf(msgbuf, "error code %d ", trace_ret.result);
71356
71357 if (preempt_count() != count) {
71358 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71359 + msg1 = " preemption imbalance";
71360 preempt_count() = count;
71361 }
71362 if (irqs_disabled()) {
71363 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71364 + msg2 = " disabled interrupts";
71365 local_irq_enable();
71366 }
71367 - if (msgbuf[0]) {
71368 - printk("initcall %pF returned with %s\n", fn, msgbuf);
71369 + if (msgbuf[0] || *msg1 || *msg2) {
71370 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71371 }
71372
71373 - return ret.result;
71374 + return trace_ret.result;
71375 }
71376
71377
71378 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71379 if (!ramdisk_execute_command)
71380 ramdisk_execute_command = "/init";
71381
71382 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71383 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71384 ramdisk_execute_command = NULL;
71385 prepare_namespace();
71386 }
71387
71388 + grsecurity_init();
71389 +
71390 /*
71391 * Ok, we have completed the initial bootup, and
71392 * we're essentially up and running. Get rid of the
71393 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71394 index f4c1a3a..96c19bd 100644
71395 --- a/init/noinitramfs.c
71396 +++ b/init/noinitramfs.c
71397 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71398 {
71399 int err;
71400
71401 - err = sys_mkdir("/dev", 0755);
71402 + err = sys_mkdir((const char __user *)"/dev", 0755);
71403 if (err < 0)
71404 goto out;
71405
71406 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71407 if (err < 0)
71408 goto out;
71409
71410 - err = sys_mkdir("/root", 0700);
71411 + err = sys_mkdir((const char __user *)"/root", 0700);
71412 if (err < 0)
71413 goto out;
71414
71415 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71416 index d01bc14..8df81db 100644
71417 --- a/ipc/mqueue.c
71418 +++ b/ipc/mqueue.c
71419 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71420 mq_bytes = (mq_msg_tblsz +
71421 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71422
71423 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71424 spin_lock(&mq_lock);
71425 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71426 u->mq_bytes + mq_bytes >
71427 diff --git a/ipc/msg.c b/ipc/msg.c
71428 index 779f762..4af9e36 100644
71429 --- a/ipc/msg.c
71430 +++ b/ipc/msg.c
71431 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71432 return security_msg_queue_associate(msq, msgflg);
71433 }
71434
71435 +static struct ipc_ops msg_ops = {
71436 + .getnew = newque,
71437 + .associate = msg_security,
71438 + .more_checks = NULL
71439 +};
71440 +
71441 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71442 {
71443 struct ipc_namespace *ns;
71444 - struct ipc_ops msg_ops;
71445 struct ipc_params msg_params;
71446
71447 ns = current->nsproxy->ipc_ns;
71448
71449 - msg_ops.getnew = newque;
71450 - msg_ops.associate = msg_security;
71451 - msg_ops.more_checks = NULL;
71452 -
71453 msg_params.key = key;
71454 msg_params.flg = msgflg;
71455
71456 diff --git a/ipc/sem.c b/ipc/sem.c
71457 index b781007..f738b04 100644
71458 --- a/ipc/sem.c
71459 +++ b/ipc/sem.c
71460 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71461 return 0;
71462 }
71463
71464 +static struct ipc_ops sem_ops = {
71465 + .getnew = newary,
71466 + .associate = sem_security,
71467 + .more_checks = sem_more_checks
71468 +};
71469 +
71470 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71471 {
71472 struct ipc_namespace *ns;
71473 - struct ipc_ops sem_ops;
71474 struct ipc_params sem_params;
71475
71476 ns = current->nsproxy->ipc_ns;
71477 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71478 if (nsems < 0 || nsems > ns->sc_semmsl)
71479 return -EINVAL;
71480
71481 - sem_ops.getnew = newary;
71482 - sem_ops.associate = sem_security;
71483 - sem_ops.more_checks = sem_more_checks;
71484 -
71485 sem_params.key = key;
71486 sem_params.flg = semflg;
71487 sem_params.u.nsems = nsems;
71488 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71489 ushort* sem_io = fast_sem_io;
71490 int nsems;
71491
71492 + pax_track_stack();
71493 +
71494 sma = sem_lock_check(ns, semid);
71495 if (IS_ERR(sma))
71496 return PTR_ERR(sma);
71497 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71498 unsigned long jiffies_left = 0;
71499 struct ipc_namespace *ns;
71500
71501 + pax_track_stack();
71502 +
71503 ns = current->nsproxy->ipc_ns;
71504
71505 if (nsops < 1 || semid < 0)
71506 diff --git a/ipc/shm.c b/ipc/shm.c
71507 index d30732c..e4992cd 100644
71508 --- a/ipc/shm.c
71509 +++ b/ipc/shm.c
71510 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71511 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71512 #endif
71513
71514 +#ifdef CONFIG_GRKERNSEC
71515 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71516 + const time_t shm_createtime, const uid_t cuid,
71517 + const int shmid);
71518 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71519 + const time_t shm_createtime);
71520 +#endif
71521 +
71522 void shm_init_ns(struct ipc_namespace *ns)
71523 {
71524 ns->shm_ctlmax = SHMMAX;
71525 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71526 shp->shm_lprid = 0;
71527 shp->shm_atim = shp->shm_dtim = 0;
71528 shp->shm_ctim = get_seconds();
71529 +#ifdef CONFIG_GRKERNSEC
71530 + {
71531 + struct timespec timeval;
71532 + do_posix_clock_monotonic_gettime(&timeval);
71533 +
71534 + shp->shm_createtime = timeval.tv_sec;
71535 + }
71536 +#endif
71537 shp->shm_segsz = size;
71538 shp->shm_nattch = 0;
71539 shp->shm_file = file;
71540 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71541 return 0;
71542 }
71543
71544 +static struct ipc_ops shm_ops = {
71545 + .getnew = newseg,
71546 + .associate = shm_security,
71547 + .more_checks = shm_more_checks
71548 +};
71549 +
71550 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71551 {
71552 struct ipc_namespace *ns;
71553 - struct ipc_ops shm_ops;
71554 struct ipc_params shm_params;
71555
71556 ns = current->nsproxy->ipc_ns;
71557
71558 - shm_ops.getnew = newseg;
71559 - shm_ops.associate = shm_security;
71560 - shm_ops.more_checks = shm_more_checks;
71561 -
71562 shm_params.key = key;
71563 shm_params.flg = shmflg;
71564 shm_params.u.size = size;
71565 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71566 f_mode = FMODE_READ | FMODE_WRITE;
71567 }
71568 if (shmflg & SHM_EXEC) {
71569 +
71570 +#ifdef CONFIG_PAX_MPROTECT
71571 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
71572 + goto out;
71573 +#endif
71574 +
71575 prot |= PROT_EXEC;
71576 acc_mode |= S_IXUGO;
71577 }
71578 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71579 if (err)
71580 goto out_unlock;
71581
71582 +#ifdef CONFIG_GRKERNSEC
71583 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71584 + shp->shm_perm.cuid, shmid) ||
71585 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71586 + err = -EACCES;
71587 + goto out_unlock;
71588 + }
71589 +#endif
71590 +
71591 path.dentry = dget(shp->shm_file->f_path.dentry);
71592 path.mnt = shp->shm_file->f_path.mnt;
71593 shp->shm_nattch++;
71594 +#ifdef CONFIG_GRKERNSEC
71595 + shp->shm_lapid = current->pid;
71596 +#endif
71597 size = i_size_read(path.dentry->d_inode);
71598 shm_unlock(shp);
71599
71600 diff --git a/kernel/acct.c b/kernel/acct.c
71601 index a6605ca..ca91111 100644
71602 --- a/kernel/acct.c
71603 +++ b/kernel/acct.c
71604 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71605 */
71606 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71607 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71608 - file->f_op->write(file, (char *)&ac,
71609 + file->f_op->write(file, (char __force_user *)&ac,
71610 sizeof(acct_t), &file->f_pos);
71611 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71612 set_fs(fs);
71613 diff --git a/kernel/audit.c b/kernel/audit.c
71614 index 5feed23..48415fd 100644
71615 --- a/kernel/audit.c
71616 +++ b/kernel/audit.c
71617 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71618 3) suppressed due to audit_rate_limit
71619 4) suppressed due to audit_backlog_limit
71620 */
71621 -static atomic_t audit_lost = ATOMIC_INIT(0);
71622 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71623
71624 /* The netlink socket. */
71625 static struct sock *audit_sock;
71626 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71627 unsigned long now;
71628 int print;
71629
71630 - atomic_inc(&audit_lost);
71631 + atomic_inc_unchecked(&audit_lost);
71632
71633 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71634
71635 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71636 printk(KERN_WARNING
71637 "audit: audit_lost=%d audit_rate_limit=%d "
71638 "audit_backlog_limit=%d\n",
71639 - atomic_read(&audit_lost),
71640 + atomic_read_unchecked(&audit_lost),
71641 audit_rate_limit,
71642 audit_backlog_limit);
71643 audit_panic(message);
71644 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71645 status_set.pid = audit_pid;
71646 status_set.rate_limit = audit_rate_limit;
71647 status_set.backlog_limit = audit_backlog_limit;
71648 - status_set.lost = atomic_read(&audit_lost);
71649 + status_set.lost = atomic_read_unchecked(&audit_lost);
71650 status_set.backlog = skb_queue_len(&audit_skb_queue);
71651 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71652 &status_set, sizeof(status_set));
71653 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71654 spin_unlock_irq(&tsk->sighand->siglock);
71655 }
71656 read_unlock(&tasklist_lock);
71657 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71658 - &s, sizeof(s));
71659 +
71660 + if (!err)
71661 + audit_send_reply(NETLINK_CB(skb).pid, seq,
71662 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71663 break;
71664 }
71665 case AUDIT_TTY_SET: {
71666 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71667 avail = audit_expand(ab,
71668 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71669 if (!avail)
71670 - goto out;
71671 + goto out_va_end;
71672 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71673 }
71674 - va_end(args2);
71675 if (len > 0)
71676 skb_put(skb, len);
71677 +out_va_end:
71678 + va_end(args2);
71679 out:
71680 return;
71681 }
71682 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71683 index 267e484..ac41bc3 100644
71684 --- a/kernel/auditsc.c
71685 +++ b/kernel/auditsc.c
71686 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71687 struct audit_buffer **ab,
71688 struct audit_aux_data_execve *axi)
71689 {
71690 - int i;
71691 - size_t len, len_sent = 0;
71692 + int i, len;
71693 + size_t len_sent = 0;
71694 const char __user *p;
71695 char *buf;
71696
71697 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71698 }
71699
71700 /* global counter which is incremented every time something logs in */
71701 -static atomic_t session_id = ATOMIC_INIT(0);
71702 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71703
71704 /**
71705 * audit_set_loginuid - set a task's audit_context loginuid
71706 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71707 */
71708 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71709 {
71710 - unsigned int sessionid = atomic_inc_return(&session_id);
71711 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71712 struct audit_context *context = task->audit_context;
71713
71714 if (context && context->in_syscall) {
71715 diff --git a/kernel/capability.c b/kernel/capability.c
71716 index 8a944f5..db5001e 100644
71717 --- a/kernel/capability.c
71718 +++ b/kernel/capability.c
71719 @@ -305,10 +305,26 @@ int capable(int cap)
71720 BUG();
71721 }
71722
71723 - if (security_capable(cap) == 0) {
71724 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71725 current->flags |= PF_SUPERPRIV;
71726 return 1;
71727 }
71728 return 0;
71729 }
71730 +
71731 +int capable_nolog(int cap)
71732 +{
71733 + if (unlikely(!cap_valid(cap))) {
71734 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71735 + BUG();
71736 + }
71737 +
71738 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71739 + current->flags |= PF_SUPERPRIV;
71740 + return 1;
71741 + }
71742 + return 0;
71743 +}
71744 +
71745 EXPORT_SYMBOL(capable);
71746 +EXPORT_SYMBOL(capable_nolog);
71747 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71748 index 1fbcc74..7000012 100644
71749 --- a/kernel/cgroup.c
71750 +++ b/kernel/cgroup.c
71751 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71752 struct hlist_head *hhead;
71753 struct cg_cgroup_link *link;
71754
71755 + pax_track_stack();
71756 +
71757 /* First see if we already have a cgroup group that matches
71758 * the desired set */
71759 read_lock(&css_set_lock);
71760 diff --git a/kernel/compat.c b/kernel/compat.c
71761 index 8bc5578..186e44a 100644
71762 --- a/kernel/compat.c
71763 +++ b/kernel/compat.c
71764 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71765 mm_segment_t oldfs;
71766 long ret;
71767
71768 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71769 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71770 oldfs = get_fs();
71771 set_fs(KERNEL_DS);
71772 ret = hrtimer_nanosleep_restart(restart);
71773 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71774 oldfs = get_fs();
71775 set_fs(KERNEL_DS);
71776 ret = hrtimer_nanosleep(&tu,
71777 - rmtp ? (struct timespec __user *)&rmt : NULL,
71778 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
71779 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71780 set_fs(oldfs);
71781
71782 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71783 mm_segment_t old_fs = get_fs();
71784
71785 set_fs(KERNEL_DS);
71786 - ret = sys_sigpending((old_sigset_t __user *) &s);
71787 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
71788 set_fs(old_fs);
71789 if (ret == 0)
71790 ret = put_user(s, set);
71791 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71792 old_fs = get_fs();
71793 set_fs(KERNEL_DS);
71794 ret = sys_sigprocmask(how,
71795 - set ? (old_sigset_t __user *) &s : NULL,
71796 - oset ? (old_sigset_t __user *) &s : NULL);
71797 + set ? (old_sigset_t __force_user *) &s : NULL,
71798 + oset ? (old_sigset_t __force_user *) &s : NULL);
71799 set_fs(old_fs);
71800 if (ret == 0)
71801 if (oset)
71802 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71803 mm_segment_t old_fs = get_fs();
71804
71805 set_fs(KERNEL_DS);
71806 - ret = sys_old_getrlimit(resource, &r);
71807 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71808 set_fs(old_fs);
71809
71810 if (!ret) {
71811 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71812 mm_segment_t old_fs = get_fs();
71813
71814 set_fs(KERNEL_DS);
71815 - ret = sys_getrusage(who, (struct rusage __user *) &r);
71816 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71817 set_fs(old_fs);
71818
71819 if (ret)
71820 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71821 set_fs (KERNEL_DS);
71822 ret = sys_wait4(pid,
71823 (stat_addr ?
71824 - (unsigned int __user *) &status : NULL),
71825 - options, (struct rusage __user *) &r);
71826 + (unsigned int __force_user *) &status : NULL),
71827 + options, (struct rusage __force_user *) &r);
71828 set_fs (old_fs);
71829
71830 if (ret > 0) {
71831 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71832 memset(&info, 0, sizeof(info));
71833
71834 set_fs(KERNEL_DS);
71835 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71836 - uru ? (struct rusage __user *)&ru : NULL);
71837 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71838 + uru ? (struct rusage __force_user *)&ru : NULL);
71839 set_fs(old_fs);
71840
71841 if ((ret < 0) || (info.si_signo == 0))
71842 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71843 oldfs = get_fs();
71844 set_fs(KERNEL_DS);
71845 err = sys_timer_settime(timer_id, flags,
71846 - (struct itimerspec __user *) &newts,
71847 - (struct itimerspec __user *) &oldts);
71848 + (struct itimerspec __force_user *) &newts,
71849 + (struct itimerspec __force_user *) &oldts);
71850 set_fs(oldfs);
71851 if (!err && old && put_compat_itimerspec(old, &oldts))
71852 return -EFAULT;
71853 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71854 oldfs = get_fs();
71855 set_fs(KERNEL_DS);
71856 err = sys_timer_gettime(timer_id,
71857 - (struct itimerspec __user *) &ts);
71858 + (struct itimerspec __force_user *) &ts);
71859 set_fs(oldfs);
71860 if (!err && put_compat_itimerspec(setting, &ts))
71861 return -EFAULT;
71862 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71863 oldfs = get_fs();
71864 set_fs(KERNEL_DS);
71865 err = sys_clock_settime(which_clock,
71866 - (struct timespec __user *) &ts);
71867 + (struct timespec __force_user *) &ts);
71868 set_fs(oldfs);
71869 return err;
71870 }
71871 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71872 oldfs = get_fs();
71873 set_fs(KERNEL_DS);
71874 err = sys_clock_gettime(which_clock,
71875 - (struct timespec __user *) &ts);
71876 + (struct timespec __force_user *) &ts);
71877 set_fs(oldfs);
71878 if (!err && put_compat_timespec(&ts, tp))
71879 return -EFAULT;
71880 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71881 oldfs = get_fs();
71882 set_fs(KERNEL_DS);
71883 err = sys_clock_getres(which_clock,
71884 - (struct timespec __user *) &ts);
71885 + (struct timespec __force_user *) &ts);
71886 set_fs(oldfs);
71887 if (!err && tp && put_compat_timespec(&ts, tp))
71888 return -EFAULT;
71889 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71890 long err;
71891 mm_segment_t oldfs;
71892 struct timespec tu;
71893 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71894 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71895
71896 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71897 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71898 oldfs = get_fs();
71899 set_fs(KERNEL_DS);
71900 err = clock_nanosleep_restart(restart);
71901 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71902 oldfs = get_fs();
71903 set_fs(KERNEL_DS);
71904 err = sys_clock_nanosleep(which_clock, flags,
71905 - (struct timespec __user *) &in,
71906 - (struct timespec __user *) &out);
71907 + (struct timespec __force_user *) &in,
71908 + (struct timespec __force_user *) &out);
71909 set_fs(oldfs);
71910
71911 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71912 diff --git a/kernel/configs.c b/kernel/configs.c
71913 index abaee68..047facd 100644
71914 --- a/kernel/configs.c
71915 +++ b/kernel/configs.c
71916 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71917 struct proc_dir_entry *entry;
71918
71919 /* create the current config file */
71920 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71921 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71922 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71923 + &ikconfig_file_ops);
71924 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71925 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71926 + &ikconfig_file_ops);
71927 +#endif
71928 +#else
71929 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71930 &ikconfig_file_ops);
71931 +#endif
71932 +
71933 if (!entry)
71934 return -ENOMEM;
71935
71936 diff --git a/kernel/cpu.c b/kernel/cpu.c
71937 index 3f2f04f..4e53ded 100644
71938 --- a/kernel/cpu.c
71939 +++ b/kernel/cpu.c
71940 @@ -20,7 +20,7 @@
71941 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71942 static DEFINE_MUTEX(cpu_add_remove_lock);
71943
71944 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71945 +static RAW_NOTIFIER_HEAD(cpu_chain);
71946
71947 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71948 * Should always be manipulated under cpu_add_remove_lock
71949 diff --git a/kernel/cred.c b/kernel/cred.c
71950 index 0b5b5fc..f7fe51a 100644
71951 --- a/kernel/cred.c
71952 +++ b/kernel/cred.c
71953 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71954 */
71955 void __put_cred(struct cred *cred)
71956 {
71957 + pax_track_stack();
71958 +
71959 kdebug("__put_cred(%p{%d,%d})", cred,
71960 atomic_read(&cred->usage),
71961 read_cred_subscribers(cred));
71962 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71963 {
71964 struct cred *cred;
71965
71966 + pax_track_stack();
71967 +
71968 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71969 atomic_read(&tsk->cred->usage),
71970 read_cred_subscribers(tsk->cred));
71971 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71972 validate_creds(cred);
71973 put_cred(cred);
71974 }
71975 +
71976 +#ifdef CONFIG_GRKERNSEC_SETXID
71977 + cred = (struct cred *) tsk->delayed_cred;
71978 + if (cred) {
71979 + tsk->delayed_cred = NULL;
71980 + validate_creds(cred);
71981 + put_cred(cred);
71982 + }
71983 +#endif
71984 }
71985
71986 /**
71987 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71988 {
71989 const struct cred *cred;
71990
71991 + pax_track_stack();
71992 +
71993 rcu_read_lock();
71994
71995 do {
71996 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71997 {
71998 struct cred *new;
71999
72000 + pax_track_stack();
72001 +
72002 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72003 if (!new)
72004 return NULL;
72005 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72006 const struct cred *old;
72007 struct cred *new;
72008
72009 + pax_track_stack();
72010 +
72011 validate_process_creds();
72012
72013 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72014 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72015 struct thread_group_cred *tgcred = NULL;
72016 struct cred *new;
72017
72018 + pax_track_stack();
72019 +
72020 #ifdef CONFIG_KEYS
72021 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72022 if (!tgcred)
72023 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72024 struct cred *new;
72025 int ret;
72026
72027 + pax_track_stack();
72028 +
72029 mutex_init(&p->cred_guard_mutex);
72030
72031 if (
72032 @@ -523,11 +546,13 @@ error_put:
72033 * Always returns 0 thus allowing this function to be tail-called at the end
72034 * of, say, sys_setgid().
72035 */
72036 -int commit_creds(struct cred *new)
72037 +static int __commit_creds(struct cred *new)
72038 {
72039 struct task_struct *task = current;
72040 const struct cred *old = task->real_cred;
72041
72042 + pax_track_stack();
72043 +
72044 kdebug("commit_creds(%p{%d,%d})", new,
72045 atomic_read(&new->usage),
72046 read_cred_subscribers(new));
72047 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72048
72049 get_cred(new); /* we will require a ref for the subj creds too */
72050
72051 + gr_set_role_label(task, new->uid, new->gid);
72052 +
72053 /* dumpability changes */
72054 if (old->euid != new->euid ||
72055 old->egid != new->egid ||
72056 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72057 key_fsgid_changed(task);
72058
72059 /* do it
72060 - * - What if a process setreuid()'s and this brings the
72061 - * new uid over his NPROC rlimit? We can check this now
72062 - * cheaply with the new uid cache, so if it matters
72063 - * we should be checking for it. -DaveM
72064 + * RLIMIT_NPROC limits on user->processes have already been checked
72065 + * in set_user().
72066 */
72067 alter_cred_subscribers(new, 2);
72068 if (new->user != old->user)
72069 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72070 put_cred(old);
72071 return 0;
72072 }
72073 +
72074 +#ifdef CONFIG_GRKERNSEC_SETXID
72075 +extern int set_user(struct cred *new);
72076 +
72077 +void gr_delayed_cred_worker(void)
72078 +{
72079 + const struct cred *new = current->delayed_cred;
72080 + struct cred *ncred;
72081 +
72082 + current->delayed_cred = NULL;
72083 +
72084 + if (current_uid() && new != NULL) {
72085 + // from doing get_cred on it when queueing this
72086 + put_cred(new);
72087 + return;
72088 + } else if (new == NULL)
72089 + return;
72090 +
72091 + ncred = prepare_creds();
72092 + if (!ncred)
72093 + goto die;
72094 + // uids
72095 + ncred->uid = new->uid;
72096 + ncred->euid = new->euid;
72097 + ncred->suid = new->suid;
72098 + ncred->fsuid = new->fsuid;
72099 + // gids
72100 + ncred->gid = new->gid;
72101 + ncred->egid = new->egid;
72102 + ncred->sgid = new->sgid;
72103 + ncred->fsgid = new->fsgid;
72104 + // groups
72105 + if (set_groups(ncred, new->group_info) < 0) {
72106 + abort_creds(ncred);
72107 + goto die;
72108 + }
72109 + // caps
72110 + ncred->securebits = new->securebits;
72111 + ncred->cap_inheritable = new->cap_inheritable;
72112 + ncred->cap_permitted = new->cap_permitted;
72113 + ncred->cap_effective = new->cap_effective;
72114 + ncred->cap_bset = new->cap_bset;
72115 +
72116 + if (set_user(ncred)) {
72117 + abort_creds(ncred);
72118 + goto die;
72119 + }
72120 +
72121 + // from doing get_cred on it when queueing this
72122 + put_cred(new);
72123 +
72124 + __commit_creds(ncred);
72125 + return;
72126 +die:
72127 + // from doing get_cred on it when queueing this
72128 + put_cred(new);
72129 + do_group_exit(SIGKILL);
72130 +}
72131 +#endif
72132 +
72133 +int commit_creds(struct cred *new)
72134 +{
72135 +#ifdef CONFIG_GRKERNSEC_SETXID
72136 + struct task_struct *t;
72137 +
72138 + /* we won't get called with tasklist_lock held for writing
72139 + and interrupts disabled as the cred struct in that case is
72140 + init_cred
72141 + */
72142 + if (grsec_enable_setxid && !current_is_single_threaded() &&
72143 + !current_uid() && new->uid) {
72144 + rcu_read_lock();
72145 + read_lock(&tasklist_lock);
72146 + for (t = next_thread(current); t != current;
72147 + t = next_thread(t)) {
72148 + if (t->delayed_cred == NULL) {
72149 + t->delayed_cred = get_cred(new);
72150 + set_tsk_need_resched(t);
72151 + }
72152 + }
72153 + read_unlock(&tasklist_lock);
72154 + rcu_read_unlock();
72155 + }
72156 +#endif
72157 + return __commit_creds(new);
72158 +}
72159 +
72160 EXPORT_SYMBOL(commit_creds);
72161
72162 +
72163 /**
72164 * abort_creds - Discard a set of credentials and unlock the current task
72165 * @new: The credentials that were going to be applied
72166 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72167 */
72168 void abort_creds(struct cred *new)
72169 {
72170 + pax_track_stack();
72171 +
72172 kdebug("abort_creds(%p{%d,%d})", new,
72173 atomic_read(&new->usage),
72174 read_cred_subscribers(new));
72175 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72176 {
72177 const struct cred *old = current->cred;
72178
72179 + pax_track_stack();
72180 +
72181 kdebug("override_creds(%p{%d,%d})", new,
72182 atomic_read(&new->usage),
72183 read_cred_subscribers(new));
72184 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72185 {
72186 const struct cred *override = current->cred;
72187
72188 + pax_track_stack();
72189 +
72190 kdebug("revert_creds(%p{%d,%d})", old,
72191 atomic_read(&old->usage),
72192 read_cred_subscribers(old));
72193 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72194 const struct cred *old;
72195 struct cred *new;
72196
72197 + pax_track_stack();
72198 +
72199 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72200 if (!new)
72201 return NULL;
72202 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72203 */
72204 int set_security_override(struct cred *new, u32 secid)
72205 {
72206 + pax_track_stack();
72207 +
72208 return security_kernel_act_as(new, secid);
72209 }
72210 EXPORT_SYMBOL(set_security_override);
72211 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72212 u32 secid;
72213 int ret;
72214
72215 + pax_track_stack();
72216 +
72217 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72218 if (ret < 0)
72219 return ret;
72220 diff --git a/kernel/exit.c b/kernel/exit.c
72221 index 0f8fae3..9344a56 100644
72222 --- a/kernel/exit.c
72223 +++ b/kernel/exit.c
72224 @@ -55,6 +55,10 @@
72225 #include <asm/pgtable.h>
72226 #include <asm/mmu_context.h>
72227
72228 +#ifdef CONFIG_GRKERNSEC
72229 +extern rwlock_t grsec_exec_file_lock;
72230 +#endif
72231 +
72232 static void exit_mm(struct task_struct * tsk);
72233
72234 static void __unhash_process(struct task_struct *p)
72235 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72236 struct task_struct *leader;
72237 int zap_leader;
72238 repeat:
72239 +#ifdef CONFIG_NET
72240 + gr_del_task_from_ip_table(p);
72241 +#endif
72242 +
72243 tracehook_prepare_release_task(p);
72244 /* don't need to get the RCU readlock here - the process is dead and
72245 * can't be modifying its own credentials */
72246 @@ -397,7 +405,7 @@ int allow_signal(int sig)
72247 * know it'll be handled, so that they don't get converted to
72248 * SIGKILL or just silently dropped.
72249 */
72250 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72251 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72252 recalc_sigpending();
72253 spin_unlock_irq(&current->sighand->siglock);
72254 return 0;
72255 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72256 vsnprintf(current->comm, sizeof(current->comm), name, args);
72257 va_end(args);
72258
72259 +#ifdef CONFIG_GRKERNSEC
72260 + write_lock(&grsec_exec_file_lock);
72261 + if (current->exec_file) {
72262 + fput(current->exec_file);
72263 + current->exec_file = NULL;
72264 + }
72265 + write_unlock(&grsec_exec_file_lock);
72266 +#endif
72267 +
72268 + gr_set_kernel_label(current);
72269 +
72270 /*
72271 * If we were started as result of loading a module, close all of the
72272 * user space pages. We don't need them, and if we didn't close them
72273 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72274 struct task_struct *tsk = current;
72275 int group_dead;
72276
72277 - profile_task_exit(tsk);
72278 -
72279 - WARN_ON(atomic_read(&tsk->fs_excl));
72280 -
72281 + /*
72282 + * Check this first since set_fs() below depends on
72283 + * current_thread_info(), which we better not access when we're in
72284 + * interrupt context. Other than that, we want to do the set_fs()
72285 + * as early as possible.
72286 + */
72287 if (unlikely(in_interrupt()))
72288 panic("Aiee, killing interrupt handler!");
72289 - if (unlikely(!tsk->pid))
72290 - panic("Attempted to kill the idle task!");
72291
72292 /*
72293 - * If do_exit is called because this processes oopsed, it's possible
72294 + * If do_exit is called because this processes Oops'ed, it's possible
72295 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72296 * continuing. Amongst other possible reasons, this is to prevent
72297 * mm_release()->clear_child_tid() from writing to a user-controlled
72298 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72299 */
72300 set_fs(USER_DS);
72301
72302 + profile_task_exit(tsk);
72303 +
72304 + WARN_ON(atomic_read(&tsk->fs_excl));
72305 +
72306 + if (unlikely(!tsk->pid))
72307 + panic("Attempted to kill the idle task!");
72308 +
72309 tracehook_report_exit(&code);
72310
72311 validate_creds_for_do_exit(tsk);
72312 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72313 tsk->exit_code = code;
72314 taskstats_exit(tsk, group_dead);
72315
72316 + gr_acl_handle_psacct(tsk, code);
72317 + gr_acl_handle_exit();
72318 +
72319 exit_mm(tsk);
72320
72321 if (group_dead)
72322 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72323
72324 if (unlikely(wo->wo_flags & WNOWAIT)) {
72325 int exit_code = p->exit_code;
72326 - int why, status;
72327 + int why;
72328
72329 get_task_struct(p);
72330 read_unlock(&tasklist_lock);
72331 diff --git a/kernel/fork.c b/kernel/fork.c
72332 index 4bde56f..29a9bab 100644
72333 --- a/kernel/fork.c
72334 +++ b/kernel/fork.c
72335 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72336 *stackend = STACK_END_MAGIC; /* for overflow detection */
72337
72338 #ifdef CONFIG_CC_STACKPROTECTOR
72339 - tsk->stack_canary = get_random_int();
72340 + tsk->stack_canary = pax_get_random_long();
72341 #endif
72342
72343 /* One for us, one for whoever does the "release_task()" (usually parent) */
72344 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72345 mm->locked_vm = 0;
72346 mm->mmap = NULL;
72347 mm->mmap_cache = NULL;
72348 - mm->free_area_cache = oldmm->mmap_base;
72349 - mm->cached_hole_size = ~0UL;
72350 + mm->free_area_cache = oldmm->free_area_cache;
72351 + mm->cached_hole_size = oldmm->cached_hole_size;
72352 mm->map_count = 0;
72353 cpumask_clear(mm_cpumask(mm));
72354 mm->mm_rb = RB_ROOT;
72355 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72356 tmp->vm_flags &= ~VM_LOCKED;
72357 tmp->vm_mm = mm;
72358 tmp->vm_next = tmp->vm_prev = NULL;
72359 + tmp->vm_mirror = NULL;
72360 anon_vma_link(tmp);
72361 file = tmp->vm_file;
72362 if (file) {
72363 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72364 if (retval)
72365 goto out;
72366 }
72367 +
72368 +#ifdef CONFIG_PAX_SEGMEXEC
72369 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72370 + struct vm_area_struct *mpnt_m;
72371 +
72372 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72373 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72374 +
72375 + if (!mpnt->vm_mirror)
72376 + continue;
72377 +
72378 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72379 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72380 + mpnt->vm_mirror = mpnt_m;
72381 + } else {
72382 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72383 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72384 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72385 + mpnt->vm_mirror->vm_mirror = mpnt;
72386 + }
72387 + }
72388 + BUG_ON(mpnt_m);
72389 + }
72390 +#endif
72391 +
72392 /* a new mm has just been created */
72393 arch_dup_mmap(oldmm, mm);
72394 retval = 0;
72395 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72396 write_unlock(&fs->lock);
72397 return -EAGAIN;
72398 }
72399 - fs->users++;
72400 + atomic_inc(&fs->users);
72401 write_unlock(&fs->lock);
72402 return 0;
72403 }
72404 tsk->fs = copy_fs_struct(fs);
72405 if (!tsk->fs)
72406 return -ENOMEM;
72407 + gr_set_chroot_entries(tsk, &tsk->fs->root);
72408 return 0;
72409 }
72410
72411 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72412 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72413 #endif
72414 retval = -EAGAIN;
72415 +
72416 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72417 +
72418 if (atomic_read(&p->real_cred->user->processes) >=
72419 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72420 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72421 - p->real_cred->user != INIT_USER)
72422 + if (p->real_cred->user != INIT_USER &&
72423 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72424 goto bad_fork_free;
72425 }
72426 + current->flags &= ~PF_NPROC_EXCEEDED;
72427
72428 retval = copy_creds(p, clone_flags);
72429 if (retval < 0)
72430 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72431 goto bad_fork_free_pid;
72432 }
72433
72434 + gr_copy_label(p);
72435 +
72436 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72437 /*
72438 * Clear TID on mm_release()?
72439 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72440 bad_fork_free:
72441 free_task(p);
72442 fork_out:
72443 + gr_log_forkfail(retval);
72444 +
72445 return ERR_PTR(retval);
72446 }
72447
72448 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72449 if (clone_flags & CLONE_PARENT_SETTID)
72450 put_user(nr, parent_tidptr);
72451
72452 + gr_handle_brute_check();
72453 +
72454 if (clone_flags & CLONE_VFORK) {
72455 p->vfork_done = &vfork;
72456 init_completion(&vfork);
72457 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72458 return 0;
72459
72460 /* don't need lock here; in the worst case we'll do useless copy */
72461 - if (fs->users == 1)
72462 + if (atomic_read(&fs->users) == 1)
72463 return 0;
72464
72465 *new_fsp = copy_fs_struct(fs);
72466 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72467 fs = current->fs;
72468 write_lock(&fs->lock);
72469 current->fs = new_fs;
72470 - if (--fs->users)
72471 + gr_set_chroot_entries(current, &current->fs->root);
72472 + if (atomic_dec_return(&fs->users))
72473 new_fs = NULL;
72474 else
72475 new_fs = fs;
72476 diff --git a/kernel/futex.c b/kernel/futex.c
72477 index fb98c9f..333faec 100644
72478 --- a/kernel/futex.c
72479 +++ b/kernel/futex.c
72480 @@ -54,6 +54,7 @@
72481 #include <linux/mount.h>
72482 #include <linux/pagemap.h>
72483 #include <linux/syscalls.h>
72484 +#include <linux/ptrace.h>
72485 #include <linux/signal.h>
72486 #include <linux/module.h>
72487 #include <linux/magic.h>
72488 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72489 struct page *page;
72490 int err, ro = 0;
72491
72492 +#ifdef CONFIG_PAX_SEGMEXEC
72493 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72494 + return -EFAULT;
72495 +#endif
72496 +
72497 /*
72498 * The futex address must be "naturally" aligned.
72499 */
72500 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72501 struct futex_q q;
72502 int ret;
72503
72504 + pax_track_stack();
72505 +
72506 if (!bitset)
72507 return -EINVAL;
72508
72509 @@ -1871,7 +1879,7 @@ retry:
72510
72511 restart = &current_thread_info()->restart_block;
72512 restart->fn = futex_wait_restart;
72513 - restart->futex.uaddr = (u32 *)uaddr;
72514 + restart->futex.uaddr = uaddr;
72515 restart->futex.val = val;
72516 restart->futex.time = abs_time->tv64;
72517 restart->futex.bitset = bitset;
72518 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72519 struct futex_q q;
72520 int res, ret;
72521
72522 + pax_track_stack();
72523 +
72524 if (!bitset)
72525 return -EINVAL;
72526
72527 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72528 if (!p)
72529 goto err_unlock;
72530 ret = -EPERM;
72531 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72532 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72533 + goto err_unlock;
72534 +#endif
72535 pcred = __task_cred(p);
72536 if (cred->euid != pcred->euid &&
72537 cred->euid != pcred->uid &&
72538 @@ -2489,7 +2503,7 @@ retry:
72539 */
72540 static inline int fetch_robust_entry(struct robust_list __user **entry,
72541 struct robust_list __user * __user *head,
72542 - int *pi)
72543 + unsigned int *pi)
72544 {
72545 unsigned long uentry;
72546
72547 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72548 {
72549 u32 curval;
72550 int i;
72551 + mm_segment_t oldfs;
72552
72553 /*
72554 * This will fail and we want it. Some arch implementations do
72555 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72556 * implementation, the non functional ones will return
72557 * -ENOSYS.
72558 */
72559 + oldfs = get_fs();
72560 + set_fs(USER_DS);
72561 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72562 + set_fs(oldfs);
72563 if (curval == -EFAULT)
72564 futex_cmpxchg_enabled = 1;
72565
72566 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72567 index 2357165..eb25501 100644
72568 --- a/kernel/futex_compat.c
72569 +++ b/kernel/futex_compat.c
72570 @@ -10,6 +10,7 @@
72571 #include <linux/compat.h>
72572 #include <linux/nsproxy.h>
72573 #include <linux/futex.h>
72574 +#include <linux/ptrace.h>
72575
72576 #include <asm/uaccess.h>
72577
72578 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72579 {
72580 struct compat_robust_list_head __user *head;
72581 unsigned long ret;
72582 - const struct cred *cred = current_cred(), *pcred;
72583 + const struct cred *cred = current_cred();
72584 + const struct cred *pcred;
72585
72586 if (!futex_cmpxchg_enabled)
72587 return -ENOSYS;
72588 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72589 if (!p)
72590 goto err_unlock;
72591 ret = -EPERM;
72592 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72593 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72594 + goto err_unlock;
72595 +#endif
72596 pcred = __task_cred(p);
72597 if (cred->euid != pcred->euid &&
72598 cred->euid != pcred->uid &&
72599 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72600 index 9b22d03..6295b62 100644
72601 --- a/kernel/gcov/base.c
72602 +++ b/kernel/gcov/base.c
72603 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
72604 }
72605
72606 #ifdef CONFIG_MODULES
72607 -static inline int within(void *addr, void *start, unsigned long size)
72608 -{
72609 - return ((addr >= start) && (addr < start + size));
72610 -}
72611 -
72612 /* Update list and generate events when modules are unloaded. */
72613 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72614 void *data)
72615 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72616 prev = NULL;
72617 /* Remove entries located in module from linked list. */
72618 for (info = gcov_info_head; info; info = info->next) {
72619 - if (within(info, mod->module_core, mod->core_size)) {
72620 + if (within_module_core_rw((unsigned long)info, mod)) {
72621 if (prev)
72622 prev->next = info->next;
72623 else
72624 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72625 index a6e9d00..a0da4f9 100644
72626 --- a/kernel/hrtimer.c
72627 +++ b/kernel/hrtimer.c
72628 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72629 local_irq_restore(flags);
72630 }
72631
72632 -static void run_hrtimer_softirq(struct softirq_action *h)
72633 +static void run_hrtimer_softirq(void)
72634 {
72635 hrtimer_peek_ahead_timers();
72636 }
72637 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72638 index 8b6b8b6..6bc87df 100644
72639 --- a/kernel/kallsyms.c
72640 +++ b/kernel/kallsyms.c
72641 @@ -11,6 +11,9 @@
72642 * Changed the compression method from stem compression to "table lookup"
72643 * compression (see scripts/kallsyms.c for a more complete description)
72644 */
72645 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72646 +#define __INCLUDED_BY_HIDESYM 1
72647 +#endif
72648 #include <linux/kallsyms.h>
72649 #include <linux/module.h>
72650 #include <linux/init.h>
72651 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72652
72653 static inline int is_kernel_inittext(unsigned long addr)
72654 {
72655 + if (system_state != SYSTEM_BOOTING)
72656 + return 0;
72657 +
72658 if (addr >= (unsigned long)_sinittext
72659 && addr <= (unsigned long)_einittext)
72660 return 1;
72661 return 0;
72662 }
72663
72664 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72665 +#ifdef CONFIG_MODULES
72666 +static inline int is_module_text(unsigned long addr)
72667 +{
72668 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72669 + return 1;
72670 +
72671 + addr = ktla_ktva(addr);
72672 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72673 +}
72674 +#else
72675 +static inline int is_module_text(unsigned long addr)
72676 +{
72677 + return 0;
72678 +}
72679 +#endif
72680 +#endif
72681 +
72682 static inline int is_kernel_text(unsigned long addr)
72683 {
72684 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72685 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72686
72687 static inline int is_kernel(unsigned long addr)
72688 {
72689 +
72690 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72691 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
72692 + return 1;
72693 +
72694 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72695 +#else
72696 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72697 +#endif
72698 +
72699 return 1;
72700 return in_gate_area_no_task(addr);
72701 }
72702
72703 static int is_ksym_addr(unsigned long addr)
72704 {
72705 +
72706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72707 + if (is_module_text(addr))
72708 + return 0;
72709 +#endif
72710 +
72711 if (all_var)
72712 return is_kernel(addr);
72713
72714 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72715
72716 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72717 {
72718 - iter->name[0] = '\0';
72719 iter->nameoff = get_symbol_offset(new_pos);
72720 iter->pos = new_pos;
72721 }
72722 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72723 {
72724 struct kallsym_iter *iter = m->private;
72725
72726 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72727 + if (current_uid())
72728 + return 0;
72729 +#endif
72730 +
72731 /* Some debugging symbols have no name. Ignore them. */
72732 if (!iter->name[0])
72733 return 0;
72734 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72735 struct kallsym_iter *iter;
72736 int ret;
72737
72738 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72739 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72740 if (!iter)
72741 return -ENOMEM;
72742 reset_iter(iter, 0);
72743 diff --git a/kernel/kexec.c b/kernel/kexec.c
72744 index f336e21..9c1c20b 100644
72745 --- a/kernel/kexec.c
72746 +++ b/kernel/kexec.c
72747 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72748 unsigned long flags)
72749 {
72750 struct compat_kexec_segment in;
72751 - struct kexec_segment out, __user *ksegments;
72752 + struct kexec_segment out;
72753 + struct kexec_segment __user *ksegments;
72754 unsigned long i, result;
72755
72756 /* Don't allow clients that don't understand the native
72757 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72758 index 53dae4b..9ba3743 100644
72759 --- a/kernel/kgdb.c
72760 +++ b/kernel/kgdb.c
72761 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72762 /* Guard for recursive entry */
72763 static int exception_level;
72764
72765 -static struct kgdb_io *kgdb_io_ops;
72766 +static const struct kgdb_io *kgdb_io_ops;
72767 static DEFINE_SPINLOCK(kgdb_registration_lock);
72768
72769 /* kgdb console driver is loaded */
72770 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72771 */
72772 static atomic_t passive_cpu_wait[NR_CPUS];
72773 static atomic_t cpu_in_kgdb[NR_CPUS];
72774 -atomic_t kgdb_setting_breakpoint;
72775 +atomic_unchecked_t kgdb_setting_breakpoint;
72776
72777 struct task_struct *kgdb_usethread;
72778 struct task_struct *kgdb_contthread;
72779 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72780 sizeof(unsigned long)];
72781
72782 /* to keep track of the CPU which is doing the single stepping*/
72783 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72784 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72785
72786 /*
72787 * If you are debugging a problem where roundup (the collection of
72788 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72789 return 0;
72790 if (kgdb_connected)
72791 return 1;
72792 - if (atomic_read(&kgdb_setting_breakpoint))
72793 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72794 return 1;
72795 if (print_wait)
72796 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72797 @@ -1426,8 +1426,8 @@ acquirelock:
72798 * instance of the exception handler wanted to come into the
72799 * debugger on a different CPU via a single step
72800 */
72801 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72802 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72803 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72804 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72805
72806 atomic_set(&kgdb_active, -1);
72807 touch_softlockup_watchdog();
72808 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72809 *
72810 * Register it with the KGDB core.
72811 */
72812 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72813 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72814 {
72815 int err;
72816
72817 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72818 *
72819 * Unregister it with the KGDB core.
72820 */
72821 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72822 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72823 {
72824 BUG_ON(kgdb_connected);
72825
72826 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72827 */
72828 void kgdb_breakpoint(void)
72829 {
72830 - atomic_set(&kgdb_setting_breakpoint, 1);
72831 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72832 wmb(); /* Sync point before breakpoint */
72833 arch_kgdb_breakpoint();
72834 wmb(); /* Sync point after breakpoint */
72835 - atomic_set(&kgdb_setting_breakpoint, 0);
72836 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72837 }
72838 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72839
72840 diff --git a/kernel/kmod.c b/kernel/kmod.c
72841 index d206078..e27ba6a 100644
72842 --- a/kernel/kmod.c
72843 +++ b/kernel/kmod.c
72844 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72845 * If module auto-loading support is disabled then this function
72846 * becomes a no-operation.
72847 */
72848 -int __request_module(bool wait, const char *fmt, ...)
72849 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72850 {
72851 - va_list args;
72852 char module_name[MODULE_NAME_LEN];
72853 unsigned int max_modprobes;
72854 int ret;
72855 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72856 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72857 static char *envp[] = { "HOME=/",
72858 "TERM=linux",
72859 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72860 @@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72861 if (ret)
72862 return ret;
72863
72864 - va_start(args, fmt);
72865 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72866 - va_end(args);
72867 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72868 if (ret >= MODULE_NAME_LEN)
72869 return -ENAMETOOLONG;
72870
72871 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72872 + if (!current_uid()) {
72873 + /* hack to workaround consolekit/udisks stupidity */
72874 + read_lock(&tasklist_lock);
72875 + if (!strcmp(current->comm, "mount") &&
72876 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72877 + read_unlock(&tasklist_lock);
72878 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72879 + return -EPERM;
72880 + }
72881 + read_unlock(&tasklist_lock);
72882 + }
72883 +#endif
72884 +
72885 /* If modprobe needs a service that is in a module, we get a recursive
72886 * loop. Limit the number of running kmod threads to max_threads/2 or
72887 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72888 @@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72889 atomic_dec(&kmod_concurrent);
72890 return ret;
72891 }
72892 +
72893 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72894 +{
72895 + va_list args;
72896 + int ret;
72897 +
72898 + va_start(args, fmt);
72899 + ret = ____request_module(wait, module_param, fmt, args);
72900 + va_end(args);
72901 +
72902 + return ret;
72903 +}
72904 +
72905 +int __request_module(bool wait, const char *fmt, ...)
72906 +{
72907 + va_list args;
72908 + int ret;
72909 +
72910 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72911 + if (current_uid()) {
72912 + char module_param[MODULE_NAME_LEN];
72913 +
72914 + memset(module_param, 0, sizeof(module_param));
72915 +
72916 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72917 +
72918 + va_start(args, fmt);
72919 + ret = ____request_module(wait, module_param, fmt, args);
72920 + va_end(args);
72921 +
72922 + return ret;
72923 + }
72924 +#endif
72925 +
72926 + va_start(args, fmt);
72927 + ret = ____request_module(wait, NULL, fmt, args);
72928 + va_end(args);
72929 +
72930 + return ret;
72931 +}
72932 +
72933 +
72934 EXPORT_SYMBOL(__request_module);
72935 #endif /* CONFIG_MODULES */
72936
72937 @@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72938 *
72939 * Thus the __user pointer cast is valid here.
72940 */
72941 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
72942 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72943
72944 /*
72945 * If ret is 0, either ____call_usermodehelper failed and the
72946 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72947 index 176d825..77fa8ea 100644
72948 --- a/kernel/kprobes.c
72949 +++ b/kernel/kprobes.c
72950 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72951 * kernel image and loaded module images reside. This is required
72952 * so x86_64 can correctly handle the %rip-relative fixups.
72953 */
72954 - kip->insns = module_alloc(PAGE_SIZE);
72955 + kip->insns = module_alloc_exec(PAGE_SIZE);
72956 if (!kip->insns) {
72957 kfree(kip);
72958 return NULL;
72959 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72960 */
72961 if (!list_is_singular(&kprobe_insn_pages)) {
72962 list_del(&kip->list);
72963 - module_free(NULL, kip->insns);
72964 + module_free_exec(NULL, kip->insns);
72965 kfree(kip);
72966 }
72967 return 1;
72968 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72969 {
72970 int i, err = 0;
72971 unsigned long offset = 0, size = 0;
72972 - char *modname, namebuf[128];
72973 + char *modname, namebuf[KSYM_NAME_LEN];
72974 const char *symbol_name;
72975 void *addr;
72976 struct kprobe_blackpoint *kb;
72977 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72978 const char *sym = NULL;
72979 unsigned int i = *(loff_t *) v;
72980 unsigned long offset = 0;
72981 - char *modname, namebuf[128];
72982 + char *modname, namebuf[KSYM_NAME_LEN];
72983
72984 head = &kprobe_table[i];
72985 preempt_disable();
72986 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72987 index d86fe89..d12fc66 100644
72988 --- a/kernel/lockdep.c
72989 +++ b/kernel/lockdep.c
72990 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72991 /*
72992 * Various lockdep statistics:
72993 */
72994 -atomic_t chain_lookup_hits;
72995 -atomic_t chain_lookup_misses;
72996 -atomic_t hardirqs_on_events;
72997 -atomic_t hardirqs_off_events;
72998 -atomic_t redundant_hardirqs_on;
72999 -atomic_t redundant_hardirqs_off;
73000 -atomic_t softirqs_on_events;
73001 -atomic_t softirqs_off_events;
73002 -atomic_t redundant_softirqs_on;
73003 -atomic_t redundant_softirqs_off;
73004 -atomic_t nr_unused_locks;
73005 -atomic_t nr_cyclic_checks;
73006 -atomic_t nr_find_usage_forwards_checks;
73007 -atomic_t nr_find_usage_backwards_checks;
73008 +atomic_unchecked_t chain_lookup_hits;
73009 +atomic_unchecked_t chain_lookup_misses;
73010 +atomic_unchecked_t hardirqs_on_events;
73011 +atomic_unchecked_t hardirqs_off_events;
73012 +atomic_unchecked_t redundant_hardirqs_on;
73013 +atomic_unchecked_t redundant_hardirqs_off;
73014 +atomic_unchecked_t softirqs_on_events;
73015 +atomic_unchecked_t softirqs_off_events;
73016 +atomic_unchecked_t redundant_softirqs_on;
73017 +atomic_unchecked_t redundant_softirqs_off;
73018 +atomic_unchecked_t nr_unused_locks;
73019 +atomic_unchecked_t nr_cyclic_checks;
73020 +atomic_unchecked_t nr_find_usage_forwards_checks;
73021 +atomic_unchecked_t nr_find_usage_backwards_checks;
73022 #endif
73023
73024 /*
73025 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
73026 int i;
73027 #endif
73028
73029 +#ifdef CONFIG_PAX_KERNEXEC
73030 + start = ktla_ktva(start);
73031 +#endif
73032 +
73033 /*
73034 * static variable?
73035 */
73036 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
73037 */
73038 for_each_possible_cpu(i) {
73039 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73040 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73041 - + per_cpu_offset(i);
73042 + end = start + PERCPU_ENOUGH_ROOM;
73043
73044 if ((addr >= start) && (addr < end))
73045 return 1;
73046 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73047 if (!static_obj(lock->key)) {
73048 debug_locks_off();
73049 printk("INFO: trying to register non-static key.\n");
73050 + printk("lock:%pS key:%pS.\n", lock, lock->key);
73051 printk("the code is fine but needs lockdep annotation.\n");
73052 printk("turning off the locking correctness validator.\n");
73053 dump_stack();
73054 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73055 if (!class)
73056 return 0;
73057 }
73058 - debug_atomic_inc((atomic_t *)&class->ops);
73059 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73060 if (very_verbose(class)) {
73061 printk("\nacquire class [%p] %s", class->key, class->name);
73062 if (class->name_version > 1)
73063 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73064 index a2ee95a..092f0f2 100644
73065 --- a/kernel/lockdep_internals.h
73066 +++ b/kernel/lockdep_internals.h
73067 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73068 /*
73069 * Various lockdep statistics:
73070 */
73071 -extern atomic_t chain_lookup_hits;
73072 -extern atomic_t chain_lookup_misses;
73073 -extern atomic_t hardirqs_on_events;
73074 -extern atomic_t hardirqs_off_events;
73075 -extern atomic_t redundant_hardirqs_on;
73076 -extern atomic_t redundant_hardirqs_off;
73077 -extern atomic_t softirqs_on_events;
73078 -extern atomic_t softirqs_off_events;
73079 -extern atomic_t redundant_softirqs_on;
73080 -extern atomic_t redundant_softirqs_off;
73081 -extern atomic_t nr_unused_locks;
73082 -extern atomic_t nr_cyclic_checks;
73083 -extern atomic_t nr_cyclic_check_recursions;
73084 -extern atomic_t nr_find_usage_forwards_checks;
73085 -extern atomic_t nr_find_usage_forwards_recursions;
73086 -extern atomic_t nr_find_usage_backwards_checks;
73087 -extern atomic_t nr_find_usage_backwards_recursions;
73088 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
73089 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
73090 -# define debug_atomic_read(ptr) atomic_read(ptr)
73091 +extern atomic_unchecked_t chain_lookup_hits;
73092 +extern atomic_unchecked_t chain_lookup_misses;
73093 +extern atomic_unchecked_t hardirqs_on_events;
73094 +extern atomic_unchecked_t hardirqs_off_events;
73095 +extern atomic_unchecked_t redundant_hardirqs_on;
73096 +extern atomic_unchecked_t redundant_hardirqs_off;
73097 +extern atomic_unchecked_t softirqs_on_events;
73098 +extern atomic_unchecked_t softirqs_off_events;
73099 +extern atomic_unchecked_t redundant_softirqs_on;
73100 +extern atomic_unchecked_t redundant_softirqs_off;
73101 +extern atomic_unchecked_t nr_unused_locks;
73102 +extern atomic_unchecked_t nr_cyclic_checks;
73103 +extern atomic_unchecked_t nr_cyclic_check_recursions;
73104 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
73105 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73106 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
73107 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73108 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73109 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73110 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73111 #else
73112 # define debug_atomic_inc(ptr) do { } while (0)
73113 # define debug_atomic_dec(ptr) do { } while (0)
73114 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73115 index d4aba4f..02a353f 100644
73116 --- a/kernel/lockdep_proc.c
73117 +++ b/kernel/lockdep_proc.c
73118 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73119
73120 static void print_name(struct seq_file *m, struct lock_class *class)
73121 {
73122 - char str[128];
73123 + char str[KSYM_NAME_LEN];
73124 const char *name = class->name;
73125
73126 if (!name) {
73127 diff --git a/kernel/module.c b/kernel/module.c
73128 index 4b270e6..2226274 100644
73129 --- a/kernel/module.c
73130 +++ b/kernel/module.c
73131 @@ -55,6 +55,7 @@
73132 #include <linux/async.h>
73133 #include <linux/percpu.h>
73134 #include <linux/kmemleak.h>
73135 +#include <linux/grsecurity.h>
73136
73137 #define CREATE_TRACE_POINTS
73138 #include <trace/events/module.h>
73139 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73140 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73141
73142 /* Bounds of module allocation, for speeding __module_address */
73143 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73144 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73145 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73146
73147 int register_module_notifier(struct notifier_block * nb)
73148 {
73149 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73150 return true;
73151
73152 list_for_each_entry_rcu(mod, &modules, list) {
73153 - struct symsearch arr[] = {
73154 + struct symsearch modarr[] = {
73155 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73156 NOT_GPL_ONLY, false },
73157 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73158 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73159 #endif
73160 };
73161
73162 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73163 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73164 return true;
73165 }
73166 return false;
73167 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73168 void *ptr;
73169 int cpu;
73170
73171 - if (align > PAGE_SIZE) {
73172 + if (align-1 >= PAGE_SIZE) {
73173 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73174 name, align, PAGE_SIZE);
73175 align = PAGE_SIZE;
73176 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73177 * /sys/module/foo/sections stuff
73178 * J. Corbet <corbet@lwn.net>
73179 */
73180 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73181 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73182
73183 static inline bool sect_empty(const Elf_Shdr *sect)
73184 {
73185 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73186 destroy_params(mod->kp, mod->num_kp);
73187
73188 /* This may be NULL, but that's OK */
73189 - module_free(mod, mod->module_init);
73190 + module_free(mod, mod->module_init_rw);
73191 + module_free_exec(mod, mod->module_init_rx);
73192 kfree(mod->args);
73193 if (mod->percpu)
73194 percpu_modfree(mod->percpu);
73195 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73196 percpu_modfree(mod->refptr);
73197 #endif
73198 /* Free lock-classes: */
73199 - lockdep_free_key_range(mod->module_core, mod->core_size);
73200 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73201 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73202
73203 /* Finally, free the core (containing the module structure) */
73204 - module_free(mod, mod->module_core);
73205 + module_free_exec(mod, mod->module_core_rx);
73206 + module_free(mod, mod->module_core_rw);
73207
73208 #ifdef CONFIG_MPU
73209 update_protections(current->mm);
73210 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73211 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73212 int ret = 0;
73213 const struct kernel_symbol *ksym;
73214 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73215 + int is_fs_load = 0;
73216 + int register_filesystem_found = 0;
73217 + char *p;
73218 +
73219 + p = strstr(mod->args, "grsec_modharden_fs");
73220 +
73221 + if (p) {
73222 + char *endptr = p + strlen("grsec_modharden_fs");
73223 + /* copy \0 as well */
73224 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73225 + is_fs_load = 1;
73226 + }
73227 +#endif
73228 +
73229
73230 for (i = 1; i < n; i++) {
73231 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73232 + const char *name = strtab + sym[i].st_name;
73233 +
73234 + /* it's a real shame this will never get ripped and copied
73235 + upstream! ;(
73236 + */
73237 + if (is_fs_load && !strcmp(name, "register_filesystem"))
73238 + register_filesystem_found = 1;
73239 +#endif
73240 switch (sym[i].st_shndx) {
73241 case SHN_COMMON:
73242 /* We compiled with -fno-common. These are not
73243 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73244 strtab + sym[i].st_name, mod);
73245 /* Ok if resolved. */
73246 if (ksym) {
73247 + pax_open_kernel();
73248 sym[i].st_value = ksym->value;
73249 + pax_close_kernel();
73250 break;
73251 }
73252
73253 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73254 secbase = (unsigned long)mod->percpu;
73255 else
73256 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73257 + pax_open_kernel();
73258 sym[i].st_value += secbase;
73259 + pax_close_kernel();
73260 break;
73261 }
73262 }
73263
73264 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73265 + if (is_fs_load && !register_filesystem_found) {
73266 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73267 + ret = -EPERM;
73268 + }
73269 +#endif
73270 +
73271 return ret;
73272 }
73273
73274 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73275 || s->sh_entsize != ~0UL
73276 || strstarts(secstrings + s->sh_name, ".init"))
73277 continue;
73278 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73279 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73280 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73281 + else
73282 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73283 DEBUGP("\t%s\n", secstrings + s->sh_name);
73284 }
73285 - if (m == 0)
73286 - mod->core_text_size = mod->core_size;
73287 }
73288
73289 DEBUGP("Init section allocation order:\n");
73290 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73291 || s->sh_entsize != ~0UL
73292 || !strstarts(secstrings + s->sh_name, ".init"))
73293 continue;
73294 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73295 - | INIT_OFFSET_MASK);
73296 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73297 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73298 + else
73299 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73300 + s->sh_entsize |= INIT_OFFSET_MASK;
73301 DEBUGP("\t%s\n", secstrings + s->sh_name);
73302 }
73303 - if (m == 0)
73304 - mod->init_text_size = mod->init_size;
73305 }
73306 }
73307
73308 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73309
73310 /* As per nm */
73311 static char elf_type(const Elf_Sym *sym,
73312 - Elf_Shdr *sechdrs,
73313 - const char *secstrings,
73314 - struct module *mod)
73315 + const Elf_Shdr *sechdrs,
73316 + const char *secstrings)
73317 {
73318 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73319 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73320 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73321
73322 /* Put symbol section at end of init part of module. */
73323 symsect->sh_flags |= SHF_ALLOC;
73324 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73325 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73326 symindex) | INIT_OFFSET_MASK;
73327 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73328
73329 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73330 }
73331
73332 /* Append room for core symbols at end of core part. */
73333 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73334 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73335 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73336 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73337
73338 /* Put string table section at end of init part of module. */
73339 strsect->sh_flags |= SHF_ALLOC;
73340 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73341 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73342 strindex) | INIT_OFFSET_MASK;
73343 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73344
73345 /* Append room for core symbols' strings at end of core part. */
73346 - *pstroffs = mod->core_size;
73347 + *pstroffs = mod->core_size_rx;
73348 __set_bit(0, strmap);
73349 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73350 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73351
73352 return symoffs;
73353 }
73354 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73355 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73356 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73357
73358 + pax_open_kernel();
73359 +
73360 /* Set types up while we still have access to sections. */
73361 for (i = 0; i < mod->num_symtab; i++)
73362 mod->symtab[i].st_info
73363 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73364 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
73365
73366 - mod->core_symtab = dst = mod->module_core + symoffs;
73367 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
73368 src = mod->symtab;
73369 *dst = *src;
73370 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73371 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73372 }
73373 mod->core_num_syms = ndst;
73374
73375 - mod->core_strtab = s = mod->module_core + stroffs;
73376 + mod->core_strtab = s = mod->module_core_rx + stroffs;
73377 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73378 if (test_bit(i, strmap))
73379 *++s = mod->strtab[i];
73380 +
73381 + pax_close_kernel();
73382 }
73383 #else
73384 static inline unsigned long layout_symtab(struct module *mod,
73385 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73386 #endif
73387 }
73388
73389 -static void *module_alloc_update_bounds(unsigned long size)
73390 +static void *module_alloc_update_bounds_rw(unsigned long size)
73391 {
73392 void *ret = module_alloc(size);
73393
73394 if (ret) {
73395 /* Update module bounds. */
73396 - if ((unsigned long)ret < module_addr_min)
73397 - module_addr_min = (unsigned long)ret;
73398 - if ((unsigned long)ret + size > module_addr_max)
73399 - module_addr_max = (unsigned long)ret + size;
73400 + if ((unsigned long)ret < module_addr_min_rw)
73401 + module_addr_min_rw = (unsigned long)ret;
73402 + if ((unsigned long)ret + size > module_addr_max_rw)
73403 + module_addr_max_rw = (unsigned long)ret + size;
73404 + }
73405 + return ret;
73406 +}
73407 +
73408 +static void *module_alloc_update_bounds_rx(unsigned long size)
73409 +{
73410 + void *ret = module_alloc_exec(size);
73411 +
73412 + if (ret) {
73413 + /* Update module bounds. */
73414 + if ((unsigned long)ret < module_addr_min_rx)
73415 + module_addr_min_rx = (unsigned long)ret;
73416 + if ((unsigned long)ret + size > module_addr_max_rx)
73417 + module_addr_max_rx = (unsigned long)ret + size;
73418 }
73419 return ret;
73420 }
73421 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73422 unsigned int i;
73423
73424 /* only scan the sections containing data */
73425 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73426 - (unsigned long)mod->module_core,
73427 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73428 + (unsigned long)mod->module_core_rw,
73429 sizeof(struct module), GFP_KERNEL);
73430
73431 for (i = 1; i < hdr->e_shnum; i++) {
73432 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73433 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73434 continue;
73435
73436 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73437 - (unsigned long)mod->module_core,
73438 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73439 + (unsigned long)mod->module_core_rw,
73440 sechdrs[i].sh_size, GFP_KERNEL);
73441 }
73442 }
73443 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73444 Elf_Ehdr *hdr;
73445 Elf_Shdr *sechdrs;
73446 char *secstrings, *args, *modmagic, *strtab = NULL;
73447 - char *staging;
73448 + char *staging, *license;
73449 unsigned int i;
73450 unsigned int symindex = 0;
73451 unsigned int strindex = 0;
73452 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73453 goto free_hdr;
73454 }
73455
73456 + license = get_modinfo(sechdrs, infoindex, "license");
73457 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73458 + if (!license || !license_is_gpl_compatible(license)) {
73459 + err = -ENOEXEC;
73460 + goto free_hdr;
73461 + }
73462 +#endif
73463 +
73464 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73465 /* This is allowed: modprobe --force will invalidate it. */
73466 if (!modmagic) {
73467 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73468 secstrings, &stroffs, strmap);
73469
73470 /* Do the allocs. */
73471 - ptr = module_alloc_update_bounds(mod->core_size);
73472 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73473 /*
73474 * The pointer to this block is stored in the module structure
73475 * which is inside the block. Just mark it as not being a
73476 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73477 err = -ENOMEM;
73478 goto free_percpu;
73479 }
73480 - memset(ptr, 0, mod->core_size);
73481 - mod->module_core = ptr;
73482 + memset(ptr, 0, mod->core_size_rw);
73483 + mod->module_core_rw = ptr;
73484
73485 - ptr = module_alloc_update_bounds(mod->init_size);
73486 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73487 /*
73488 * The pointer to this block is stored in the module structure
73489 * which is inside the block. This block doesn't need to be
73490 * scanned as it contains data and code that will be freed
73491 * after the module is initialized.
73492 */
73493 - kmemleak_ignore(ptr);
73494 - if (!ptr && mod->init_size) {
73495 + kmemleak_not_leak(ptr);
73496 + if (!ptr && mod->init_size_rw) {
73497 err = -ENOMEM;
73498 - goto free_core;
73499 + goto free_core_rw;
73500 }
73501 - memset(ptr, 0, mod->init_size);
73502 - mod->module_init = ptr;
73503 + memset(ptr, 0, mod->init_size_rw);
73504 + mod->module_init_rw = ptr;
73505 +
73506 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73507 + kmemleak_not_leak(ptr);
73508 + if (!ptr) {
73509 + err = -ENOMEM;
73510 + goto free_init_rw;
73511 + }
73512 +
73513 + pax_open_kernel();
73514 + memset(ptr, 0, mod->core_size_rx);
73515 + pax_close_kernel();
73516 + mod->module_core_rx = ptr;
73517 +
73518 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73519 + kmemleak_not_leak(ptr);
73520 + if (!ptr && mod->init_size_rx) {
73521 + err = -ENOMEM;
73522 + goto free_core_rx;
73523 + }
73524 +
73525 + pax_open_kernel();
73526 + memset(ptr, 0, mod->init_size_rx);
73527 + pax_close_kernel();
73528 + mod->module_init_rx = ptr;
73529
73530 /* Transfer each section which specifies SHF_ALLOC */
73531 DEBUGP("final section addresses:\n");
73532 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73533 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73534 continue;
73535
73536 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73537 - dest = mod->module_init
73538 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73539 - else
73540 - dest = mod->module_core + sechdrs[i].sh_entsize;
73541 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73542 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73543 + dest = mod->module_init_rw
73544 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73545 + else
73546 + dest = mod->module_init_rx
73547 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73548 + } else {
73549 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73550 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73551 + else
73552 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73553 + }
73554
73555 - if (sechdrs[i].sh_type != SHT_NOBITS)
73556 - memcpy(dest, (void *)sechdrs[i].sh_addr,
73557 - sechdrs[i].sh_size);
73558 + if (sechdrs[i].sh_type != SHT_NOBITS) {
73559 +
73560 +#ifdef CONFIG_PAX_KERNEXEC
73561 +#ifdef CONFIG_X86_64
73562 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73563 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73564 +#endif
73565 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73566 + pax_open_kernel();
73567 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73568 + pax_close_kernel();
73569 + } else
73570 +#endif
73571 +
73572 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73573 + }
73574 /* Update sh_addr to point to copy in image. */
73575 - sechdrs[i].sh_addr = (unsigned long)dest;
73576 +
73577 +#ifdef CONFIG_PAX_KERNEXEC
73578 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73579 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73580 + else
73581 +#endif
73582 +
73583 + sechdrs[i].sh_addr = (unsigned long)dest;
73584 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73585 }
73586 /* Module has been moved. */
73587 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73588 mod->name);
73589 if (!mod->refptr) {
73590 err = -ENOMEM;
73591 - goto free_init;
73592 + goto free_init_rx;
73593 }
73594 #endif
73595 /* Now we've moved module, initialize linked lists, etc. */
73596 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73597 goto free_unload;
73598
73599 /* Set up license info based on the info section */
73600 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73601 + set_license(mod, license);
73602
73603 /*
73604 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73605 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73606 /* Set up MODINFO_ATTR fields */
73607 setup_modinfo(mod, sechdrs, infoindex);
73608
73609 + mod->args = args;
73610 +
73611 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73612 + {
73613 + char *p, *p2;
73614 +
73615 + if (strstr(mod->args, "grsec_modharden_netdev")) {
73616 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73617 + err = -EPERM;
73618 + goto cleanup;
73619 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73620 + p += strlen("grsec_modharden_normal");
73621 + p2 = strstr(p, "_");
73622 + if (p2) {
73623 + *p2 = '\0';
73624 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73625 + *p2 = '_';
73626 + }
73627 + err = -EPERM;
73628 + goto cleanup;
73629 + }
73630 + }
73631 +#endif
73632 +
73633 +
73634 /* Fix up syms, so that st_value is a pointer to location. */
73635 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73636 mod);
73637 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73638
73639 /* Now do relocations. */
73640 for (i = 1; i < hdr->e_shnum; i++) {
73641 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
73642 unsigned int info = sechdrs[i].sh_info;
73643 + strtab = (char *)sechdrs[strindex].sh_addr;
73644
73645 /* Not a valid relocation section? */
73646 if (info >= hdr->e_shnum)
73647 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73648 * Do it before processing of module parameters, so the module
73649 * can provide parameter accessor functions of its own.
73650 */
73651 - if (mod->module_init)
73652 - flush_icache_range((unsigned long)mod->module_init,
73653 - (unsigned long)mod->module_init
73654 - + mod->init_size);
73655 - flush_icache_range((unsigned long)mod->module_core,
73656 - (unsigned long)mod->module_core + mod->core_size);
73657 + if (mod->module_init_rx)
73658 + flush_icache_range((unsigned long)mod->module_init_rx,
73659 + (unsigned long)mod->module_init_rx
73660 + + mod->init_size_rx);
73661 + flush_icache_range((unsigned long)mod->module_core_rx,
73662 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
73663
73664 set_fs(old_fs);
73665
73666 - mod->args = args;
73667 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73668 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73669 mod->name);
73670 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73671 free_unload:
73672 module_unload_free(mod);
73673 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73674 + free_init_rx:
73675 percpu_modfree(mod->refptr);
73676 - free_init:
73677 #endif
73678 - module_free(mod, mod->module_init);
73679 - free_core:
73680 - module_free(mod, mod->module_core);
73681 + module_free_exec(mod, mod->module_init_rx);
73682 + free_core_rx:
73683 + module_free_exec(mod, mod->module_core_rx);
73684 + free_init_rw:
73685 + module_free(mod, mod->module_init_rw);
73686 + free_core_rw:
73687 + module_free(mod, mod->module_core_rw);
73688 /* mod will be freed with core. Don't access it beyond this line! */
73689 free_percpu:
73690 if (percpu)
73691 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73692 mod->symtab = mod->core_symtab;
73693 mod->strtab = mod->core_strtab;
73694 #endif
73695 - module_free(mod, mod->module_init);
73696 - mod->module_init = NULL;
73697 - mod->init_size = 0;
73698 - mod->init_text_size = 0;
73699 + module_free(mod, mod->module_init_rw);
73700 + module_free_exec(mod, mod->module_init_rx);
73701 + mod->module_init_rw = NULL;
73702 + mod->module_init_rx = NULL;
73703 + mod->init_size_rw = 0;
73704 + mod->init_size_rx = 0;
73705 mutex_unlock(&module_mutex);
73706
73707 return 0;
73708 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73709 unsigned long nextval;
73710
73711 /* At worse, next value is at end of module */
73712 - if (within_module_init(addr, mod))
73713 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
73714 + if (within_module_init_rx(addr, mod))
73715 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73716 + else if (within_module_init_rw(addr, mod))
73717 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73718 + else if (within_module_core_rx(addr, mod))
73719 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73720 + else if (within_module_core_rw(addr, mod))
73721 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73722 else
73723 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
73724 + return NULL;
73725
73726 /* Scan for closest preceeding symbol, and next symbol. (ELF
73727 starts real symbols at 1). */
73728 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73729 char buf[8];
73730
73731 seq_printf(m, "%s %u",
73732 - mod->name, mod->init_size + mod->core_size);
73733 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73734 print_unload_info(m, mod);
73735
73736 /* Informative for users. */
73737 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73738 mod->state == MODULE_STATE_COMING ? "Loading":
73739 "Live");
73740 /* Used by oprofile and other similar tools. */
73741 - seq_printf(m, " 0x%p", mod->module_core);
73742 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73743
73744 /* Taints info */
73745 if (mod->taints)
73746 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73747
73748 static int __init proc_modules_init(void)
73749 {
73750 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73751 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73752 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73753 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73754 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73755 +#else
73756 proc_create("modules", 0, NULL, &proc_modules_operations);
73757 +#endif
73758 +#else
73759 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73760 +#endif
73761 return 0;
73762 }
73763 module_init(proc_modules_init);
73764 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73765 {
73766 struct module *mod;
73767
73768 - if (addr < module_addr_min || addr > module_addr_max)
73769 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73770 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
73771 return NULL;
73772
73773 list_for_each_entry_rcu(mod, &modules, list)
73774 - if (within_module_core(addr, mod)
73775 - || within_module_init(addr, mod))
73776 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
73777 return mod;
73778 return NULL;
73779 }
73780 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73781 */
73782 struct module *__module_text_address(unsigned long addr)
73783 {
73784 - struct module *mod = __module_address(addr);
73785 + struct module *mod;
73786 +
73787 +#ifdef CONFIG_X86_32
73788 + addr = ktla_ktva(addr);
73789 +#endif
73790 +
73791 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73792 + return NULL;
73793 +
73794 + mod = __module_address(addr);
73795 +
73796 if (mod) {
73797 /* Make sure it's within the text section. */
73798 - if (!within(addr, mod->module_init, mod->init_text_size)
73799 - && !within(addr, mod->module_core, mod->core_text_size))
73800 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73801 mod = NULL;
73802 }
73803 return mod;
73804 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73805 index ec815a9..fe46e99 100644
73806 --- a/kernel/mutex-debug.c
73807 +++ b/kernel/mutex-debug.c
73808 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73809 }
73810
73811 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73812 - struct thread_info *ti)
73813 + struct task_struct *task)
73814 {
73815 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73816
73817 /* Mark the current thread as blocked on the lock: */
73818 - ti->task->blocked_on = waiter;
73819 + task->blocked_on = waiter;
73820 }
73821
73822 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73823 - struct thread_info *ti)
73824 + struct task_struct *task)
73825 {
73826 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73827 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73828 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73829 - ti->task->blocked_on = NULL;
73830 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
73831 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73832 + task->blocked_on = NULL;
73833
73834 list_del_init(&waiter->list);
73835 waiter->task = NULL;
73836 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73837 return;
73838
73839 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73840 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73841 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
73842 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73843 mutex_clear_owner(lock);
73844 }
73845 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73846 index 6b2d735..372d3c4 100644
73847 --- a/kernel/mutex-debug.h
73848 +++ b/kernel/mutex-debug.h
73849 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73850 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73851 extern void debug_mutex_add_waiter(struct mutex *lock,
73852 struct mutex_waiter *waiter,
73853 - struct thread_info *ti);
73854 + struct task_struct *task);
73855 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73856 - struct thread_info *ti);
73857 + struct task_struct *task);
73858 extern void debug_mutex_unlock(struct mutex *lock);
73859 extern void debug_mutex_init(struct mutex *lock, const char *name,
73860 struct lock_class_key *key);
73861
73862 static inline void mutex_set_owner(struct mutex *lock)
73863 {
73864 - lock->owner = current_thread_info();
73865 + lock->owner = current;
73866 }
73867
73868 static inline void mutex_clear_owner(struct mutex *lock)
73869 diff --git a/kernel/mutex.c b/kernel/mutex.c
73870 index f85644c..5ee9f77 100644
73871 --- a/kernel/mutex.c
73872 +++ b/kernel/mutex.c
73873 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73874 */
73875
73876 for (;;) {
73877 - struct thread_info *owner;
73878 + struct task_struct *owner;
73879
73880 /*
73881 * If we own the BKL, then don't spin. The owner of
73882 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73883 spin_lock_mutex(&lock->wait_lock, flags);
73884
73885 debug_mutex_lock_common(lock, &waiter);
73886 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73887 + debug_mutex_add_waiter(lock, &waiter, task);
73888
73889 /* add waiting tasks to the end of the waitqueue (FIFO): */
73890 list_add_tail(&waiter.list, &lock->wait_list);
73891 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73892 * TASK_UNINTERRUPTIBLE case.)
73893 */
73894 if (unlikely(signal_pending_state(state, task))) {
73895 - mutex_remove_waiter(lock, &waiter,
73896 - task_thread_info(task));
73897 + mutex_remove_waiter(lock, &waiter, task);
73898 mutex_release(&lock->dep_map, 1, ip);
73899 spin_unlock_mutex(&lock->wait_lock, flags);
73900
73901 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73902 done:
73903 lock_acquired(&lock->dep_map, ip);
73904 /* got the lock - rejoice! */
73905 - mutex_remove_waiter(lock, &waiter, current_thread_info());
73906 + mutex_remove_waiter(lock, &waiter, task);
73907 mutex_set_owner(lock);
73908
73909 /* set it to 0 if there are no waiters left: */
73910 diff --git a/kernel/mutex.h b/kernel/mutex.h
73911 index 67578ca..4115fbf 100644
73912 --- a/kernel/mutex.h
73913 +++ b/kernel/mutex.h
73914 @@ -19,7 +19,7 @@
73915 #ifdef CONFIG_SMP
73916 static inline void mutex_set_owner(struct mutex *lock)
73917 {
73918 - lock->owner = current_thread_info();
73919 + lock->owner = current;
73920 }
73921
73922 static inline void mutex_clear_owner(struct mutex *lock)
73923 diff --git a/kernel/panic.c b/kernel/panic.c
73924 index 96b45d0..ff70a46 100644
73925 --- a/kernel/panic.c
73926 +++ b/kernel/panic.c
73927 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73928 va_end(args);
73929 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73930 #ifdef CONFIG_DEBUG_BUGVERBOSE
73931 - dump_stack();
73932 + /*
73933 + * Avoid nested stack-dumping if a panic occurs during oops processing
73934 + */
73935 + if (!oops_in_progress)
73936 + dump_stack();
73937 #endif
73938
73939 /*
73940 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73941 const char *board;
73942
73943 printk(KERN_WARNING "------------[ cut here ]------------\n");
73944 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73945 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73946 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73947 if (board)
73948 printk(KERN_WARNING "Hardware name: %s\n", board);
73949 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73950 */
73951 void __stack_chk_fail(void)
73952 {
73953 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
73954 + dump_stack();
73955 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73956 __builtin_return_address(0));
73957 }
73958 EXPORT_SYMBOL(__stack_chk_fail);
73959 diff --git a/kernel/params.c b/kernel/params.c
73960 index d656c27..21e452c 100644
73961 --- a/kernel/params.c
73962 +++ b/kernel/params.c
73963 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73964 return ret;
73965 }
73966
73967 -static struct sysfs_ops module_sysfs_ops = {
73968 +static const struct sysfs_ops module_sysfs_ops = {
73969 .show = module_attr_show,
73970 .store = module_attr_store,
73971 };
73972 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73973 return 0;
73974 }
73975
73976 -static struct kset_uevent_ops module_uevent_ops = {
73977 +static const struct kset_uevent_ops module_uevent_ops = {
73978 .filter = uevent_filter,
73979 };
73980
73981 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73982 index 37ebc14..9c121d9 100644
73983 --- a/kernel/perf_event.c
73984 +++ b/kernel/perf_event.c
73985 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73986 */
73987 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73988
73989 -static atomic64_t perf_event_id;
73990 +static atomic64_unchecked_t perf_event_id;
73991
73992 /*
73993 * Lock for (sysadmin-configurable) event reservations:
73994 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73995 * In order to keep per-task stats reliable we need to flip the event
73996 * values when we flip the contexts.
73997 */
73998 - value = atomic64_read(&next_event->count);
73999 - value = atomic64_xchg(&event->count, value);
74000 - atomic64_set(&next_event->count, value);
74001 + value = atomic64_read_unchecked(&next_event->count);
74002 + value = atomic64_xchg_unchecked(&event->count, value);
74003 + atomic64_set_unchecked(&next_event->count, value);
74004
74005 swap(event->total_time_enabled, next_event->total_time_enabled);
74006 swap(event->total_time_running, next_event->total_time_running);
74007 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74008 update_event_times(event);
74009 }
74010
74011 - return atomic64_read(&event->count);
74012 + return atomic64_read_unchecked(&event->count);
74013 }
74014
74015 /*
74016 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74017 values[n++] = 1 + leader->nr_siblings;
74018 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74019 values[n++] = leader->total_time_enabled +
74020 - atomic64_read(&leader->child_total_time_enabled);
74021 + atomic64_read_unchecked(&leader->child_total_time_enabled);
74022 }
74023 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74024 values[n++] = leader->total_time_running +
74025 - atomic64_read(&leader->child_total_time_running);
74026 + atomic64_read_unchecked(&leader->child_total_time_running);
74027 }
74028
74029 size = n * sizeof(u64);
74030 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74031 values[n++] = perf_event_read_value(event);
74032 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74033 values[n++] = event->total_time_enabled +
74034 - atomic64_read(&event->child_total_time_enabled);
74035 + atomic64_read_unchecked(&event->child_total_time_enabled);
74036 }
74037 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74038 values[n++] = event->total_time_running +
74039 - atomic64_read(&event->child_total_time_running);
74040 + atomic64_read_unchecked(&event->child_total_time_running);
74041 }
74042 if (read_format & PERF_FORMAT_ID)
74043 values[n++] = primary_event_id(event);
74044 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74045 static void perf_event_reset(struct perf_event *event)
74046 {
74047 (void)perf_event_read(event);
74048 - atomic64_set(&event->count, 0);
74049 + atomic64_set_unchecked(&event->count, 0);
74050 perf_event_update_userpage(event);
74051 }
74052
74053 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74054 ++userpg->lock;
74055 barrier();
74056 userpg->index = perf_event_index(event);
74057 - userpg->offset = atomic64_read(&event->count);
74058 + userpg->offset = atomic64_read_unchecked(&event->count);
74059 if (event->state == PERF_EVENT_STATE_ACTIVE)
74060 - userpg->offset -= atomic64_read(&event->hw.prev_count);
74061 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74062
74063 userpg->time_enabled = event->total_time_enabled +
74064 - atomic64_read(&event->child_total_time_enabled);
74065 + atomic64_read_unchecked(&event->child_total_time_enabled);
74066
74067 userpg->time_running = event->total_time_running +
74068 - atomic64_read(&event->child_total_time_running);
74069 + atomic64_read_unchecked(&event->child_total_time_running);
74070
74071 barrier();
74072 ++userpg->lock;
74073 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74074 u64 values[4];
74075 int n = 0;
74076
74077 - values[n++] = atomic64_read(&event->count);
74078 + values[n++] = atomic64_read_unchecked(&event->count);
74079 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74080 values[n++] = event->total_time_enabled +
74081 - atomic64_read(&event->child_total_time_enabled);
74082 + atomic64_read_unchecked(&event->child_total_time_enabled);
74083 }
74084 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74085 values[n++] = event->total_time_running +
74086 - atomic64_read(&event->child_total_time_running);
74087 + atomic64_read_unchecked(&event->child_total_time_running);
74088 }
74089 if (read_format & PERF_FORMAT_ID)
74090 values[n++] = primary_event_id(event);
74091 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74092 if (leader != event)
74093 leader->pmu->read(leader);
74094
74095 - values[n++] = atomic64_read(&leader->count);
74096 + values[n++] = atomic64_read_unchecked(&leader->count);
74097 if (read_format & PERF_FORMAT_ID)
74098 values[n++] = primary_event_id(leader);
74099
74100 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74101 if (sub != event)
74102 sub->pmu->read(sub);
74103
74104 - values[n++] = atomic64_read(&sub->count);
74105 + values[n++] = atomic64_read_unchecked(&sub->count);
74106 if (read_format & PERF_FORMAT_ID)
74107 values[n++] = primary_event_id(sub);
74108
74109 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74110 * need to add enough zero bytes after the string to handle
74111 * the 64bit alignment we do later.
74112 */
74113 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74114 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74115 if (!buf) {
74116 name = strncpy(tmp, "//enomem", sizeof(tmp));
74117 goto got_name;
74118 }
74119 - name = d_path(&file->f_path, buf, PATH_MAX);
74120 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74121 if (IS_ERR(name)) {
74122 name = strncpy(tmp, "//toolong", sizeof(tmp));
74123 goto got_name;
74124 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74125 {
74126 struct hw_perf_event *hwc = &event->hw;
74127
74128 - atomic64_add(nr, &event->count);
74129 + atomic64_add_unchecked(nr, &event->count);
74130
74131 if (!hwc->sample_period)
74132 return;
74133 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74134 u64 now;
74135
74136 now = cpu_clock(cpu);
74137 - prev = atomic64_read(&event->hw.prev_count);
74138 - atomic64_set(&event->hw.prev_count, now);
74139 - atomic64_add(now - prev, &event->count);
74140 + prev = atomic64_read_unchecked(&event->hw.prev_count);
74141 + atomic64_set_unchecked(&event->hw.prev_count, now);
74142 + atomic64_add_unchecked(now - prev, &event->count);
74143 }
74144
74145 static int cpu_clock_perf_event_enable(struct perf_event *event)
74146 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74147 struct hw_perf_event *hwc = &event->hw;
74148 int cpu = raw_smp_processor_id();
74149
74150 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74151 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74152 perf_swevent_start_hrtimer(event);
74153
74154 return 0;
74155 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74156 u64 prev;
74157 s64 delta;
74158
74159 - prev = atomic64_xchg(&event->hw.prev_count, now);
74160 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74161 delta = now - prev;
74162 - atomic64_add(delta, &event->count);
74163 + atomic64_add_unchecked(delta, &event->count);
74164 }
74165
74166 static int task_clock_perf_event_enable(struct perf_event *event)
74167 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74168
74169 now = event->ctx->time;
74170
74171 - atomic64_set(&hwc->prev_count, now);
74172 + atomic64_set_unchecked(&hwc->prev_count, now);
74173
74174 perf_swevent_start_hrtimer(event);
74175
74176 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74177 event->parent = parent_event;
74178
74179 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74180 - event->id = atomic64_inc_return(&perf_event_id);
74181 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
74182
74183 event->state = PERF_EVENT_STATE_INACTIVE;
74184
74185 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74186 if (child_event->attr.inherit_stat)
74187 perf_event_read_event(child_event, child);
74188
74189 - child_val = atomic64_read(&child_event->count);
74190 + child_val = atomic64_read_unchecked(&child_event->count);
74191
74192 /*
74193 * Add back the child's count to the parent's count:
74194 */
74195 - atomic64_add(child_val, &parent_event->count);
74196 - atomic64_add(child_event->total_time_enabled,
74197 + atomic64_add_unchecked(child_val, &parent_event->count);
74198 + atomic64_add_unchecked(child_event->total_time_enabled,
74199 &parent_event->child_total_time_enabled);
74200 - atomic64_add(child_event->total_time_running,
74201 + atomic64_add_unchecked(child_event->total_time_running,
74202 &parent_event->child_total_time_running);
74203
74204 /*
74205 diff --git a/kernel/pid.c b/kernel/pid.c
74206 index fce7198..4f23a7e 100644
74207 --- a/kernel/pid.c
74208 +++ b/kernel/pid.c
74209 @@ -33,6 +33,7 @@
74210 #include <linux/rculist.h>
74211 #include <linux/bootmem.h>
74212 #include <linux/hash.h>
74213 +#include <linux/security.h>
74214 #include <linux/pid_namespace.h>
74215 #include <linux/init_task.h>
74216 #include <linux/syscalls.h>
74217 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74218
74219 int pid_max = PID_MAX_DEFAULT;
74220
74221 -#define RESERVED_PIDS 300
74222 +#define RESERVED_PIDS 500
74223
74224 int pid_max_min = RESERVED_PIDS + 1;
74225 int pid_max_max = PID_MAX_LIMIT;
74226 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74227 */
74228 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74229 {
74230 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74231 + struct task_struct *task;
74232 +
74233 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74234 +
74235 + if (gr_pid_is_chrooted(task))
74236 + return NULL;
74237 +
74238 + return task;
74239 }
74240
74241 struct task_struct *find_task_by_vpid(pid_t vnr)
74242 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74243 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74244 }
74245
74246 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74247 +{
74248 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74249 +}
74250 +
74251 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74252 {
74253 struct pid *pid;
74254 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74255 index 5c9dc22..d271117 100644
74256 --- a/kernel/posix-cpu-timers.c
74257 +++ b/kernel/posix-cpu-timers.c
74258 @@ -6,6 +6,7 @@
74259 #include <linux/posix-timers.h>
74260 #include <linux/errno.h>
74261 #include <linux/math64.h>
74262 +#include <linux/security.h>
74263 #include <asm/uaccess.h>
74264 #include <linux/kernel_stat.h>
74265 #include <trace/events/timer.h>
74266 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74267
74268 static __init int init_posix_cpu_timers(void)
74269 {
74270 - struct k_clock process = {
74271 + static struct k_clock process = {
74272 .clock_getres = process_cpu_clock_getres,
74273 .clock_get = process_cpu_clock_get,
74274 .clock_set = do_posix_clock_nosettime,
74275 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74276 .nsleep = process_cpu_nsleep,
74277 .nsleep_restart = process_cpu_nsleep_restart,
74278 };
74279 - struct k_clock thread = {
74280 + static struct k_clock thread = {
74281 .clock_getres = thread_cpu_clock_getres,
74282 .clock_get = thread_cpu_clock_get,
74283 .clock_set = do_posix_clock_nosettime,
74284 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74285 index 5e76d22..cf1baeb 100644
74286 --- a/kernel/posix-timers.c
74287 +++ b/kernel/posix-timers.c
74288 @@ -42,6 +42,7 @@
74289 #include <linux/compiler.h>
74290 #include <linux/idr.h>
74291 #include <linux/posix-timers.h>
74292 +#include <linux/grsecurity.h>
74293 #include <linux/syscalls.h>
74294 #include <linux/wait.h>
74295 #include <linux/workqueue.h>
74296 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74297 * which we beg off on and pass to do_sys_settimeofday().
74298 */
74299
74300 -static struct k_clock posix_clocks[MAX_CLOCKS];
74301 +static struct k_clock *posix_clocks[MAX_CLOCKS];
74302
74303 /*
74304 * These ones are defined below.
74305 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74306 */
74307 #define CLOCK_DISPATCH(clock, call, arglist) \
74308 ((clock) < 0 ? posix_cpu_##call arglist : \
74309 - (posix_clocks[clock].call != NULL \
74310 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74311 + (posix_clocks[clock]->call != NULL \
74312 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74313
74314 /*
74315 * Default clock hook functions when the struct k_clock passed
74316 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74317 struct timespec *tp)
74318 {
74319 tp->tv_sec = 0;
74320 - tp->tv_nsec = posix_clocks[which_clock].res;
74321 + tp->tv_nsec = posix_clocks[which_clock]->res;
74322 return 0;
74323 }
74324
74325 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74326 return 0;
74327 if ((unsigned) which_clock >= MAX_CLOCKS)
74328 return 1;
74329 - if (posix_clocks[which_clock].clock_getres != NULL)
74330 + if (posix_clocks[which_clock] == NULL)
74331 return 0;
74332 - if (posix_clocks[which_clock].res != 0)
74333 + if (posix_clocks[which_clock]->clock_getres != NULL)
74334 + return 0;
74335 + if (posix_clocks[which_clock]->res != 0)
74336 return 0;
74337 return 1;
74338 }
74339 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74340 */
74341 static __init int init_posix_timers(void)
74342 {
74343 - struct k_clock clock_realtime = {
74344 + static struct k_clock clock_realtime = {
74345 .clock_getres = hrtimer_get_res,
74346 };
74347 - struct k_clock clock_monotonic = {
74348 + static struct k_clock clock_monotonic = {
74349 .clock_getres = hrtimer_get_res,
74350 .clock_get = posix_ktime_get_ts,
74351 .clock_set = do_posix_clock_nosettime,
74352 };
74353 - struct k_clock clock_monotonic_raw = {
74354 + static struct k_clock clock_monotonic_raw = {
74355 .clock_getres = hrtimer_get_res,
74356 .clock_get = posix_get_monotonic_raw,
74357 .clock_set = do_posix_clock_nosettime,
74358 .timer_create = no_timer_create,
74359 .nsleep = no_nsleep,
74360 };
74361 - struct k_clock clock_realtime_coarse = {
74362 + static struct k_clock clock_realtime_coarse = {
74363 .clock_getres = posix_get_coarse_res,
74364 .clock_get = posix_get_realtime_coarse,
74365 .clock_set = do_posix_clock_nosettime,
74366 .timer_create = no_timer_create,
74367 .nsleep = no_nsleep,
74368 };
74369 - struct k_clock clock_monotonic_coarse = {
74370 + static struct k_clock clock_monotonic_coarse = {
74371 .clock_getres = posix_get_coarse_res,
74372 .clock_get = posix_get_monotonic_coarse,
74373 .clock_set = do_posix_clock_nosettime,
74374 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74375 .nsleep = no_nsleep,
74376 };
74377
74378 + pax_track_stack();
74379 +
74380 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74381 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74382 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74383 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74384 return;
74385 }
74386
74387 - posix_clocks[clock_id] = *new_clock;
74388 + posix_clocks[clock_id] = new_clock;
74389 }
74390 EXPORT_SYMBOL_GPL(register_posix_clock);
74391
74392 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74393 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74394 return -EFAULT;
74395
74396 + /* only the CLOCK_REALTIME clock can be set, all other clocks
74397 + have their clock_set fptr set to a nosettime dummy function
74398 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74399 + call common_clock_set, which calls do_sys_settimeofday, which
74400 + we hook
74401 + */
74402 +
74403 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74404 }
74405
74406 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74407 index 04a9e90..bc355aa 100644
74408 --- a/kernel/power/hibernate.c
74409 +++ b/kernel/power/hibernate.c
74410 @@ -48,14 +48,14 @@ enum {
74411
74412 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74413
74414 -static struct platform_hibernation_ops *hibernation_ops;
74415 +static const struct platform_hibernation_ops *hibernation_ops;
74416
74417 /**
74418 * hibernation_set_ops - set the global hibernate operations
74419 * @ops: the hibernation operations to use in subsequent hibernation transitions
74420 */
74421
74422 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
74423 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74424 {
74425 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74426 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74427 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74428 index e8b3370..484c2e4 100644
74429 --- a/kernel/power/poweroff.c
74430 +++ b/kernel/power/poweroff.c
74431 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74432 .enable_mask = SYSRQ_ENABLE_BOOT,
74433 };
74434
74435 -static int pm_sysrq_init(void)
74436 +static int __init pm_sysrq_init(void)
74437 {
74438 register_sysrq_key('o', &sysrq_poweroff_op);
74439 return 0;
74440 diff --git a/kernel/power/process.c b/kernel/power/process.c
74441 index e7cd671..56d5f459 100644
74442 --- a/kernel/power/process.c
74443 +++ b/kernel/power/process.c
74444 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74445 struct timeval start, end;
74446 u64 elapsed_csecs64;
74447 unsigned int elapsed_csecs;
74448 + bool timedout = false;
74449
74450 do_gettimeofday(&start);
74451
74452 end_time = jiffies + TIMEOUT;
74453 do {
74454 todo = 0;
74455 + if (time_after(jiffies, end_time))
74456 + timedout = true;
74457 read_lock(&tasklist_lock);
74458 do_each_thread(g, p) {
74459 if (frozen(p) || !freezeable(p))
74460 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74461 * It is "frozen enough". If the task does wake
74462 * up, it will immediately call try_to_freeze.
74463 */
74464 - if (!task_is_stopped_or_traced(p) &&
74465 - !freezer_should_skip(p))
74466 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74467 todo++;
74468 + if (timedout) {
74469 + printk(KERN_ERR "Task refusing to freeze:\n");
74470 + sched_show_task(p);
74471 + }
74472 + }
74473 } while_each_thread(g, p);
74474 read_unlock(&tasklist_lock);
74475 yield(); /* Yield is okay here */
74476 - if (time_after(jiffies, end_time))
74477 - break;
74478 - } while (todo);
74479 + } while (todo && !timedout);
74480
74481 do_gettimeofday(&end);
74482 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74483 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74484 index 40dd021..fb30ceb 100644
74485 --- a/kernel/power/suspend.c
74486 +++ b/kernel/power/suspend.c
74487 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74488 [PM_SUSPEND_MEM] = "mem",
74489 };
74490
74491 -static struct platform_suspend_ops *suspend_ops;
74492 +static const struct platform_suspend_ops *suspend_ops;
74493
74494 /**
74495 * suspend_set_ops - Set the global suspend method table.
74496 * @ops: Pointer to ops structure.
74497 */
74498 -void suspend_set_ops(struct platform_suspend_ops *ops)
74499 +void suspend_set_ops(const struct platform_suspend_ops *ops)
74500 {
74501 mutex_lock(&pm_mutex);
74502 suspend_ops = ops;
74503 diff --git a/kernel/printk.c b/kernel/printk.c
74504 index 4cade47..4d17900 100644
74505 --- a/kernel/printk.c
74506 +++ b/kernel/printk.c
74507 @@ -33,6 +33,7 @@
74508 #include <linux/bootmem.h>
74509 #include <linux/syscalls.h>
74510 #include <linux/kexec.h>
74511 +#include <linux/syslog.h>
74512
74513 #include <asm/uaccess.h>
74514
74515 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74516 }
74517 #endif
74518
74519 -/*
74520 - * Commands to do_syslog:
74521 - *
74522 - * 0 -- Close the log. Currently a NOP.
74523 - * 1 -- Open the log. Currently a NOP.
74524 - * 2 -- Read from the log.
74525 - * 3 -- Read all messages remaining in the ring buffer.
74526 - * 4 -- Read and clear all messages remaining in the ring buffer
74527 - * 5 -- Clear ring buffer.
74528 - * 6 -- Disable printk's to console
74529 - * 7 -- Enable printk's to console
74530 - * 8 -- Set level of messages printed to console
74531 - * 9 -- Return number of unread characters in the log buffer
74532 - * 10 -- Return size of the log buffer
74533 - */
74534 -int do_syslog(int type, char __user *buf, int len)
74535 +int do_syslog(int type, char __user *buf, int len, bool from_file)
74536 {
74537 unsigned i, j, limit, count;
74538 int do_clear = 0;
74539 char c;
74540 int error = 0;
74541
74542 - error = security_syslog(type);
74543 +#ifdef CONFIG_GRKERNSEC_DMESG
74544 + if (grsec_enable_dmesg &&
74545 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74546 + !capable(CAP_SYS_ADMIN))
74547 + return -EPERM;
74548 +#endif
74549 +
74550 + error = security_syslog(type, from_file);
74551 if (error)
74552 return error;
74553
74554 switch (type) {
74555 - case 0: /* Close log */
74556 + case SYSLOG_ACTION_CLOSE: /* Close log */
74557 break;
74558 - case 1: /* Open log */
74559 + case SYSLOG_ACTION_OPEN: /* Open log */
74560 break;
74561 - case 2: /* Read from log */
74562 + case SYSLOG_ACTION_READ: /* Read from log */
74563 error = -EINVAL;
74564 if (!buf || len < 0)
74565 goto out;
74566 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74567 if (!error)
74568 error = i;
74569 break;
74570 - case 4: /* Read/clear last kernel messages */
74571 + /* Read/clear last kernel messages */
74572 + case SYSLOG_ACTION_READ_CLEAR:
74573 do_clear = 1;
74574 /* FALL THRU */
74575 - case 3: /* Read last kernel messages */
74576 + /* Read last kernel messages */
74577 + case SYSLOG_ACTION_READ_ALL:
74578 error = -EINVAL;
74579 if (!buf || len < 0)
74580 goto out;
74581 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74582 }
74583 }
74584 break;
74585 - case 5: /* Clear ring buffer */
74586 + /* Clear ring buffer */
74587 + case SYSLOG_ACTION_CLEAR:
74588 logged_chars = 0;
74589 break;
74590 - case 6: /* Disable logging to console */
74591 + /* Disable logging to console */
74592 + case SYSLOG_ACTION_CONSOLE_OFF:
74593 if (saved_console_loglevel == -1)
74594 saved_console_loglevel = console_loglevel;
74595 console_loglevel = minimum_console_loglevel;
74596 break;
74597 - case 7: /* Enable logging to console */
74598 + /* Enable logging to console */
74599 + case SYSLOG_ACTION_CONSOLE_ON:
74600 if (saved_console_loglevel != -1) {
74601 console_loglevel = saved_console_loglevel;
74602 saved_console_loglevel = -1;
74603 }
74604 break;
74605 - case 8: /* Set level of messages printed to console */
74606 + /* Set level of messages printed to console */
74607 + case SYSLOG_ACTION_CONSOLE_LEVEL:
74608 error = -EINVAL;
74609 if (len < 1 || len > 8)
74610 goto out;
74611 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74612 saved_console_loglevel = -1;
74613 error = 0;
74614 break;
74615 - case 9: /* Number of chars in the log buffer */
74616 + /* Number of chars in the log buffer */
74617 + case SYSLOG_ACTION_SIZE_UNREAD:
74618 error = log_end - log_start;
74619 break;
74620 - case 10: /* Size of the log buffer */
74621 + /* Size of the log buffer */
74622 + case SYSLOG_ACTION_SIZE_BUFFER:
74623 error = log_buf_len;
74624 break;
74625 default:
74626 @@ -415,7 +416,7 @@ out:
74627
74628 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74629 {
74630 - return do_syslog(type, buf, len);
74631 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74632 }
74633
74634 /*
74635 diff --git a/kernel/profile.c b/kernel/profile.c
74636 index dfadc5b..7f59404 100644
74637 --- a/kernel/profile.c
74638 +++ b/kernel/profile.c
74639 @@ -39,7 +39,7 @@ struct profile_hit {
74640 /* Oprofile timer tick hook */
74641 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74642
74643 -static atomic_t *prof_buffer;
74644 +static atomic_unchecked_t *prof_buffer;
74645 static unsigned long prof_len, prof_shift;
74646
74647 int prof_on __read_mostly;
74648 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74649 hits[i].pc = 0;
74650 continue;
74651 }
74652 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74653 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74654 hits[i].hits = hits[i].pc = 0;
74655 }
74656 }
74657 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74658 * Add the current hit(s) and flush the write-queue out
74659 * to the global buffer:
74660 */
74661 - atomic_add(nr_hits, &prof_buffer[pc]);
74662 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74663 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74664 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74665 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74666 hits[i].pc = hits[i].hits = 0;
74667 }
74668 out:
74669 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74670 if (prof_on != type || !prof_buffer)
74671 return;
74672 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74673 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74674 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74675 }
74676 #endif /* !CONFIG_SMP */
74677 EXPORT_SYMBOL_GPL(profile_hits);
74678 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74679 return -EFAULT;
74680 buf++; p++; count--; read++;
74681 }
74682 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74683 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74684 if (copy_to_user(buf, (void *)pnt, count))
74685 return -EFAULT;
74686 read += count;
74687 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74688 }
74689 #endif
74690 profile_discard_flip_buffers();
74691 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74692 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74693 return count;
74694 }
74695
74696 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74697 index 05625f6..733bf70 100644
74698 --- a/kernel/ptrace.c
74699 +++ b/kernel/ptrace.c
74700 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74701 return ret;
74702 }
74703
74704 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74705 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74706 + unsigned int log)
74707 {
74708 const struct cred *cred = current_cred(), *tcred;
74709
74710 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74711 cred->gid != tcred->egid ||
74712 cred->gid != tcred->sgid ||
74713 cred->gid != tcred->gid) &&
74714 - !capable(CAP_SYS_PTRACE)) {
74715 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74716 + (log && !capable(CAP_SYS_PTRACE)))
74717 + ) {
74718 rcu_read_unlock();
74719 return -EPERM;
74720 }
74721 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74722 smp_rmb();
74723 if (task->mm)
74724 dumpable = get_dumpable(task->mm);
74725 - if (!dumpable && !capable(CAP_SYS_PTRACE))
74726 + if (!dumpable &&
74727 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74728 + (log && !capable(CAP_SYS_PTRACE))))
74729 return -EPERM;
74730
74731 return security_ptrace_access_check(task, mode);
74732 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74733 {
74734 int err;
74735 task_lock(task);
74736 - err = __ptrace_may_access(task, mode);
74737 + err = __ptrace_may_access(task, mode, 0);
74738 + task_unlock(task);
74739 + return !err;
74740 +}
74741 +
74742 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74743 +{
74744 + int err;
74745 + task_lock(task);
74746 + err = __ptrace_may_access(task, mode, 1);
74747 task_unlock(task);
74748 return !err;
74749 }
74750 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74751 goto out;
74752
74753 task_lock(task);
74754 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74755 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74756 task_unlock(task);
74757 if (retval)
74758 goto unlock_creds;
74759 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74760 goto unlock_tasklist;
74761
74762 task->ptrace = PT_PTRACED;
74763 - if (capable(CAP_SYS_PTRACE))
74764 + if (capable_nolog(CAP_SYS_PTRACE))
74765 task->ptrace |= PT_PTRACE_CAP;
74766
74767 __ptrace_link(task, current);
74768 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74769 {
74770 int copied = 0;
74771
74772 + pax_track_stack();
74773 +
74774 while (len > 0) {
74775 char buf[128];
74776 int this_len, retval;
74777 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74778 {
74779 int copied = 0;
74780
74781 + pax_track_stack();
74782 +
74783 while (len > 0) {
74784 char buf[128];
74785 int this_len, retval;
74786 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74787 int ret = -EIO;
74788 siginfo_t siginfo;
74789
74790 + pax_track_stack();
74791 +
74792 switch (request) {
74793 case PTRACE_PEEKTEXT:
74794 case PTRACE_PEEKDATA:
74795 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74796 ret = ptrace_setoptions(child, data);
74797 break;
74798 case PTRACE_GETEVENTMSG:
74799 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74800 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74801 break;
74802
74803 case PTRACE_GETSIGINFO:
74804 ret = ptrace_getsiginfo(child, &siginfo);
74805 if (!ret)
74806 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
74807 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74808 &siginfo);
74809 break;
74810
74811 case PTRACE_SETSIGINFO:
74812 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74813 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74814 sizeof siginfo))
74815 ret = -EFAULT;
74816 else
74817 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74818 goto out;
74819 }
74820
74821 + if (gr_handle_ptrace(child, request)) {
74822 + ret = -EPERM;
74823 + goto out_put_task_struct;
74824 + }
74825 +
74826 if (request == PTRACE_ATTACH) {
74827 ret = ptrace_attach(child);
74828 /*
74829 * Some architectures need to do book-keeping after
74830 * a ptrace attach.
74831 */
74832 - if (!ret)
74833 + if (!ret) {
74834 arch_ptrace_attach(child);
74835 + gr_audit_ptrace(child);
74836 + }
74837 goto out_put_task_struct;
74838 }
74839
74840 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74841 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74842 if (copied != sizeof(tmp))
74843 return -EIO;
74844 - return put_user(tmp, (unsigned long __user *)data);
74845 + return put_user(tmp, (__force unsigned long __user *)data);
74846 }
74847
74848 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74849 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74850 siginfo_t siginfo;
74851 int ret;
74852
74853 + pax_track_stack();
74854 +
74855 switch (request) {
74856 case PTRACE_PEEKTEXT:
74857 case PTRACE_PEEKDATA:
74858 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74859 goto out;
74860 }
74861
74862 + if (gr_handle_ptrace(child, request)) {
74863 + ret = -EPERM;
74864 + goto out_put_task_struct;
74865 + }
74866 +
74867 if (request == PTRACE_ATTACH) {
74868 ret = ptrace_attach(child);
74869 /*
74870 * Some architectures need to do book-keeping after
74871 * a ptrace attach.
74872 */
74873 - if (!ret)
74874 + if (!ret) {
74875 arch_ptrace_attach(child);
74876 + gr_audit_ptrace(child);
74877 + }
74878 goto out_put_task_struct;
74879 }
74880
74881 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74882 index 697c0a0..2402696 100644
74883 --- a/kernel/rcutorture.c
74884 +++ b/kernel/rcutorture.c
74885 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74886 { 0 };
74887 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74888 { 0 };
74889 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74890 -static atomic_t n_rcu_torture_alloc;
74891 -static atomic_t n_rcu_torture_alloc_fail;
74892 -static atomic_t n_rcu_torture_free;
74893 -static atomic_t n_rcu_torture_mberror;
74894 -static atomic_t n_rcu_torture_error;
74895 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74896 +static atomic_unchecked_t n_rcu_torture_alloc;
74897 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
74898 +static atomic_unchecked_t n_rcu_torture_free;
74899 +static atomic_unchecked_t n_rcu_torture_mberror;
74900 +static atomic_unchecked_t n_rcu_torture_error;
74901 static long n_rcu_torture_timers;
74902 static struct list_head rcu_torture_removed;
74903 static cpumask_var_t shuffle_tmp_mask;
74904 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74905
74906 spin_lock_bh(&rcu_torture_lock);
74907 if (list_empty(&rcu_torture_freelist)) {
74908 - atomic_inc(&n_rcu_torture_alloc_fail);
74909 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74910 spin_unlock_bh(&rcu_torture_lock);
74911 return NULL;
74912 }
74913 - atomic_inc(&n_rcu_torture_alloc);
74914 + atomic_inc_unchecked(&n_rcu_torture_alloc);
74915 p = rcu_torture_freelist.next;
74916 list_del_init(p);
74917 spin_unlock_bh(&rcu_torture_lock);
74918 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74919 static void
74920 rcu_torture_free(struct rcu_torture *p)
74921 {
74922 - atomic_inc(&n_rcu_torture_free);
74923 + atomic_inc_unchecked(&n_rcu_torture_free);
74924 spin_lock_bh(&rcu_torture_lock);
74925 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74926 spin_unlock_bh(&rcu_torture_lock);
74927 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74928 i = rp->rtort_pipe_count;
74929 if (i > RCU_TORTURE_PIPE_LEN)
74930 i = RCU_TORTURE_PIPE_LEN;
74931 - atomic_inc(&rcu_torture_wcount[i]);
74932 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74933 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74934 rp->rtort_mbtest = 0;
74935 rcu_torture_free(rp);
74936 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74937 i = rp->rtort_pipe_count;
74938 if (i > RCU_TORTURE_PIPE_LEN)
74939 i = RCU_TORTURE_PIPE_LEN;
74940 - atomic_inc(&rcu_torture_wcount[i]);
74941 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74942 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74943 rp->rtort_mbtest = 0;
74944 list_del(&rp->rtort_free);
74945 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74946 i = old_rp->rtort_pipe_count;
74947 if (i > RCU_TORTURE_PIPE_LEN)
74948 i = RCU_TORTURE_PIPE_LEN;
74949 - atomic_inc(&rcu_torture_wcount[i]);
74950 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74951 old_rp->rtort_pipe_count++;
74952 cur_ops->deferred_free(old_rp);
74953 }
74954 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74955 return;
74956 }
74957 if (p->rtort_mbtest == 0)
74958 - atomic_inc(&n_rcu_torture_mberror);
74959 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74960 spin_lock(&rand_lock);
74961 cur_ops->read_delay(&rand);
74962 n_rcu_torture_timers++;
74963 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74964 continue;
74965 }
74966 if (p->rtort_mbtest == 0)
74967 - atomic_inc(&n_rcu_torture_mberror);
74968 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74969 cur_ops->read_delay(&rand);
74970 preempt_disable();
74971 pipe_count = p->rtort_pipe_count;
74972 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74973 rcu_torture_current,
74974 rcu_torture_current_version,
74975 list_empty(&rcu_torture_freelist),
74976 - atomic_read(&n_rcu_torture_alloc),
74977 - atomic_read(&n_rcu_torture_alloc_fail),
74978 - atomic_read(&n_rcu_torture_free),
74979 - atomic_read(&n_rcu_torture_mberror),
74980 + atomic_read_unchecked(&n_rcu_torture_alloc),
74981 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74982 + atomic_read_unchecked(&n_rcu_torture_free),
74983 + atomic_read_unchecked(&n_rcu_torture_mberror),
74984 n_rcu_torture_timers);
74985 - if (atomic_read(&n_rcu_torture_mberror) != 0)
74986 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74987 cnt += sprintf(&page[cnt], " !!!");
74988 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74989 if (i > 1) {
74990 cnt += sprintf(&page[cnt], "!!! ");
74991 - atomic_inc(&n_rcu_torture_error);
74992 + atomic_inc_unchecked(&n_rcu_torture_error);
74993 WARN_ON_ONCE(1);
74994 }
74995 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74996 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74997 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74998 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74999 cnt += sprintf(&page[cnt], " %d",
75000 - atomic_read(&rcu_torture_wcount[i]));
75001 + atomic_read_unchecked(&rcu_torture_wcount[i]));
75002 }
75003 cnt += sprintf(&page[cnt], "\n");
75004 if (cur_ops->stats)
75005 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75006
75007 if (cur_ops->cleanup)
75008 cur_ops->cleanup();
75009 - if (atomic_read(&n_rcu_torture_error))
75010 + if (atomic_read_unchecked(&n_rcu_torture_error))
75011 rcu_torture_print_module_parms("End of test: FAILURE");
75012 else
75013 rcu_torture_print_module_parms("End of test: SUCCESS");
75014 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75015
75016 rcu_torture_current = NULL;
75017 rcu_torture_current_version = 0;
75018 - atomic_set(&n_rcu_torture_alloc, 0);
75019 - atomic_set(&n_rcu_torture_alloc_fail, 0);
75020 - atomic_set(&n_rcu_torture_free, 0);
75021 - atomic_set(&n_rcu_torture_mberror, 0);
75022 - atomic_set(&n_rcu_torture_error, 0);
75023 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75024 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75025 + atomic_set_unchecked(&n_rcu_torture_free, 0);
75026 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75027 + atomic_set_unchecked(&n_rcu_torture_error, 0);
75028 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75029 - atomic_set(&rcu_torture_wcount[i], 0);
75030 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75031 for_each_possible_cpu(cpu) {
75032 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75033 per_cpu(rcu_torture_count, cpu)[i] = 0;
75034 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75035 index 683c4f3..97f54c6 100644
75036 --- a/kernel/rcutree.c
75037 +++ b/kernel/rcutree.c
75038 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75039 /*
75040 * Do softirq processing for the current CPU.
75041 */
75042 -static void rcu_process_callbacks(struct softirq_action *unused)
75043 +static void rcu_process_callbacks(void)
75044 {
75045 /*
75046 * Memory references from any prior RCU read-side critical sections
75047 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75048 index c03edf7..ac1b341 100644
75049 --- a/kernel/rcutree_plugin.h
75050 +++ b/kernel/rcutree_plugin.h
75051 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75052 */
75053 void __rcu_read_lock(void)
75054 {
75055 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75056 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75057 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75058 }
75059 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75060 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75061 struct task_struct *t = current;
75062
75063 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75064 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75065 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75066 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75067 rcu_read_unlock_special(t);
75068 }
75069 diff --git a/kernel/relay.c b/kernel/relay.c
75070 index 760c262..908e9ee 100644
75071 --- a/kernel/relay.c
75072 +++ b/kernel/relay.c
75073 @@ -171,10 +171,14 @@ depopulate:
75074 */
75075 static struct rchan_buf *relay_create_buf(struct rchan *chan)
75076 {
75077 - struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75078 + struct rchan_buf *buf;
75079 +
75080 + if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
75081 + return NULL;
75082 +
75083 + buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75084 if (!buf)
75085 return NULL;
75086 -
75087 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
75088 if (!buf->padding)
75089 goto free_buf;
75090 @@ -581,6 +585,8 @@ struct rchan *relay_open(const char *base_filename,
75091
75092 if (!(subbuf_size && n_subbufs))
75093 return NULL;
75094 + if (subbuf_size > UINT_MAX / n_subbufs)
75095 + return NULL;
75096
75097 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
75098 if (!chan)
75099 @@ -1222,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75100 unsigned int flags,
75101 int *nonpad_ret)
75102 {
75103 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75104 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75105 struct rchan_buf *rbuf = in->private_data;
75106 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75107 uint64_t pos = (uint64_t) *ppos;
75108 @@ -1241,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75109 .ops = &relay_pipe_buf_ops,
75110 .spd_release = relay_page_release,
75111 };
75112 + ssize_t ret;
75113 +
75114 + pax_track_stack();
75115
75116 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75117 return 0;
75118 diff --git a/kernel/resource.c b/kernel/resource.c
75119 index fb11a58..4e61ae1 100644
75120 --- a/kernel/resource.c
75121 +++ b/kernel/resource.c
75122 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75123
75124 static int __init ioresources_init(void)
75125 {
75126 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75127 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75128 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75129 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75130 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75131 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75132 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75133 +#endif
75134 +#else
75135 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75136 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75137 +#endif
75138 return 0;
75139 }
75140 __initcall(ioresources_init);
75141 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75142 index a56f629..1fc4989 100644
75143 --- a/kernel/rtmutex-tester.c
75144 +++ b/kernel/rtmutex-tester.c
75145 @@ -21,7 +21,7 @@
75146 #define MAX_RT_TEST_MUTEXES 8
75147
75148 static spinlock_t rttest_lock;
75149 -static atomic_t rttest_event;
75150 +static atomic_unchecked_t rttest_event;
75151
75152 struct test_thread_data {
75153 int opcode;
75154 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75155
75156 case RTTEST_LOCKCONT:
75157 td->mutexes[td->opdata] = 1;
75158 - td->event = atomic_add_return(1, &rttest_event);
75159 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75160 return 0;
75161
75162 case RTTEST_RESET:
75163 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75164 return 0;
75165
75166 case RTTEST_RESETEVENT:
75167 - atomic_set(&rttest_event, 0);
75168 + atomic_set_unchecked(&rttest_event, 0);
75169 return 0;
75170
75171 default:
75172 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75173 return ret;
75174
75175 td->mutexes[id] = 1;
75176 - td->event = atomic_add_return(1, &rttest_event);
75177 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75178 rt_mutex_lock(&mutexes[id]);
75179 - td->event = atomic_add_return(1, &rttest_event);
75180 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75181 td->mutexes[id] = 4;
75182 return 0;
75183
75184 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75185 return ret;
75186
75187 td->mutexes[id] = 1;
75188 - td->event = atomic_add_return(1, &rttest_event);
75189 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75190 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75191 - td->event = atomic_add_return(1, &rttest_event);
75192 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75193 td->mutexes[id] = ret ? 0 : 4;
75194 return ret ? -EINTR : 0;
75195
75196 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75197 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75198 return ret;
75199
75200 - td->event = atomic_add_return(1, &rttest_event);
75201 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75202 rt_mutex_unlock(&mutexes[id]);
75203 - td->event = atomic_add_return(1, &rttest_event);
75204 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75205 td->mutexes[id] = 0;
75206 return 0;
75207
75208 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75209 break;
75210
75211 td->mutexes[dat] = 2;
75212 - td->event = atomic_add_return(1, &rttest_event);
75213 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75214 break;
75215
75216 case RTTEST_LOCKBKL:
75217 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75218 return;
75219
75220 td->mutexes[dat] = 3;
75221 - td->event = atomic_add_return(1, &rttest_event);
75222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75223 break;
75224
75225 case RTTEST_LOCKNOWAIT:
75226 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75227 return;
75228
75229 td->mutexes[dat] = 1;
75230 - td->event = atomic_add_return(1, &rttest_event);
75231 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75232 return;
75233
75234 case RTTEST_LOCKBKL:
75235 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75236 index 29bd4ba..8c5de90 100644
75237 --- a/kernel/rtmutex.c
75238 +++ b/kernel/rtmutex.c
75239 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75240 */
75241 spin_lock_irqsave(&pendowner->pi_lock, flags);
75242
75243 - WARN_ON(!pendowner->pi_blocked_on);
75244 + BUG_ON(!pendowner->pi_blocked_on);
75245 WARN_ON(pendowner->pi_blocked_on != waiter);
75246 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75247
75248 diff --git a/kernel/sched.c b/kernel/sched.c
75249 index 0591df8..e3af3a4 100644
75250 --- a/kernel/sched.c
75251 +++ b/kernel/sched.c
75252 @@ -5043,7 +5043,7 @@ out:
75253 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75254 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75255 */
75256 -static void run_rebalance_domains(struct softirq_action *h)
75257 +static void run_rebalance_domains(void)
75258 {
75259 int this_cpu = smp_processor_id();
75260 struct rq *this_rq = cpu_rq(this_cpu);
75261 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75262 }
75263 }
75264
75265 +#ifdef CONFIG_GRKERNSEC_SETXID
75266 +extern void gr_delayed_cred_worker(void);
75267 +static inline void gr_cred_schedule(void)
75268 +{
75269 + if (unlikely(current->delayed_cred))
75270 + gr_delayed_cred_worker();
75271 +}
75272 +#else
75273 +static inline void gr_cred_schedule(void)
75274 +{
75275 +}
75276 +#endif
75277 +
75278 /*
75279 * schedule() is the main scheduler function.
75280 */
75281 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75282 struct rq *rq;
75283 int cpu;
75284
75285 + pax_track_stack();
75286 +
75287 need_resched:
75288 preempt_disable();
75289 cpu = smp_processor_id();
75290 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75291
75292 schedule_debug(prev);
75293
75294 + gr_cred_schedule();
75295 +
75296 if (sched_feat(HRTICK))
75297 hrtick_clear(rq);
75298
75299 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75300 * Look out! "owner" is an entirely speculative pointer
75301 * access and not reliable.
75302 */
75303 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75304 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75305 {
75306 unsigned int cpu;
75307 struct rq *rq;
75308 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75309 * DEBUG_PAGEALLOC could have unmapped it if
75310 * the mutex owner just released it and exited.
75311 */
75312 - if (probe_kernel_address(&owner->cpu, cpu))
75313 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75314 return 0;
75315 #else
75316 - cpu = owner->cpu;
75317 + cpu = task_thread_info(owner)->cpu;
75318 #endif
75319
75320 /*
75321 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75322 /*
75323 * Is that owner really running on that cpu?
75324 */
75325 - if (task_thread_info(rq->curr) != owner || need_resched())
75326 + if (rq->curr != owner || need_resched())
75327 return 0;
75328
75329 cpu_relax();
75330 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75331 /* convert nice value [19,-20] to rlimit style value [1,40] */
75332 int nice_rlim = 20 - nice;
75333
75334 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75335 +
75336 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75337 capable(CAP_SYS_NICE));
75338 }
75339 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75340 if (nice > 19)
75341 nice = 19;
75342
75343 - if (increment < 0 && !can_nice(current, nice))
75344 + if (increment < 0 && (!can_nice(current, nice) ||
75345 + gr_handle_chroot_nice()))
75346 return -EPERM;
75347
75348 retval = security_task_setnice(current, nice);
75349 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75350 long power;
75351 int weight;
75352
75353 - WARN_ON(!sd || !sd->groups);
75354 + BUG_ON(!sd || !sd->groups);
75355
75356 if (cpu != group_first_cpu(sd->groups))
75357 return;
75358 diff --git a/kernel/signal.c b/kernel/signal.c
75359 index 2494827..cda80a0 100644
75360 --- a/kernel/signal.c
75361 +++ b/kernel/signal.c
75362 @@ -41,12 +41,12 @@
75363
75364 static struct kmem_cache *sigqueue_cachep;
75365
75366 -static void __user *sig_handler(struct task_struct *t, int sig)
75367 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
75368 {
75369 return t->sighand->action[sig - 1].sa.sa_handler;
75370 }
75371
75372 -static int sig_handler_ignored(void __user *handler, int sig)
75373 +static int sig_handler_ignored(__sighandler_t handler, int sig)
75374 {
75375 /* Is it explicitly or implicitly ignored? */
75376 return handler == SIG_IGN ||
75377 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75378 static int sig_task_ignored(struct task_struct *t, int sig,
75379 int from_ancestor_ns)
75380 {
75381 - void __user *handler;
75382 + __sighandler_t handler;
75383
75384 handler = sig_handler(t, sig);
75385
75386 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75387 */
75388 user = get_uid(__task_cred(t)->user);
75389 atomic_inc(&user->sigpending);
75390 +
75391 + if (!override_rlimit)
75392 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75393 if (override_rlimit ||
75394 atomic_read(&user->sigpending) <=
75395 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75396 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75397
75398 int unhandled_signal(struct task_struct *tsk, int sig)
75399 {
75400 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75401 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75402 if (is_global_init(tsk))
75403 return 1;
75404 if (handler != SIG_IGN && handler != SIG_DFL)
75405 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75406 }
75407 }
75408
75409 + /* allow glibc communication via tgkill to other threads in our
75410 + thread group */
75411 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75412 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75413 + && gr_handle_signal(t, sig))
75414 + return -EPERM;
75415 +
75416 return security_task_kill(t, info, sig, 0);
75417 }
75418
75419 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75420 return send_signal(sig, info, p, 1);
75421 }
75422
75423 -static int
75424 +int
75425 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75426 {
75427 return send_signal(sig, info, t, 0);
75428 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75429 unsigned long int flags;
75430 int ret, blocked, ignored;
75431 struct k_sigaction *action;
75432 + int is_unhandled = 0;
75433
75434 spin_lock_irqsave(&t->sighand->siglock, flags);
75435 action = &t->sighand->action[sig-1];
75436 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75437 }
75438 if (action->sa.sa_handler == SIG_DFL)
75439 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75440 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75441 + is_unhandled = 1;
75442 ret = specific_send_sig_info(sig, info, t);
75443 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75444
75445 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
75446 + normal operation */
75447 + if (is_unhandled) {
75448 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75449 + gr_handle_crash(t, sig);
75450 + }
75451 +
75452 return ret;
75453 }
75454
75455 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75456 {
75457 int ret = check_kill_permission(sig, info, p);
75458
75459 - if (!ret && sig)
75460 + if (!ret && sig) {
75461 ret = do_send_sig_info(sig, info, p, true);
75462 + if (!ret)
75463 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75464 + }
75465
75466 return ret;
75467 }
75468 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75469 {
75470 siginfo_t info;
75471
75472 + pax_track_stack();
75473 +
75474 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75475
75476 memset(&info, 0, sizeof info);
75477 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75478 int error = -ESRCH;
75479
75480 rcu_read_lock();
75481 - p = find_task_by_vpid(pid);
75482 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75483 + /* allow glibc communication via tgkill to other threads in our
75484 + thread group */
75485 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75486 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
75487 + p = find_task_by_vpid_unrestricted(pid);
75488 + else
75489 +#endif
75490 + p = find_task_by_vpid(pid);
75491 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75492 error = check_kill_permission(sig, info, p);
75493 /*
75494 diff --git a/kernel/smp.c b/kernel/smp.c
75495 index aa9cff3..631a0de 100644
75496 --- a/kernel/smp.c
75497 +++ b/kernel/smp.c
75498 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75499 }
75500 EXPORT_SYMBOL(smp_call_function);
75501
75502 -void ipi_call_lock(void)
75503 +void ipi_call_lock(void) __acquires(call_function.lock)
75504 {
75505 spin_lock(&call_function.lock);
75506 }
75507
75508 -void ipi_call_unlock(void)
75509 +void ipi_call_unlock(void) __releases(call_function.lock)
75510 {
75511 spin_unlock(&call_function.lock);
75512 }
75513
75514 -void ipi_call_lock_irq(void)
75515 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
75516 {
75517 spin_lock_irq(&call_function.lock);
75518 }
75519
75520 -void ipi_call_unlock_irq(void)
75521 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
75522 {
75523 spin_unlock_irq(&call_function.lock);
75524 }
75525 diff --git a/kernel/softirq.c b/kernel/softirq.c
75526 index 04a0252..580c512 100644
75527 --- a/kernel/softirq.c
75528 +++ b/kernel/softirq.c
75529 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75530
75531 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75532
75533 -char *softirq_to_name[NR_SOFTIRQS] = {
75534 +const char * const softirq_to_name[NR_SOFTIRQS] = {
75535 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75536 "TASKLET", "SCHED", "HRTIMER", "RCU"
75537 };
75538 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75539
75540 asmlinkage void __do_softirq(void)
75541 {
75542 - struct softirq_action *h;
75543 + const struct softirq_action *h;
75544 __u32 pending;
75545 int max_restart = MAX_SOFTIRQ_RESTART;
75546 int cpu;
75547 @@ -233,7 +233,7 @@ restart:
75548 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75549
75550 trace_softirq_entry(h, softirq_vec);
75551 - h->action(h);
75552 + h->action();
75553 trace_softirq_exit(h, softirq_vec);
75554 if (unlikely(prev_count != preempt_count())) {
75555 printk(KERN_ERR "huh, entered softirq %td %s %p"
75556 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75557 local_irq_restore(flags);
75558 }
75559
75560 -void open_softirq(int nr, void (*action)(struct softirq_action *))
75561 +void open_softirq(int nr, void (*action)(void))
75562 {
75563 - softirq_vec[nr].action = action;
75564 + pax_open_kernel();
75565 + *(void **)&softirq_vec[nr].action = action;
75566 + pax_close_kernel();
75567 }
75568
75569 /*
75570 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75571
75572 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75573
75574 -static void tasklet_action(struct softirq_action *a)
75575 +static void tasklet_action(void)
75576 {
75577 struct tasklet_struct *list;
75578
75579 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75580 }
75581 }
75582
75583 -static void tasklet_hi_action(struct softirq_action *a)
75584 +static void tasklet_hi_action(void)
75585 {
75586 struct tasklet_struct *list;
75587
75588 diff --git a/kernel/sys.c b/kernel/sys.c
75589 index e9512b1..f07185f 100644
75590 --- a/kernel/sys.c
75591 +++ b/kernel/sys.c
75592 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75593 error = -EACCES;
75594 goto out;
75595 }
75596 +
75597 + if (gr_handle_chroot_setpriority(p, niceval)) {
75598 + error = -EACCES;
75599 + goto out;
75600 + }
75601 +
75602 no_nice = security_task_setnice(p, niceval);
75603 if (no_nice) {
75604 error = no_nice;
75605 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75606 !(user = find_user(who)))
75607 goto out_unlock; /* No processes for this user */
75608
75609 - do_each_thread(g, p)
75610 + do_each_thread(g, p) {
75611 if (__task_cred(p)->uid == who)
75612 error = set_one_prio(p, niceval, error);
75613 - while_each_thread(g, p);
75614 + } while_each_thread(g, p);
75615 if (who != cred->uid)
75616 free_uid(user); /* For find_user() */
75617 break;
75618 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75619 !(user = find_user(who)))
75620 goto out_unlock; /* No processes for this user */
75621
75622 - do_each_thread(g, p)
75623 + do_each_thread(g, p) {
75624 if (__task_cred(p)->uid == who) {
75625 niceval = 20 - task_nice(p);
75626 if (niceval > retval)
75627 retval = niceval;
75628 }
75629 - while_each_thread(g, p);
75630 + } while_each_thread(g, p);
75631 if (who != cred->uid)
75632 free_uid(user); /* for find_user() */
75633 break;
75634 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75635 goto error;
75636 }
75637
75638 + if (gr_check_group_change(new->gid, new->egid, -1))
75639 + goto error;
75640 +
75641 if (rgid != (gid_t) -1 ||
75642 (egid != (gid_t) -1 && egid != old->gid))
75643 new->sgid = new->egid;
75644 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75645 goto error;
75646
75647 retval = -EPERM;
75648 +
75649 + if (gr_check_group_change(gid, gid, gid))
75650 + goto error;
75651 +
75652 if (capable(CAP_SETGID))
75653 new->gid = new->egid = new->sgid = new->fsgid = gid;
75654 else if (gid == old->gid || gid == old->sgid)
75655 @@ -559,7 +572,7 @@ error:
75656 /*
75657 * change the user struct in a credentials set to match the new UID
75658 */
75659 -static int set_user(struct cred *new)
75660 +int set_user(struct cred *new)
75661 {
75662 struct user_struct *new_user;
75663
75664 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75665 if (!new_user)
75666 return -EAGAIN;
75667
75668 + /*
75669 + * We don't fail in case of NPROC limit excess here because too many
75670 + * poorly written programs don't check set*uid() return code, assuming
75671 + * it never fails if called by root. We may still enforce NPROC limit
75672 + * for programs doing set*uid()+execve() by harmlessly deferring the
75673 + * failure to the execve() stage.
75674 + */
75675 if (atomic_read(&new_user->processes) >=
75676 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75677 - new_user != INIT_USER) {
75678 - free_uid(new_user);
75679 - return -EAGAIN;
75680 - }
75681 + new_user != INIT_USER)
75682 + current->flags |= PF_NPROC_EXCEEDED;
75683 + else
75684 + current->flags &= ~PF_NPROC_EXCEEDED;
75685
75686 free_uid(new->user);
75687 new->user = new_user;
75688 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75689 goto error;
75690 }
75691
75692 + if (gr_check_user_change(new->uid, new->euid, -1))
75693 + goto error;
75694 +
75695 if (new->uid != old->uid) {
75696 retval = set_user(new);
75697 if (retval < 0)
75698 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75699 goto error;
75700
75701 retval = -EPERM;
75702 +
75703 + if (gr_check_crash_uid(uid))
75704 + goto error;
75705 + if (gr_check_user_change(uid, uid, uid))
75706 + goto error;
75707 +
75708 if (capable(CAP_SETUID)) {
75709 new->suid = new->uid = uid;
75710 if (uid != old->uid) {
75711 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75712 goto error;
75713 }
75714
75715 + if (gr_check_user_change(ruid, euid, -1))
75716 + goto error;
75717 +
75718 if (ruid != (uid_t) -1) {
75719 new->uid = ruid;
75720 if (ruid != old->uid) {
75721 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75722 goto error;
75723 }
75724
75725 + if (gr_check_group_change(rgid, egid, -1))
75726 + goto error;
75727 +
75728 if (rgid != (gid_t) -1)
75729 new->gid = rgid;
75730 if (egid != (gid_t) -1)
75731 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75732 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75733 goto error;
75734
75735 + if (gr_check_user_change(-1, -1, uid))
75736 + goto error;
75737 +
75738 if (uid == old->uid || uid == old->euid ||
75739 uid == old->suid || uid == old->fsuid ||
75740 capable(CAP_SETUID)) {
75741 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75742 if (gid == old->gid || gid == old->egid ||
75743 gid == old->sgid || gid == old->fsgid ||
75744 capable(CAP_SETGID)) {
75745 + if (gr_check_group_change(-1, -1, gid))
75746 + goto error;
75747 +
75748 if (gid != old_fsgid) {
75749 new->fsgid = gid;
75750 goto change_okay;
75751 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75752 error = get_dumpable(me->mm);
75753 break;
75754 case PR_SET_DUMPABLE:
75755 - if (arg2 < 0 || arg2 > 1) {
75756 + if (arg2 > 1) {
75757 error = -EINVAL;
75758 break;
75759 }
75760 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75761 index b8bd058..ab6a76be 100644
75762 --- a/kernel/sysctl.c
75763 +++ b/kernel/sysctl.c
75764 @@ -63,6 +63,13 @@
75765 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75766
75767 #if defined(CONFIG_SYSCTL)
75768 +#include <linux/grsecurity.h>
75769 +#include <linux/grinternal.h>
75770 +
75771 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75772 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75773 + const int op);
75774 +extern int gr_handle_chroot_sysctl(const int op);
75775
75776 /* External variables not in a header file. */
75777 extern int C_A_D;
75778 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75779 static int proc_taint(struct ctl_table *table, int write,
75780 void __user *buffer, size_t *lenp, loff_t *ppos);
75781 #endif
75782 +extern ctl_table grsecurity_table[];
75783
75784 static struct ctl_table root_table[];
75785 static struct ctl_table_root sysctl_table_root;
75786 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75787 int sysctl_legacy_va_layout;
75788 #endif
75789
75790 +#ifdef CONFIG_PAX_SOFTMODE
75791 +static ctl_table pax_table[] = {
75792 + {
75793 + .ctl_name = CTL_UNNUMBERED,
75794 + .procname = "softmode",
75795 + .data = &pax_softmode,
75796 + .maxlen = sizeof(unsigned int),
75797 + .mode = 0600,
75798 + .proc_handler = &proc_dointvec,
75799 + },
75800 +
75801 + { .ctl_name = 0 }
75802 +};
75803 +#endif
75804 +
75805 extern int prove_locking;
75806 extern int lock_stat;
75807
75808 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75809 #endif
75810
75811 static struct ctl_table kern_table[] = {
75812 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75813 + {
75814 + .ctl_name = CTL_UNNUMBERED,
75815 + .procname = "grsecurity",
75816 + .mode = 0500,
75817 + .child = grsecurity_table,
75818 + },
75819 +#endif
75820 +
75821 +#ifdef CONFIG_PAX_SOFTMODE
75822 + {
75823 + .ctl_name = CTL_UNNUMBERED,
75824 + .procname = "pax",
75825 + .mode = 0500,
75826 + .child = pax_table,
75827 + },
75828 +#endif
75829 +
75830 {
75831 .ctl_name = CTL_UNNUMBERED,
75832 .procname = "sched_child_runs_first",
75833 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75834 .data = &modprobe_path,
75835 .maxlen = KMOD_PATH_LEN,
75836 .mode = 0644,
75837 - .proc_handler = &proc_dostring,
75838 - .strategy = &sysctl_string,
75839 + .proc_handler = &proc_dostring_modpriv,
75840 + .strategy = &sysctl_string_modpriv,
75841 },
75842 {
75843 .ctl_name = CTL_UNNUMBERED,
75844 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75845 .mode = 0644,
75846 .proc_handler = &proc_dointvec
75847 },
75848 + {
75849 + .procname = "heap_stack_gap",
75850 + .data = &sysctl_heap_stack_gap,
75851 + .maxlen = sizeof(sysctl_heap_stack_gap),
75852 + .mode = 0644,
75853 + .proc_handler = proc_doulongvec_minmax,
75854 + },
75855 #else
75856 {
75857 .ctl_name = CTL_UNNUMBERED,
75858 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75859 return 0;
75860 }
75861
75862 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75863 +
75864 static int parse_table(int __user *name, int nlen,
75865 void __user *oldval, size_t __user *oldlenp,
75866 void __user *newval, size_t newlen,
75867 @@ -1821,7 +1871,7 @@ repeat:
75868 if (n == table->ctl_name) {
75869 int error;
75870 if (table->child) {
75871 - if (sysctl_perm(root, table, MAY_EXEC))
75872 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
75873 return -EPERM;
75874 name++;
75875 nlen--;
75876 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75877 int error;
75878 int mode;
75879
75880 + if (table->parent != NULL && table->parent->procname != NULL &&
75881 + table->procname != NULL &&
75882 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75883 + return -EACCES;
75884 + if (gr_handle_chroot_sysctl(op))
75885 + return -EACCES;
75886 + error = gr_handle_sysctl(table, op);
75887 + if (error)
75888 + return error;
75889 +
75890 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75891 + if (error)
75892 + return error;
75893 +
75894 + if (root->permissions)
75895 + mode = root->permissions(root, current->nsproxy, table);
75896 + else
75897 + mode = table->mode;
75898 +
75899 + return test_perm(mode, op);
75900 +}
75901 +
75902 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75903 +{
75904 + int error;
75905 + int mode;
75906 +
75907 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75908 if (error)
75909 return error;
75910 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75911 buffer, lenp, ppos);
75912 }
75913
75914 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75915 + void __user *buffer, size_t *lenp, loff_t *ppos)
75916 +{
75917 + if (write && !capable(CAP_SYS_MODULE))
75918 + return -EPERM;
75919 +
75920 + return _proc_do_string(table->data, table->maxlen, write,
75921 + buffer, lenp, ppos);
75922 +}
75923 +
75924
75925 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75926 int *valp,
75927 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75928 vleft = table->maxlen / sizeof(unsigned long);
75929 left = *lenp;
75930
75931 - for (; left && vleft--; i++, min++, max++, first=0) {
75932 + for (; left && vleft--; i++, first=0) {
75933 if (write) {
75934 while (left) {
75935 char c;
75936 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75937 return -ENOSYS;
75938 }
75939
75940 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75941 + void __user *buffer, size_t *lenp, loff_t *ppos)
75942 +{
75943 + return -ENOSYS;
75944 +}
75945 +
75946 int proc_dointvec(struct ctl_table *table, int write,
75947 void __user *buffer, size_t *lenp, loff_t *ppos)
75948 {
75949 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75950 return 1;
75951 }
75952
75953 +int sysctl_string_modpriv(struct ctl_table *table,
75954 + void __user *oldval, size_t __user *oldlenp,
75955 + void __user *newval, size_t newlen)
75956 +{
75957 + if (newval && newlen && !capable(CAP_SYS_MODULE))
75958 + return -EPERM;
75959 +
75960 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
75961 +}
75962 +
75963 /*
75964 * This function makes sure that all of the integers in the vector
75965 * are between the minimum and maximum values given in the arrays
75966 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75967 return -ENOSYS;
75968 }
75969
75970 +int sysctl_string_modpriv(struct ctl_table *table,
75971 + void __user *oldval, size_t __user *oldlenp,
75972 + void __user *newval, size_t newlen)
75973 +{
75974 + return -ENOSYS;
75975 +}
75976 +
75977 int sysctl_intvec(struct ctl_table *table,
75978 void __user *oldval, size_t __user *oldlenp,
75979 void __user *newval, size_t newlen)
75980 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75981 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75982 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75983 EXPORT_SYMBOL(proc_dostring);
75984 +EXPORT_SYMBOL(proc_dostring_modpriv);
75985 EXPORT_SYMBOL(proc_doulongvec_minmax);
75986 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75987 EXPORT_SYMBOL(register_sysctl_table);
75988 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75989 EXPORT_SYMBOL(sysctl_jiffies);
75990 EXPORT_SYMBOL(sysctl_ms_jiffies);
75991 EXPORT_SYMBOL(sysctl_string);
75992 +EXPORT_SYMBOL(sysctl_string_modpriv);
75993 EXPORT_SYMBOL(sysctl_data);
75994 EXPORT_SYMBOL(unregister_sysctl_table);
75995 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75996 index 469193c..ea3ecb2 100644
75997 --- a/kernel/sysctl_check.c
75998 +++ b/kernel/sysctl_check.c
75999 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76000 } else {
76001 if ((table->strategy == sysctl_data) ||
76002 (table->strategy == sysctl_string) ||
76003 + (table->strategy == sysctl_string_modpriv) ||
76004 (table->strategy == sysctl_intvec) ||
76005 (table->strategy == sysctl_jiffies) ||
76006 (table->strategy == sysctl_ms_jiffies) ||
76007 (table->proc_handler == proc_dostring) ||
76008 + (table->proc_handler == proc_dostring_modpriv) ||
76009 (table->proc_handler == proc_dointvec) ||
76010 (table->proc_handler == proc_dointvec_minmax) ||
76011 (table->proc_handler == proc_dointvec_jiffies) ||
76012 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76013 index a4ef542..798bcd7 100644
76014 --- a/kernel/taskstats.c
76015 +++ b/kernel/taskstats.c
76016 @@ -26,9 +26,12 @@
76017 #include <linux/cgroup.h>
76018 #include <linux/fs.h>
76019 #include <linux/file.h>
76020 +#include <linux/grsecurity.h>
76021 #include <net/genetlink.h>
76022 #include <asm/atomic.h>
76023
76024 +extern int gr_is_taskstats_denied(int pid);
76025 +
76026 /*
76027 * Maximum length of a cpumask that can be specified in
76028 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76029 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76030 size_t size;
76031 cpumask_var_t mask;
76032
76033 + if (gr_is_taskstats_denied(current->pid))
76034 + return -EACCES;
76035 +
76036 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76037 return -ENOMEM;
76038
76039 diff --git a/kernel/time.c b/kernel/time.c
76040 index 33df60e..ca768bd 100644
76041 --- a/kernel/time.c
76042 +++ b/kernel/time.c
76043 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76044 return error;
76045
76046 if (tz) {
76047 + /* we log in do_settimeofday called below, so don't log twice
76048 + */
76049 + if (!tv)
76050 + gr_log_timechange();
76051 +
76052 /* SMP safe, global irq locking makes it work. */
76053 sys_tz = *tz;
76054 update_vsyscall_tz();
76055 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76056 * Avoid unnecessary multiplications/divisions in the
76057 * two most common HZ cases:
76058 */
76059 -unsigned int inline jiffies_to_msecs(const unsigned long j)
76060 +inline unsigned int jiffies_to_msecs(const unsigned long j)
76061 {
76062 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76063 return (MSEC_PER_SEC / HZ) * j;
76064 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76065 }
76066 EXPORT_SYMBOL(jiffies_to_msecs);
76067
76068 -unsigned int inline jiffies_to_usecs(const unsigned long j)
76069 +inline unsigned int jiffies_to_usecs(const unsigned long j)
76070 {
76071 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76072 return (USEC_PER_SEC / HZ) * j;
76073 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76074 index 57b953f..06f149f 100644
76075 --- a/kernel/time/tick-broadcast.c
76076 +++ b/kernel/time/tick-broadcast.c
76077 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76078 * then clear the broadcast bit.
76079 */
76080 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76081 - int cpu = smp_processor_id();
76082 + cpu = smp_processor_id();
76083
76084 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76085 tick_broadcast_clear_oneshot(cpu);
76086 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76087 index 4a71cff..ffb5548 100644
76088 --- a/kernel/time/timekeeping.c
76089 +++ b/kernel/time/timekeeping.c
76090 @@ -14,6 +14,7 @@
76091 #include <linux/init.h>
76092 #include <linux/mm.h>
76093 #include <linux/sched.h>
76094 +#include <linux/grsecurity.h>
76095 #include <linux/sysdev.h>
76096 #include <linux/clocksource.h>
76097 #include <linux/jiffies.h>
76098 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76099 */
76100 struct timespec ts = xtime;
76101 timespec_add_ns(&ts, nsec);
76102 - ACCESS_ONCE(xtime_cache) = ts;
76103 + ACCESS_ONCE_RW(xtime_cache) = ts;
76104 }
76105
76106 /* must hold xtime_lock */
76107 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76108 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76109 return -EINVAL;
76110
76111 + gr_log_timechange();
76112 +
76113 write_seqlock_irqsave(&xtime_lock, flags);
76114
76115 timekeeping_forward_now();
76116 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76117 index 54c0dda..e9095d9 100644
76118 --- a/kernel/time/timer_list.c
76119 +++ b/kernel/time/timer_list.c
76120 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76121
76122 static void print_name_offset(struct seq_file *m, void *sym)
76123 {
76124 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76125 + SEQ_printf(m, "<%p>", NULL);
76126 +#else
76127 char symname[KSYM_NAME_LEN];
76128
76129 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76130 SEQ_printf(m, "<%p>", sym);
76131 else
76132 SEQ_printf(m, "%s", symname);
76133 +#endif
76134 }
76135
76136 static void
76137 @@ -112,7 +116,11 @@ next_one:
76138 static void
76139 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76140 {
76141 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76142 + SEQ_printf(m, " .base: %p\n", NULL);
76143 +#else
76144 SEQ_printf(m, " .base: %p\n", base);
76145 +#endif
76146 SEQ_printf(m, " .index: %d\n",
76147 base->index);
76148 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76149 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76150 {
76151 struct proc_dir_entry *pe;
76152
76153 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76154 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76155 +#else
76156 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76157 +#endif
76158 if (!pe)
76159 return -ENOMEM;
76160 return 0;
76161 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76162 index ee5681f..634089b 100644
76163 --- a/kernel/time/timer_stats.c
76164 +++ b/kernel/time/timer_stats.c
76165 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76166 static unsigned long nr_entries;
76167 static struct entry entries[MAX_ENTRIES];
76168
76169 -static atomic_t overflow_count;
76170 +static atomic_unchecked_t overflow_count;
76171
76172 /*
76173 * The entries are in a hash-table, for fast lookup:
76174 @@ -140,7 +140,7 @@ static void reset_entries(void)
76175 nr_entries = 0;
76176 memset(entries, 0, sizeof(entries));
76177 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76178 - atomic_set(&overflow_count, 0);
76179 + atomic_set_unchecked(&overflow_count, 0);
76180 }
76181
76182 static struct entry *alloc_entry(void)
76183 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76184 if (likely(entry))
76185 entry->count++;
76186 else
76187 - atomic_inc(&overflow_count);
76188 + atomic_inc_unchecked(&overflow_count);
76189
76190 out_unlock:
76191 spin_unlock_irqrestore(lock, flags);
76192 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76193
76194 static void print_name_offset(struct seq_file *m, unsigned long addr)
76195 {
76196 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76197 + seq_printf(m, "<%p>", NULL);
76198 +#else
76199 char symname[KSYM_NAME_LEN];
76200
76201 if (lookup_symbol_name(addr, symname) < 0)
76202 seq_printf(m, "<%p>", (void *)addr);
76203 else
76204 seq_printf(m, "%s", symname);
76205 +#endif
76206 }
76207
76208 static int tstats_show(struct seq_file *m, void *v)
76209 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76210
76211 seq_puts(m, "Timer Stats Version: v0.2\n");
76212 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76213 - if (atomic_read(&overflow_count))
76214 + if (atomic_read_unchecked(&overflow_count))
76215 seq_printf(m, "Overflow: %d entries\n",
76216 - atomic_read(&overflow_count));
76217 + atomic_read_unchecked(&overflow_count));
76218
76219 for (i = 0; i < nr_entries; i++) {
76220 entry = entries + i;
76221 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76222 {
76223 struct proc_dir_entry *pe;
76224
76225 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76226 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76227 +#else
76228 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76229 +#endif
76230 if (!pe)
76231 return -ENOMEM;
76232 return 0;
76233 diff --git a/kernel/timer.c b/kernel/timer.c
76234 index cb3c1f1..8bf5526 100644
76235 --- a/kernel/timer.c
76236 +++ b/kernel/timer.c
76237 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76238 /*
76239 * This function runs timers and the timer-tq in bottom half context.
76240 */
76241 -static void run_timer_softirq(struct softirq_action *h)
76242 +static void run_timer_softirq(void)
76243 {
76244 struct tvec_base *base = __get_cpu_var(tvec_bases);
76245
76246 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76247 index d9d6206..f19467e 100644
76248 --- a/kernel/trace/blktrace.c
76249 +++ b/kernel/trace/blktrace.c
76250 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76251 struct blk_trace *bt = filp->private_data;
76252 char buf[16];
76253
76254 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76255 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76256
76257 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76258 }
76259 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76260 return 1;
76261
76262 bt = buf->chan->private_data;
76263 - atomic_inc(&bt->dropped);
76264 + atomic_inc_unchecked(&bt->dropped);
76265 return 0;
76266 }
76267
76268 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76269
76270 bt->dir = dir;
76271 bt->dev = dev;
76272 - atomic_set(&bt->dropped, 0);
76273 + atomic_set_unchecked(&bt->dropped, 0);
76274
76275 ret = -EIO;
76276 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76277 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76278 index 4872937..c794d40 100644
76279 --- a/kernel/trace/ftrace.c
76280 +++ b/kernel/trace/ftrace.c
76281 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76282
76283 ip = rec->ip;
76284
76285 + ret = ftrace_arch_code_modify_prepare();
76286 + FTRACE_WARN_ON(ret);
76287 + if (ret)
76288 + return 0;
76289 +
76290 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76291 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76292 if (ret) {
76293 ftrace_bug(ret, ip);
76294 rec->flags |= FTRACE_FL_FAILED;
76295 - return 0;
76296 }
76297 - return 1;
76298 + return ret ? 0 : 1;
76299 }
76300
76301 /*
76302 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76303 index e749a05..19c6e94 100644
76304 --- a/kernel/trace/ring_buffer.c
76305 +++ b/kernel/trace/ring_buffer.c
76306 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76307 * the reader page). But if the next page is a header page,
76308 * its flags will be non zero.
76309 */
76310 -static int inline
76311 +static inline int
76312 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76313 struct buffer_page *page, struct list_head *list)
76314 {
76315 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76316 index a2a2d1f..7f32b09 100644
76317 --- a/kernel/trace/trace.c
76318 +++ b/kernel/trace/trace.c
76319 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76320 size_t rem;
76321 unsigned int i;
76322
76323 + pax_track_stack();
76324 +
76325 /* copy the tracer to avoid using a global lock all around */
76326 mutex_lock(&trace_types_lock);
76327 if (unlikely(old_tracer != current_trace && current_trace)) {
76328 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76329 int entries, size, i;
76330 size_t ret;
76331
76332 + pax_track_stack();
76333 +
76334 if (*ppos & (PAGE_SIZE - 1)) {
76335 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76336 return -EINVAL;
76337 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76338 };
76339 #endif
76340
76341 -static struct dentry *d_tracer;
76342 -
76343 struct dentry *tracing_init_dentry(void)
76344 {
76345 + static struct dentry *d_tracer;
76346 static int once;
76347
76348 if (d_tracer)
76349 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76350 return d_tracer;
76351 }
76352
76353 -static struct dentry *d_percpu;
76354 -
76355 struct dentry *tracing_dentry_percpu(void)
76356 {
76357 + static struct dentry *d_percpu;
76358 static int once;
76359 struct dentry *d_tracer;
76360
76361 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76362 index d128f65..f37b4af 100644
76363 --- a/kernel/trace/trace_events.c
76364 +++ b/kernel/trace/trace_events.c
76365 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76366 * Modules must own their file_operations to keep up with
76367 * reference counting.
76368 */
76369 +
76370 struct ftrace_module_file_ops {
76371 struct list_head list;
76372 struct module *mod;
76373 - struct file_operations id;
76374 - struct file_operations enable;
76375 - struct file_operations format;
76376 - struct file_operations filter;
76377 };
76378
76379 static void remove_subsystem_dir(const char *name)
76380 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76381
76382 file_ops->mod = mod;
76383
76384 - file_ops->id = ftrace_event_id_fops;
76385 - file_ops->id.owner = mod;
76386 -
76387 - file_ops->enable = ftrace_enable_fops;
76388 - file_ops->enable.owner = mod;
76389 -
76390 - file_ops->filter = ftrace_event_filter_fops;
76391 - file_ops->filter.owner = mod;
76392 -
76393 - file_ops->format = ftrace_event_format_fops;
76394 - file_ops->format.owner = mod;
76395 + pax_open_kernel();
76396 + *(void **)&mod->trace_id.owner = mod;
76397 + *(void **)&mod->trace_enable.owner = mod;
76398 + *(void **)&mod->trace_filter.owner = mod;
76399 + *(void **)&mod->trace_format.owner = mod;
76400 + pax_close_kernel();
76401
76402 list_add(&file_ops->list, &ftrace_module_file_list);
76403
76404 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76405 call->mod = mod;
76406 list_add(&call->list, &ftrace_events);
76407 event_create_dir(call, d_events,
76408 - &file_ops->id, &file_ops->enable,
76409 - &file_ops->filter, &file_ops->format);
76410 + &mod->trace_id, &mod->trace_enable,
76411 + &mod->trace_filter, &mod->trace_format);
76412 }
76413 }
76414
76415 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76416 index 0acd834..b800b56 100644
76417 --- a/kernel/trace/trace_mmiotrace.c
76418 +++ b/kernel/trace/trace_mmiotrace.c
76419 @@ -23,7 +23,7 @@ struct header_iter {
76420 static struct trace_array *mmio_trace_array;
76421 static bool overrun_detected;
76422 static unsigned long prev_overruns;
76423 -static atomic_t dropped_count;
76424 +static atomic_unchecked_t dropped_count;
76425
76426 static void mmio_reset_data(struct trace_array *tr)
76427 {
76428 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76429
76430 static unsigned long count_overruns(struct trace_iterator *iter)
76431 {
76432 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
76433 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76434 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76435
76436 if (over > prev_overruns)
76437 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76438 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76439 sizeof(*entry), 0, pc);
76440 if (!event) {
76441 - atomic_inc(&dropped_count);
76442 + atomic_inc_unchecked(&dropped_count);
76443 return;
76444 }
76445 entry = ring_buffer_event_data(event);
76446 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76447 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76448 sizeof(*entry), 0, pc);
76449 if (!event) {
76450 - atomic_inc(&dropped_count);
76451 + atomic_inc_unchecked(&dropped_count);
76452 return;
76453 }
76454 entry = ring_buffer_event_data(event);
76455 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76456 index b6c12c6..41fdc53 100644
76457 --- a/kernel/trace/trace_output.c
76458 +++ b/kernel/trace/trace_output.c
76459 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76460 return 0;
76461 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76462 if (!IS_ERR(p)) {
76463 - p = mangle_path(s->buffer + s->len, p, "\n");
76464 + p = mangle_path(s->buffer + s->len, p, "\n\\");
76465 if (p) {
76466 s->len = p - s->buffer;
76467 return 1;
76468 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76469 index 8504ac7..ecf0adb 100644
76470 --- a/kernel/trace/trace_stack.c
76471 +++ b/kernel/trace/trace_stack.c
76472 @@ -50,7 +50,7 @@ static inline void check_stack(void)
76473 return;
76474
76475 /* we do not handle interrupt stacks yet */
76476 - if (!object_is_on_stack(&this_size))
76477 + if (!object_starts_on_stack(&this_size))
76478 return;
76479
76480 local_irq_save(flags);
76481 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76482 index 40cafb0..d5ead43 100644
76483 --- a/kernel/trace/trace_workqueue.c
76484 +++ b/kernel/trace/trace_workqueue.c
76485 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76486 int cpu;
76487 pid_t pid;
76488 /* Can be inserted from interrupt or user context, need to be atomic */
76489 - atomic_t inserted;
76490 + atomic_unchecked_t inserted;
76491 /*
76492 * Don't need to be atomic, works are serialized in a single workqueue thread
76493 * on a single CPU.
76494 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76495 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76496 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76497 if (node->pid == wq_thread->pid) {
76498 - atomic_inc(&node->inserted);
76499 + atomic_inc_unchecked(&node->inserted);
76500 goto found;
76501 }
76502 }
76503 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76504 tsk = get_pid_task(pid, PIDTYPE_PID);
76505 if (tsk) {
76506 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76507 - atomic_read(&cws->inserted), cws->executed,
76508 + atomic_read_unchecked(&cws->inserted), cws->executed,
76509 tsk->comm);
76510 put_task_struct(tsk);
76511 }
76512 diff --git a/kernel/user.c b/kernel/user.c
76513 index 1b91701..8795237 100644
76514 --- a/kernel/user.c
76515 +++ b/kernel/user.c
76516 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76517 spin_lock_irq(&uidhash_lock);
76518 up = uid_hash_find(uid, hashent);
76519 if (up) {
76520 + put_user_ns(ns);
76521 key_put(new->uid_keyring);
76522 key_put(new->session_keyring);
76523 kmem_cache_free(uid_cachep, new);
76524 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76525 index 234ceb1..ad74049 100644
76526 --- a/lib/Kconfig.debug
76527 +++ b/lib/Kconfig.debug
76528 @@ -905,7 +905,7 @@ config LATENCYTOP
76529 select STACKTRACE
76530 select SCHEDSTATS
76531 select SCHED_DEBUG
76532 - depends on HAVE_LATENCYTOP_SUPPORT
76533 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76534 help
76535 Enable this option if you want to use the LatencyTOP tool
76536 to find out which userspace is blocking on what kernel operations.
76537 diff --git a/lib/bitmap.c b/lib/bitmap.c
76538 index 7025658..8d14cab 100644
76539 --- a/lib/bitmap.c
76540 +++ b/lib/bitmap.c
76541 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76542 {
76543 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76544 u32 chunk;
76545 - const char __user *ubuf = buf;
76546 + const char __user *ubuf = (const char __force_user *)buf;
76547
76548 bitmap_zero(maskp, nmaskbits);
76549
76550 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76551 {
76552 if (!access_ok(VERIFY_READ, ubuf, ulen))
76553 return -EFAULT;
76554 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76555 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76556 }
76557 EXPORT_SYMBOL(bitmap_parse_user);
76558
76559 diff --git a/lib/bug.c b/lib/bug.c
76560 index 300e41a..2779eb0 100644
76561 --- a/lib/bug.c
76562 +++ b/lib/bug.c
76563 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76564 return BUG_TRAP_TYPE_NONE;
76565
76566 bug = find_bug(bugaddr);
76567 + if (!bug)
76568 + return BUG_TRAP_TYPE_NONE;
76569
76570 printk(KERN_EMERG "------------[ cut here ]------------\n");
76571
76572 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76573 index 2b413db..e21d207 100644
76574 --- a/lib/debugobjects.c
76575 +++ b/lib/debugobjects.c
76576 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76577 if (limit > 4)
76578 return;
76579
76580 - is_on_stack = object_is_on_stack(addr);
76581 + is_on_stack = object_starts_on_stack(addr);
76582 if (is_on_stack == onstack)
76583 return;
76584
76585 diff --git a/lib/devres.c b/lib/devres.c
76586 index 72c8909..7543868 100644
76587 --- a/lib/devres.c
76588 +++ b/lib/devres.c
76589 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76590 {
76591 iounmap(addr);
76592 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76593 - (void *)addr));
76594 + (void __force *)addr));
76595 }
76596 EXPORT_SYMBOL(devm_iounmap);
76597
76598 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76599 {
76600 ioport_unmap(addr);
76601 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76602 - devm_ioport_map_match, (void *)addr));
76603 + devm_ioport_map_match, (void __force *)addr));
76604 }
76605 EXPORT_SYMBOL(devm_ioport_unmap);
76606
76607 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76608 index 084e879..0674448 100644
76609 --- a/lib/dma-debug.c
76610 +++ b/lib/dma-debug.c
76611 @@ -861,7 +861,7 @@ out:
76612
76613 static void check_for_stack(struct device *dev, void *addr)
76614 {
76615 - if (object_is_on_stack(addr))
76616 + if (object_starts_on_stack(addr))
76617 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76618 "stack [addr=%p]\n", addr);
76619 }
76620 diff --git a/lib/idr.c b/lib/idr.c
76621 index eda7ba3..915dfae 100644
76622 --- a/lib/idr.c
76623 +++ b/lib/idr.c
76624 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76625 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76626
76627 /* if already at the top layer, we need to grow */
76628 - if (id >= 1 << (idp->layers * IDR_BITS)) {
76629 + if (id >= (1 << (idp->layers * IDR_BITS))) {
76630 *starting_id = id;
76631 return IDR_NEED_TO_GROW;
76632 }
76633 diff --git a/lib/inflate.c b/lib/inflate.c
76634 index d102559..4215f31 100644
76635 --- a/lib/inflate.c
76636 +++ b/lib/inflate.c
76637 @@ -266,7 +266,7 @@ static void free(void *where)
76638 malloc_ptr = free_mem_ptr;
76639 }
76640 #else
76641 -#define malloc(a) kmalloc(a, GFP_KERNEL)
76642 +#define malloc(a) kmalloc((a), GFP_KERNEL)
76643 #define free(a) kfree(a)
76644 #endif
76645
76646 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76647 index bd2bea9..6b3c95e 100644
76648 --- a/lib/is_single_threaded.c
76649 +++ b/lib/is_single_threaded.c
76650 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76651 struct task_struct *p, *t;
76652 bool ret;
76653
76654 + if (!mm)
76655 + return true;
76656 +
76657 if (atomic_read(&task->signal->live) != 1)
76658 return false;
76659
76660 diff --git a/lib/kobject.c b/lib/kobject.c
76661 index b512b74..8115eb1 100644
76662 --- a/lib/kobject.c
76663 +++ b/lib/kobject.c
76664 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76665 return ret;
76666 }
76667
76668 -struct sysfs_ops kobj_sysfs_ops = {
76669 +const struct sysfs_ops kobj_sysfs_ops = {
76670 .show = kobj_attr_show,
76671 .store = kobj_attr_store,
76672 };
76673 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76674 * If the kset was not able to be created, NULL will be returned.
76675 */
76676 static struct kset *kset_create(const char *name,
76677 - struct kset_uevent_ops *uevent_ops,
76678 + const struct kset_uevent_ops *uevent_ops,
76679 struct kobject *parent_kobj)
76680 {
76681 struct kset *kset;
76682 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76683 * If the kset was not able to be created, NULL will be returned.
76684 */
76685 struct kset *kset_create_and_add(const char *name,
76686 - struct kset_uevent_ops *uevent_ops,
76687 + const struct kset_uevent_ops *uevent_ops,
76688 struct kobject *parent_kobj)
76689 {
76690 struct kset *kset;
76691 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76692 index 507b821..0bf8ed0 100644
76693 --- a/lib/kobject_uevent.c
76694 +++ b/lib/kobject_uevent.c
76695 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76696 const char *subsystem;
76697 struct kobject *top_kobj;
76698 struct kset *kset;
76699 - struct kset_uevent_ops *uevent_ops;
76700 + const struct kset_uevent_ops *uevent_ops;
76701 u64 seq;
76702 int i = 0;
76703 int retval = 0;
76704 diff --git a/lib/kref.c b/lib/kref.c
76705 index 9ecd6e8..12c94c1 100644
76706 --- a/lib/kref.c
76707 +++ b/lib/kref.c
76708 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76709 */
76710 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76711 {
76712 - WARN_ON(release == NULL);
76713 + BUG_ON(release == NULL);
76714 WARN_ON(release == (void (*)(struct kref *))kfree);
76715
76716 if (atomic_dec_and_test(&kref->refcount)) {
76717 diff --git a/lib/parser.c b/lib/parser.c
76718 index b00d020..1b34325 100644
76719 --- a/lib/parser.c
76720 +++ b/lib/parser.c
76721 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76722 char *buf;
76723 int ret;
76724
76725 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76726 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76727 if (!buf)
76728 return -ENOMEM;
76729 memcpy(buf, s->from, s->to - s->from);
76730 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76731 index 92cdd99..a8149d7 100644
76732 --- a/lib/radix-tree.c
76733 +++ b/lib/radix-tree.c
76734 @@ -81,7 +81,7 @@ struct radix_tree_preload {
76735 int nr;
76736 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76737 };
76738 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76739 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76740
76741 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76742 {
76743 diff --git a/lib/random32.c b/lib/random32.c
76744 index 217d5c4..45aba8a 100644
76745 --- a/lib/random32.c
76746 +++ b/lib/random32.c
76747 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76748 */
76749 static inline u32 __seed(u32 x, u32 m)
76750 {
76751 - return (x < m) ? x + m : x;
76752 + return (x <= m) ? x + m + 1 : x;
76753 }
76754
76755 /**
76756 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76757 index 33bed5e..1477e46 100644
76758 --- a/lib/vsprintf.c
76759 +++ b/lib/vsprintf.c
76760 @@ -16,6 +16,9 @@
76761 * - scnprintf and vscnprintf
76762 */
76763
76764 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76765 +#define __INCLUDED_BY_HIDESYM 1
76766 +#endif
76767 #include <stdarg.h>
76768 #include <linux/module.h>
76769 #include <linux/types.h>
76770 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76771 return buf;
76772 }
76773
76774 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76775 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76776 {
76777 int len, i;
76778
76779 if ((unsigned long)s < PAGE_SIZE)
76780 - s = "<NULL>";
76781 + s = "(null)";
76782
76783 len = strnlen(s, spec.precision);
76784
76785 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76786 unsigned long value = (unsigned long) ptr;
76787 #ifdef CONFIG_KALLSYMS
76788 char sym[KSYM_SYMBOL_LEN];
76789 - if (ext != 'f' && ext != 's')
76790 + if (ext != 'f' && ext != 's' && ext != 'a')
76791 sprint_symbol(sym, value);
76792 else
76793 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76794 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76795 * - 'f' For simple symbolic function names without offset
76796 * - 'S' For symbolic direct pointers with offset
76797 * - 's' For symbolic direct pointers without offset
76798 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76799 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76800 * - 'R' For a struct resource pointer, it prints the range of
76801 * addresses (not the name nor the flags)
76802 * - 'M' For a 6-byte MAC address, it prints the address in the
76803 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76804 struct printf_spec spec)
76805 {
76806 if (!ptr)
76807 - return string(buf, end, "(null)", spec);
76808 + return string(buf, end, "(nil)", spec);
76809
76810 switch (*fmt) {
76811 case 'F':
76812 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76813 case 's':
76814 /* Fallthrough */
76815 case 'S':
76816 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76817 + break;
76818 +#else
76819 + return symbol_string(buf, end, ptr, spec, *fmt);
76820 +#endif
76821 + case 'a':
76822 + /* Fallthrough */
76823 + case 'A':
76824 return symbol_string(buf, end, ptr, spec, *fmt);
76825 case 'R':
76826 return resource_string(buf, end, ptr, spec);
76827 @@ -1445,7 +1458,7 @@ do { \
76828 size_t len;
76829 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76830 || (unsigned long)save_str < PAGE_SIZE)
76831 - save_str = "<NULL>";
76832 + save_str = "(null)";
76833 len = strlen(save_str);
76834 if (str + len + 1 < end)
76835 memcpy(str, save_str, len + 1);
76836 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76837 typeof(type) value; \
76838 if (sizeof(type) == 8) { \
76839 args = PTR_ALIGN(args, sizeof(u32)); \
76840 - *(u32 *)&value = *(u32 *)args; \
76841 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76842 + *(u32 *)&value = *(const u32 *)args; \
76843 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76844 } else { \
76845 args = PTR_ALIGN(args, sizeof(type)); \
76846 - value = *(typeof(type) *)args; \
76847 + value = *(const typeof(type) *)args; \
76848 } \
76849 args += sizeof(type); \
76850 value; \
76851 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76852 const char *str_arg = args;
76853 size_t len = strlen(str_arg);
76854 args += len + 1;
76855 - str = string(str, end, (char *)str_arg, spec);
76856 + str = string(str, end, str_arg, spec);
76857 break;
76858 }
76859
76860 diff --git a/localversion-grsec b/localversion-grsec
76861 new file mode 100644
76862 index 0000000..7cd6065
76863 --- /dev/null
76864 +++ b/localversion-grsec
76865 @@ -0,0 +1 @@
76866 +-grsec
76867 diff --git a/mm/Kconfig b/mm/Kconfig
76868 index 2c19c0b..f3c3f83 100644
76869 --- a/mm/Kconfig
76870 +++ b/mm/Kconfig
76871 @@ -228,7 +228,7 @@ config KSM
76872 config DEFAULT_MMAP_MIN_ADDR
76873 int "Low address space to protect from user allocation"
76874 depends on MMU
76875 - default 4096
76876 + default 65536
76877 help
76878 This is the portion of low virtual memory which should be protected
76879 from userspace allocation. Keeping a user from writing to low pages
76880 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76881 index 67a33a5..094dcf1 100644
76882 --- a/mm/backing-dev.c
76883 +++ b/mm/backing-dev.c
76884 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76885 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76886 spin_unlock(&bdi->wb_lock);
76887
76888 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76889 + tsk->flags |= PF_SWAPWRITE;
76890 set_freezable();
76891
76892 /*
76893 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76894 * Add the default flusher task that gets created for any bdi
76895 * that has dirty data pending writeout
76896 */
76897 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76898 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76899 {
76900 if (!bdi_cap_writeback_dirty(bdi))
76901 return;
76902 diff --git a/mm/filemap.c b/mm/filemap.c
76903 index a1fe378..e26702f 100644
76904 --- a/mm/filemap.c
76905 +++ b/mm/filemap.c
76906 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76907 struct address_space *mapping = file->f_mapping;
76908
76909 if (!mapping->a_ops->readpage)
76910 - return -ENOEXEC;
76911 + return -ENODEV;
76912 file_accessed(file);
76913 vma->vm_ops = &generic_file_vm_ops;
76914 vma->vm_flags |= VM_CAN_NONLINEAR;
76915 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76916 *pos = i_size_read(inode);
76917
76918 if (limit != RLIM_INFINITY) {
76919 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76920 if (*pos >= limit) {
76921 send_sig(SIGXFSZ, current, 0);
76922 return -EFBIG;
76923 diff --git a/mm/fremap.c b/mm/fremap.c
76924 index b6ec85a..a24ac22 100644
76925 --- a/mm/fremap.c
76926 +++ b/mm/fremap.c
76927 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76928 retry:
76929 vma = find_vma(mm, start);
76930
76931 +#ifdef CONFIG_PAX_SEGMEXEC
76932 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76933 + goto out;
76934 +#endif
76935 +
76936 /*
76937 * Make sure the vma is shared, that it supports prefaulting,
76938 * and that the remapped range is valid and fully within
76939 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76940 /*
76941 * drop PG_Mlocked flag for over-mapped range
76942 */
76943 - unsigned int saved_flags = vma->vm_flags;
76944 + unsigned long saved_flags = vma->vm_flags;
76945 munlock_vma_pages_range(vma, start, start + size);
76946 vma->vm_flags = saved_flags;
76947 }
76948 diff --git a/mm/highmem.c b/mm/highmem.c
76949 index 9c1e627..5ca9447 100644
76950 --- a/mm/highmem.c
76951 +++ b/mm/highmem.c
76952 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76953 * So no dangers, even with speculative execution.
76954 */
76955 page = pte_page(pkmap_page_table[i]);
76956 + pax_open_kernel();
76957 pte_clear(&init_mm, (unsigned long)page_address(page),
76958 &pkmap_page_table[i]);
76959 -
76960 + pax_close_kernel();
76961 set_page_address(page, NULL);
76962 need_flush = 1;
76963 }
76964 @@ -177,9 +178,11 @@ start:
76965 }
76966 }
76967 vaddr = PKMAP_ADDR(last_pkmap_nr);
76968 +
76969 + pax_open_kernel();
76970 set_pte_at(&init_mm, vaddr,
76971 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76972 -
76973 + pax_close_kernel();
76974 pkmap_count[last_pkmap_nr] = 1;
76975 set_page_address(page, (void *)vaddr);
76976
76977 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76978 index 5e1e508..ac70275 100644
76979 --- a/mm/hugetlb.c
76980 +++ b/mm/hugetlb.c
76981 @@ -869,6 +869,7 @@ free:
76982 list_del(&page->lru);
76983 enqueue_huge_page(h, page);
76984 }
76985 + spin_unlock(&hugetlb_lock);
76986
76987 /* Free unnecessary surplus pages to the buddy allocator */
76988 if (!list_empty(&surplus_list)) {
76989 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76990 return 1;
76991 }
76992
76993 +#ifdef CONFIG_PAX_SEGMEXEC
76994 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76995 +{
76996 + struct mm_struct *mm = vma->vm_mm;
76997 + struct vm_area_struct *vma_m;
76998 + unsigned long address_m;
76999 + pte_t *ptep_m;
77000 +
77001 + vma_m = pax_find_mirror_vma(vma);
77002 + if (!vma_m)
77003 + return;
77004 +
77005 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77006 + address_m = address + SEGMEXEC_TASK_SIZE;
77007 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77008 + get_page(page_m);
77009 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77010 +}
77011 +#endif
77012 +
77013 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77014 unsigned long address, pte_t *ptep, pte_t pte,
77015 struct page *pagecache_page)
77016 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
77017 huge_ptep_clear_flush(vma, address, ptep);
77018 set_huge_pte_at(mm, address, ptep,
77019 make_huge_pte(vma, new_page, 1));
77020 +
77021 +#ifdef CONFIG_PAX_SEGMEXEC
77022 + pax_mirror_huge_pte(vma, address, new_page);
77023 +#endif
77024 +
77025 /* Make the old page be freed below */
77026 new_page = old_page;
77027 }
77028 @@ -2135,6 +2161,10 @@ retry:
77029 && (vma->vm_flags & VM_SHARED)));
77030 set_huge_pte_at(mm, address, ptep, new_pte);
77031
77032 +#ifdef CONFIG_PAX_SEGMEXEC
77033 + pax_mirror_huge_pte(vma, address, page);
77034 +#endif
77035 +
77036 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77037 /* Optimization, do the COW without a second fault */
77038 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77039 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77040 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77041 struct hstate *h = hstate_vma(vma);
77042
77043 +#ifdef CONFIG_PAX_SEGMEXEC
77044 + struct vm_area_struct *vma_m;
77045 +
77046 + vma_m = pax_find_mirror_vma(vma);
77047 + if (vma_m) {
77048 + unsigned long address_m;
77049 +
77050 + if (vma->vm_start > vma_m->vm_start) {
77051 + address_m = address;
77052 + address -= SEGMEXEC_TASK_SIZE;
77053 + vma = vma_m;
77054 + h = hstate_vma(vma);
77055 + } else
77056 + address_m = address + SEGMEXEC_TASK_SIZE;
77057 +
77058 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77059 + return VM_FAULT_OOM;
77060 + address_m &= HPAGE_MASK;
77061 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77062 + }
77063 +#endif
77064 +
77065 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77066 if (!ptep)
77067 return VM_FAULT_OOM;
77068 diff --git a/mm/internal.h b/mm/internal.h
77069 index f03e8e2..7354343 100644
77070 --- a/mm/internal.h
77071 +++ b/mm/internal.h
77072 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77073 * in mm/page_alloc.c
77074 */
77075 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77076 +extern void free_compound_page(struct page *page);
77077 extern void prep_compound_page(struct page *page, unsigned long order);
77078
77079
77080 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77081 index c346660..b47382f 100644
77082 --- a/mm/kmemleak.c
77083 +++ b/mm/kmemleak.c
77084 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77085
77086 for (i = 0; i < object->trace_len; i++) {
77087 void *ptr = (void *)object->trace[i];
77088 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77089 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77090 }
77091 }
77092
77093 diff --git a/mm/maccess.c b/mm/maccess.c
77094 index 9073695..1127f348 100644
77095 --- a/mm/maccess.c
77096 +++ b/mm/maccess.c
77097 @@ -14,7 +14,7 @@
77098 * Safely read from address @src to the buffer at @dst. If a kernel fault
77099 * happens, handle that and return -EFAULT.
77100 */
77101 -long probe_kernel_read(void *dst, void *src, size_t size)
77102 +long probe_kernel_read(void *dst, const void *src, size_t size)
77103 {
77104 long ret;
77105 mm_segment_t old_fs = get_fs();
77106 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77107 set_fs(KERNEL_DS);
77108 pagefault_disable();
77109 ret = __copy_from_user_inatomic(dst,
77110 - (__force const void __user *)src, size);
77111 + (const void __force_user *)src, size);
77112 pagefault_enable();
77113 set_fs(old_fs);
77114
77115 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77116 * Safely write to address @dst from the buffer at @src. If a kernel fault
77117 * happens, handle that and return -EFAULT.
77118 */
77119 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77120 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77121 {
77122 long ret;
77123 mm_segment_t old_fs = get_fs();
77124
77125 set_fs(KERNEL_DS);
77126 pagefault_disable();
77127 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77128 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77129 pagefault_enable();
77130 set_fs(old_fs);
77131
77132 diff --git a/mm/madvise.c b/mm/madvise.c
77133 index 35b1479..499f7d4 100644
77134 --- a/mm/madvise.c
77135 +++ b/mm/madvise.c
77136 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77137 pgoff_t pgoff;
77138 unsigned long new_flags = vma->vm_flags;
77139
77140 +#ifdef CONFIG_PAX_SEGMEXEC
77141 + struct vm_area_struct *vma_m;
77142 +#endif
77143 +
77144 switch (behavior) {
77145 case MADV_NORMAL:
77146 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77147 @@ -103,6 +107,13 @@ success:
77148 /*
77149 * vm_flags is protected by the mmap_sem held in write mode.
77150 */
77151 +
77152 +#ifdef CONFIG_PAX_SEGMEXEC
77153 + vma_m = pax_find_mirror_vma(vma);
77154 + if (vma_m)
77155 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77156 +#endif
77157 +
77158 vma->vm_flags = new_flags;
77159
77160 out:
77161 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77162 struct vm_area_struct ** prev,
77163 unsigned long start, unsigned long end)
77164 {
77165 +
77166 +#ifdef CONFIG_PAX_SEGMEXEC
77167 + struct vm_area_struct *vma_m;
77168 +#endif
77169 +
77170 *prev = vma;
77171 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77172 return -EINVAL;
77173 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77174 zap_page_range(vma, start, end - start, &details);
77175 } else
77176 zap_page_range(vma, start, end - start, NULL);
77177 +
77178 +#ifdef CONFIG_PAX_SEGMEXEC
77179 + vma_m = pax_find_mirror_vma(vma);
77180 + if (vma_m) {
77181 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77182 + struct zap_details details = {
77183 + .nonlinear_vma = vma_m,
77184 + .last_index = ULONG_MAX,
77185 + };
77186 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77187 + } else
77188 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77189 + }
77190 +#endif
77191 +
77192 return 0;
77193 }
77194
77195 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77196 if (end < start)
77197 goto out;
77198
77199 +#ifdef CONFIG_PAX_SEGMEXEC
77200 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77201 + if (end > SEGMEXEC_TASK_SIZE)
77202 + goto out;
77203 + } else
77204 +#endif
77205 +
77206 + if (end > TASK_SIZE)
77207 + goto out;
77208 +
77209 error = 0;
77210 if (end == start)
77211 goto out;
77212 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77213 index 8aeba53..b4a4198 100644
77214 --- a/mm/memory-failure.c
77215 +++ b/mm/memory-failure.c
77216 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77217
77218 int sysctl_memory_failure_recovery __read_mostly = 1;
77219
77220 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77221 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77222
77223 /*
77224 * Send all the processes who have the page mapped an ``action optional''
77225 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77226 si.si_signo = SIGBUS;
77227 si.si_errno = 0;
77228 si.si_code = BUS_MCEERR_AO;
77229 - si.si_addr = (void *)addr;
77230 + si.si_addr = (void __user *)addr;
77231 #ifdef __ARCH_SI_TRAPNO
77232 si.si_trapno = trapno;
77233 #endif
77234 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77235 return 0;
77236 }
77237
77238 - atomic_long_add(1, &mce_bad_pages);
77239 + atomic_long_add_unchecked(1, &mce_bad_pages);
77240
77241 /*
77242 * We need/can do nothing about count=0 pages.
77243 diff --git a/mm/memory.c b/mm/memory.c
77244 index 6c836d3..48f3264 100644
77245 --- a/mm/memory.c
77246 +++ b/mm/memory.c
77247 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77248 return;
77249
77250 pmd = pmd_offset(pud, start);
77251 +
77252 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77253 pud_clear(pud);
77254 pmd_free_tlb(tlb, pmd, start);
77255 +#endif
77256 +
77257 }
77258
77259 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77260 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77261 if (end - 1 > ceiling - 1)
77262 return;
77263
77264 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77265 pud = pud_offset(pgd, start);
77266 pgd_clear(pgd);
77267 pud_free_tlb(tlb, pud, start);
77268 +#endif
77269 +
77270 }
77271
77272 /*
77273 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77274 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77275 i = 0;
77276
77277 - do {
77278 + while (nr_pages) {
77279 struct vm_area_struct *vma;
77280
77281 - vma = find_extend_vma(mm, start);
77282 + vma = find_vma(mm, start);
77283 if (!vma && in_gate_area(tsk, start)) {
77284 unsigned long pg = start & PAGE_MASK;
77285 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77286 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77287 continue;
77288 }
77289
77290 - if (!vma ||
77291 + if (!vma || start < vma->vm_start ||
77292 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77293 !(vm_flags & vma->vm_flags))
77294 return i ? : -EFAULT;
77295 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77296 start += PAGE_SIZE;
77297 nr_pages--;
77298 } while (nr_pages && start < vma->vm_end);
77299 - } while (nr_pages);
77300 + }
77301 return i;
77302 }
77303
77304 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77305 page_add_file_rmap(page);
77306 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77307
77308 +#ifdef CONFIG_PAX_SEGMEXEC
77309 + pax_mirror_file_pte(vma, addr, page, ptl);
77310 +#endif
77311 +
77312 retval = 0;
77313 pte_unmap_unlock(pte, ptl);
77314 return retval;
77315 @@ -1560,10 +1571,22 @@ out:
77316 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77317 struct page *page)
77318 {
77319 +
77320 +#ifdef CONFIG_PAX_SEGMEXEC
77321 + struct vm_area_struct *vma_m;
77322 +#endif
77323 +
77324 if (addr < vma->vm_start || addr >= vma->vm_end)
77325 return -EFAULT;
77326 if (!page_count(page))
77327 return -EINVAL;
77328 +
77329 +#ifdef CONFIG_PAX_SEGMEXEC
77330 + vma_m = pax_find_mirror_vma(vma);
77331 + if (vma_m)
77332 + vma_m->vm_flags |= VM_INSERTPAGE;
77333 +#endif
77334 +
77335 vma->vm_flags |= VM_INSERTPAGE;
77336 return insert_page(vma, addr, page, vma->vm_page_prot);
77337 }
77338 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77339 unsigned long pfn)
77340 {
77341 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77342 + BUG_ON(vma->vm_mirror);
77343
77344 if (addr < vma->vm_start || addr >= vma->vm_end)
77345 return -EFAULT;
77346 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77347 copy_user_highpage(dst, src, va, vma);
77348 }
77349
77350 +#ifdef CONFIG_PAX_SEGMEXEC
77351 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77352 +{
77353 + struct mm_struct *mm = vma->vm_mm;
77354 + spinlock_t *ptl;
77355 + pte_t *pte, entry;
77356 +
77357 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77358 + entry = *pte;
77359 + if (!pte_present(entry)) {
77360 + if (!pte_none(entry)) {
77361 + BUG_ON(pte_file(entry));
77362 + free_swap_and_cache(pte_to_swp_entry(entry));
77363 + pte_clear_not_present_full(mm, address, pte, 0);
77364 + }
77365 + } else {
77366 + struct page *page;
77367 +
77368 + flush_cache_page(vma, address, pte_pfn(entry));
77369 + entry = ptep_clear_flush(vma, address, pte);
77370 + BUG_ON(pte_dirty(entry));
77371 + page = vm_normal_page(vma, address, entry);
77372 + if (page) {
77373 + update_hiwater_rss(mm);
77374 + if (PageAnon(page))
77375 + dec_mm_counter(mm, anon_rss);
77376 + else
77377 + dec_mm_counter(mm, file_rss);
77378 + page_remove_rmap(page);
77379 + page_cache_release(page);
77380 + }
77381 + }
77382 + pte_unmap_unlock(pte, ptl);
77383 +}
77384 +
77385 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
77386 + *
77387 + * the ptl of the lower mapped page is held on entry and is not released on exit
77388 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77389 + */
77390 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77391 +{
77392 + struct mm_struct *mm = vma->vm_mm;
77393 + unsigned long address_m;
77394 + spinlock_t *ptl_m;
77395 + struct vm_area_struct *vma_m;
77396 + pmd_t *pmd_m;
77397 + pte_t *pte_m, entry_m;
77398 +
77399 + BUG_ON(!page_m || !PageAnon(page_m));
77400 +
77401 + vma_m = pax_find_mirror_vma(vma);
77402 + if (!vma_m)
77403 + return;
77404 +
77405 + BUG_ON(!PageLocked(page_m));
77406 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77407 + address_m = address + SEGMEXEC_TASK_SIZE;
77408 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77409 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77410 + ptl_m = pte_lockptr(mm, pmd_m);
77411 + if (ptl != ptl_m) {
77412 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77413 + if (!pte_none(*pte_m))
77414 + goto out;
77415 + }
77416 +
77417 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77418 + page_cache_get(page_m);
77419 + page_add_anon_rmap(page_m, vma_m, address_m);
77420 + inc_mm_counter(mm, anon_rss);
77421 + set_pte_at(mm, address_m, pte_m, entry_m);
77422 + update_mmu_cache(vma_m, address_m, entry_m);
77423 +out:
77424 + if (ptl != ptl_m)
77425 + spin_unlock(ptl_m);
77426 + pte_unmap_nested(pte_m);
77427 + unlock_page(page_m);
77428 +}
77429 +
77430 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77431 +{
77432 + struct mm_struct *mm = vma->vm_mm;
77433 + unsigned long address_m;
77434 + spinlock_t *ptl_m;
77435 + struct vm_area_struct *vma_m;
77436 + pmd_t *pmd_m;
77437 + pte_t *pte_m, entry_m;
77438 +
77439 + BUG_ON(!page_m || PageAnon(page_m));
77440 +
77441 + vma_m = pax_find_mirror_vma(vma);
77442 + if (!vma_m)
77443 + return;
77444 +
77445 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77446 + address_m = address + SEGMEXEC_TASK_SIZE;
77447 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77448 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77449 + ptl_m = pte_lockptr(mm, pmd_m);
77450 + if (ptl != ptl_m) {
77451 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77452 + if (!pte_none(*pte_m))
77453 + goto out;
77454 + }
77455 +
77456 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77457 + page_cache_get(page_m);
77458 + page_add_file_rmap(page_m);
77459 + inc_mm_counter(mm, file_rss);
77460 + set_pte_at(mm, address_m, pte_m, entry_m);
77461 + update_mmu_cache(vma_m, address_m, entry_m);
77462 +out:
77463 + if (ptl != ptl_m)
77464 + spin_unlock(ptl_m);
77465 + pte_unmap_nested(pte_m);
77466 +}
77467 +
77468 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77469 +{
77470 + struct mm_struct *mm = vma->vm_mm;
77471 + unsigned long address_m;
77472 + spinlock_t *ptl_m;
77473 + struct vm_area_struct *vma_m;
77474 + pmd_t *pmd_m;
77475 + pte_t *pte_m, entry_m;
77476 +
77477 + vma_m = pax_find_mirror_vma(vma);
77478 + if (!vma_m)
77479 + return;
77480 +
77481 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77482 + address_m = address + SEGMEXEC_TASK_SIZE;
77483 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77484 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77485 + ptl_m = pte_lockptr(mm, pmd_m);
77486 + if (ptl != ptl_m) {
77487 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77488 + if (!pte_none(*pte_m))
77489 + goto out;
77490 + }
77491 +
77492 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77493 + set_pte_at(mm, address_m, pte_m, entry_m);
77494 +out:
77495 + if (ptl != ptl_m)
77496 + spin_unlock(ptl_m);
77497 + pte_unmap_nested(pte_m);
77498 +}
77499 +
77500 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77501 +{
77502 + struct page *page_m;
77503 + pte_t entry;
77504 +
77505 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77506 + goto out;
77507 +
77508 + entry = *pte;
77509 + page_m = vm_normal_page(vma, address, entry);
77510 + if (!page_m)
77511 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77512 + else if (PageAnon(page_m)) {
77513 + if (pax_find_mirror_vma(vma)) {
77514 + pte_unmap_unlock(pte, ptl);
77515 + lock_page(page_m);
77516 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77517 + if (pte_same(entry, *pte))
77518 + pax_mirror_anon_pte(vma, address, page_m, ptl);
77519 + else
77520 + unlock_page(page_m);
77521 + }
77522 + } else
77523 + pax_mirror_file_pte(vma, address, page_m, ptl);
77524 +
77525 +out:
77526 + pte_unmap_unlock(pte, ptl);
77527 +}
77528 +#endif
77529 +
77530 /*
77531 * This routine handles present pages, when users try to write
77532 * to a shared page. It is done by copying the page to a new address
77533 @@ -2156,6 +2360,12 @@ gotten:
77534 */
77535 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77536 if (likely(pte_same(*page_table, orig_pte))) {
77537 +
77538 +#ifdef CONFIG_PAX_SEGMEXEC
77539 + if (pax_find_mirror_vma(vma))
77540 + BUG_ON(!trylock_page(new_page));
77541 +#endif
77542 +
77543 if (old_page) {
77544 if (!PageAnon(old_page)) {
77545 dec_mm_counter(mm, file_rss);
77546 @@ -2207,6 +2417,10 @@ gotten:
77547 page_remove_rmap(old_page);
77548 }
77549
77550 +#ifdef CONFIG_PAX_SEGMEXEC
77551 + pax_mirror_anon_pte(vma, address, new_page, ptl);
77552 +#endif
77553 +
77554 /* Free the old page.. */
77555 new_page = old_page;
77556 ret |= VM_FAULT_WRITE;
77557 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77558 swap_free(entry);
77559 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77560 try_to_free_swap(page);
77561 +
77562 +#ifdef CONFIG_PAX_SEGMEXEC
77563 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77564 +#endif
77565 +
77566 unlock_page(page);
77567
77568 if (flags & FAULT_FLAG_WRITE) {
77569 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77570
77571 /* No need to invalidate - it was non-present before */
77572 update_mmu_cache(vma, address, pte);
77573 +
77574 +#ifdef CONFIG_PAX_SEGMEXEC
77575 + pax_mirror_anon_pte(vma, address, page, ptl);
77576 +#endif
77577 +
77578 unlock:
77579 pte_unmap_unlock(page_table, ptl);
77580 out:
77581 @@ -2632,40 +2856,6 @@ out_release:
77582 }
77583
77584 /*
77585 - * This is like a special single-page "expand_{down|up}wards()",
77586 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
77587 - * doesn't hit another vma.
77588 - */
77589 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77590 -{
77591 - address &= PAGE_MASK;
77592 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77593 - struct vm_area_struct *prev = vma->vm_prev;
77594 -
77595 - /*
77596 - * Is there a mapping abutting this one below?
77597 - *
77598 - * That's only ok if it's the same stack mapping
77599 - * that has gotten split..
77600 - */
77601 - if (prev && prev->vm_end == address)
77602 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77603 -
77604 - expand_stack(vma, address - PAGE_SIZE);
77605 - }
77606 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77607 - struct vm_area_struct *next = vma->vm_next;
77608 -
77609 - /* As VM_GROWSDOWN but s/below/above/ */
77610 - if (next && next->vm_start == address + PAGE_SIZE)
77611 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77612 -
77613 - expand_upwards(vma, address + PAGE_SIZE);
77614 - }
77615 - return 0;
77616 -}
77617 -
77618 -/*
77619 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77620 * but allow concurrent faults), and pte mapped but not yet locked.
77621 * We return with mmap_sem still held, but pte unmapped and unlocked.
77622 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77623 unsigned long address, pte_t *page_table, pmd_t *pmd,
77624 unsigned int flags)
77625 {
77626 - struct page *page;
77627 + struct page *page = NULL;
77628 spinlock_t *ptl;
77629 pte_t entry;
77630
77631 - pte_unmap(page_table);
77632 -
77633 - /* Check if we need to add a guard page to the stack */
77634 - if (check_stack_guard_page(vma, address) < 0)
77635 - return VM_FAULT_SIGBUS;
77636 -
77637 - /* Use the zero-page for reads */
77638 if (!(flags & FAULT_FLAG_WRITE)) {
77639 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77640 vma->vm_page_prot));
77641 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77642 + ptl = pte_lockptr(mm, pmd);
77643 + spin_lock(ptl);
77644 if (!pte_none(*page_table))
77645 goto unlock;
77646 goto setpte;
77647 }
77648
77649 /* Allocate our own private page. */
77650 + pte_unmap(page_table);
77651 +
77652 if (unlikely(anon_vma_prepare(vma)))
77653 goto oom;
77654 page = alloc_zeroed_user_highpage_movable(vma, address);
77655 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77656 if (!pte_none(*page_table))
77657 goto release;
77658
77659 +#ifdef CONFIG_PAX_SEGMEXEC
77660 + if (pax_find_mirror_vma(vma))
77661 + BUG_ON(!trylock_page(page));
77662 +#endif
77663 +
77664 inc_mm_counter(mm, anon_rss);
77665 page_add_new_anon_rmap(page, vma, address);
77666 setpte:
77667 @@ -2720,6 +2911,12 @@ setpte:
77668
77669 /* No need to invalidate - it was non-present before */
77670 update_mmu_cache(vma, address, entry);
77671 +
77672 +#ifdef CONFIG_PAX_SEGMEXEC
77673 + if (page)
77674 + pax_mirror_anon_pte(vma, address, page, ptl);
77675 +#endif
77676 +
77677 unlock:
77678 pte_unmap_unlock(page_table, ptl);
77679 return 0;
77680 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77681 */
77682 /* Only go through if we didn't race with anybody else... */
77683 if (likely(pte_same(*page_table, orig_pte))) {
77684 +
77685 +#ifdef CONFIG_PAX_SEGMEXEC
77686 + if (anon && pax_find_mirror_vma(vma))
77687 + BUG_ON(!trylock_page(page));
77688 +#endif
77689 +
77690 flush_icache_page(vma, page);
77691 entry = mk_pte(page, vma->vm_page_prot);
77692 if (flags & FAULT_FLAG_WRITE)
77693 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77694
77695 /* no need to invalidate: a not-present page won't be cached */
77696 update_mmu_cache(vma, address, entry);
77697 +
77698 +#ifdef CONFIG_PAX_SEGMEXEC
77699 + if (anon)
77700 + pax_mirror_anon_pte(vma, address, page, ptl);
77701 + else
77702 + pax_mirror_file_pte(vma, address, page, ptl);
77703 +#endif
77704 +
77705 } else {
77706 if (charged)
77707 mem_cgroup_uncharge_page(page);
77708 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77709 if (flags & FAULT_FLAG_WRITE)
77710 flush_tlb_page(vma, address);
77711 }
77712 +
77713 +#ifdef CONFIG_PAX_SEGMEXEC
77714 + pax_mirror_pte(vma, address, pte, pmd, ptl);
77715 + return 0;
77716 +#endif
77717 +
77718 unlock:
77719 pte_unmap_unlock(pte, ptl);
77720 return 0;
77721 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77722 pmd_t *pmd;
77723 pte_t *pte;
77724
77725 +#ifdef CONFIG_PAX_SEGMEXEC
77726 + struct vm_area_struct *vma_m;
77727 +#endif
77728 +
77729 __set_current_state(TASK_RUNNING);
77730
77731 count_vm_event(PGFAULT);
77732 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77733 if (unlikely(is_vm_hugetlb_page(vma)))
77734 return hugetlb_fault(mm, vma, address, flags);
77735
77736 +#ifdef CONFIG_PAX_SEGMEXEC
77737 + vma_m = pax_find_mirror_vma(vma);
77738 + if (vma_m) {
77739 + unsigned long address_m;
77740 + pgd_t *pgd_m;
77741 + pud_t *pud_m;
77742 + pmd_t *pmd_m;
77743 +
77744 + if (vma->vm_start > vma_m->vm_start) {
77745 + address_m = address;
77746 + address -= SEGMEXEC_TASK_SIZE;
77747 + vma = vma_m;
77748 + } else
77749 + address_m = address + SEGMEXEC_TASK_SIZE;
77750 +
77751 + pgd_m = pgd_offset(mm, address_m);
77752 + pud_m = pud_alloc(mm, pgd_m, address_m);
77753 + if (!pud_m)
77754 + return VM_FAULT_OOM;
77755 + pmd_m = pmd_alloc(mm, pud_m, address_m);
77756 + if (!pmd_m)
77757 + return VM_FAULT_OOM;
77758 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77759 + return VM_FAULT_OOM;
77760 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77761 + }
77762 +#endif
77763 +
77764 pgd = pgd_offset(mm, address);
77765 pud = pud_alloc(mm, pgd, address);
77766 if (!pud)
77767 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77768 gate_vma.vm_start = FIXADDR_USER_START;
77769 gate_vma.vm_end = FIXADDR_USER_END;
77770 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77771 - gate_vma.vm_page_prot = __P101;
77772 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77773 /*
77774 * Make sure the vDSO gets into every core dump.
77775 * Dumping its contents makes post-mortem fully interpretable later
77776 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77777 index 3c6e3e2..b1ddbb8 100644
77778 --- a/mm/mempolicy.c
77779 +++ b/mm/mempolicy.c
77780 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77781 struct vm_area_struct *next;
77782 int err;
77783
77784 +#ifdef CONFIG_PAX_SEGMEXEC
77785 + struct vm_area_struct *vma_m;
77786 +#endif
77787 +
77788 err = 0;
77789 for (; vma && vma->vm_start < end; vma = next) {
77790 next = vma->vm_next;
77791 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77792 err = policy_vma(vma, new);
77793 if (err)
77794 break;
77795 +
77796 +#ifdef CONFIG_PAX_SEGMEXEC
77797 + vma_m = pax_find_mirror_vma(vma);
77798 + if (vma_m) {
77799 + err = policy_vma(vma_m, new);
77800 + if (err)
77801 + break;
77802 + }
77803 +#endif
77804 +
77805 }
77806 return err;
77807 }
77808 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77809
77810 if (end < start)
77811 return -EINVAL;
77812 +
77813 +#ifdef CONFIG_PAX_SEGMEXEC
77814 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77815 + if (end > SEGMEXEC_TASK_SIZE)
77816 + return -EINVAL;
77817 + } else
77818 +#endif
77819 +
77820 + if (end > TASK_SIZE)
77821 + return -EINVAL;
77822 +
77823 if (end == start)
77824 return 0;
77825
77826 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77827 if (!mm)
77828 return -EINVAL;
77829
77830 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77831 + if (mm != current->mm &&
77832 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77833 + err = -EPERM;
77834 + goto out;
77835 + }
77836 +#endif
77837 +
77838 /*
77839 * Check if this process has the right to modify the specified
77840 * process. The right exists if the process has administrative
77841 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77842 rcu_read_lock();
77843 tcred = __task_cred(task);
77844 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77845 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77846 - !capable(CAP_SYS_NICE)) {
77847 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77848 rcu_read_unlock();
77849 err = -EPERM;
77850 goto out;
77851 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
77852 }
77853 #endif
77854
77855 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77856 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
77857 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
77858 + _mm->pax_flags & MF_PAX_SEGMEXEC))
77859 +#endif
77860 +
77861 /*
77862 * Display pages allocated per node and memory policy via /proc.
77863 */
77864 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
77865 int n;
77866 char buffer[50];
77867
77868 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77869 + if (current->exec_id != m->exec_id) {
77870 + gr_log_badprocpid("numa_maps");
77871 + return 0;
77872 + }
77873 +#endif
77874 +
77875 if (!mm)
77876 return 0;
77877
77878 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
77879 mpol_to_str(buffer, sizeof(buffer), pol, 0);
77880 mpol_cond_put(pol);
77881
77882 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77883 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
77884 +#else
77885 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
77886 +#endif
77887
77888 if (file) {
77889 seq_printf(m, " file=");
77890 - seq_path(m, &file->f_path, "\n\t= ");
77891 + seq_path(m, &file->f_path, "\n\t\\= ");
77892 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77893 seq_printf(m, " heap");
77894 } else if (vma->vm_start <= mm->start_stack &&
77895 diff --git a/mm/migrate.c b/mm/migrate.c
77896 index aaca868..2ebecdc 100644
77897 --- a/mm/migrate.c
77898 +++ b/mm/migrate.c
77899 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77900 unsigned long chunk_start;
77901 int err;
77902
77903 + pax_track_stack();
77904 +
77905 task_nodes = cpuset_mems_allowed(task);
77906
77907 err = -ENOMEM;
77908 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77909 if (!mm)
77910 return -EINVAL;
77911
77912 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77913 + if (mm != current->mm &&
77914 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77915 + err = -EPERM;
77916 + goto out;
77917 + }
77918 +#endif
77919 +
77920 /*
77921 * Check if this process has the right to modify the specified
77922 * process. The right exists if the process has administrative
77923 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77924 rcu_read_lock();
77925 tcred = __task_cred(task);
77926 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77927 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77928 - !capable(CAP_SYS_NICE)) {
77929 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77930 rcu_read_unlock();
77931 err = -EPERM;
77932 goto out;
77933 diff --git a/mm/mlock.c b/mm/mlock.c
77934 index 2d846cf..98134d2 100644
77935 --- a/mm/mlock.c
77936 +++ b/mm/mlock.c
77937 @@ -13,6 +13,7 @@
77938 #include <linux/pagemap.h>
77939 #include <linux/mempolicy.h>
77940 #include <linux/syscalls.h>
77941 +#include <linux/security.h>
77942 #include <linux/sched.h>
77943 #include <linux/module.h>
77944 #include <linux/rmap.h>
77945 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77946 }
77947 }
77948
77949 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77950 -{
77951 - return (vma->vm_flags & VM_GROWSDOWN) &&
77952 - (vma->vm_start == addr) &&
77953 - !vma_stack_continue(vma->vm_prev, addr);
77954 -}
77955 -
77956 /**
77957 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77958 * @vma: target vma
77959 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77960 if (vma->vm_flags & VM_WRITE)
77961 gup_flags |= FOLL_WRITE;
77962
77963 - /* We don't try to access the guard page of a stack vma */
77964 - if (stack_guard_page(vma, start)) {
77965 - addr += PAGE_SIZE;
77966 - nr_pages--;
77967 - }
77968 -
77969 while (nr_pages > 0) {
77970 int i;
77971
77972 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77973 {
77974 unsigned long nstart, end, tmp;
77975 struct vm_area_struct * vma, * prev;
77976 - int error;
77977 + int error = -EINVAL;
77978
77979 len = PAGE_ALIGN(len);
77980 end = start + len;
77981 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77982 return -EINVAL;
77983 if (end == start)
77984 return 0;
77985 + if (end > TASK_SIZE)
77986 + return -EINVAL;
77987 +
77988 vma = find_vma_prev(current->mm, start, &prev);
77989 if (!vma || vma->vm_start > start)
77990 return -ENOMEM;
77991 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77992 for (nstart = start ; ; ) {
77993 unsigned int newflags;
77994
77995 +#ifdef CONFIG_PAX_SEGMEXEC
77996 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77997 + break;
77998 +#endif
77999 +
78000 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78001
78002 newflags = vma->vm_flags | VM_LOCKED;
78003 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78004 lock_limit >>= PAGE_SHIFT;
78005
78006 /* check against resource limits */
78007 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78008 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78009 error = do_mlock(start, len, 1);
78010 up_write(&current->mm->mmap_sem);
78011 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78012 static int do_mlockall(int flags)
78013 {
78014 struct vm_area_struct * vma, * prev = NULL;
78015 - unsigned int def_flags = 0;
78016
78017 if (flags & MCL_FUTURE)
78018 - def_flags = VM_LOCKED;
78019 - current->mm->def_flags = def_flags;
78020 + current->mm->def_flags |= VM_LOCKED;
78021 + else
78022 + current->mm->def_flags &= ~VM_LOCKED;
78023 if (flags == MCL_FUTURE)
78024 goto out;
78025
78026 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78027 - unsigned int newflags;
78028 + unsigned long newflags;
78029
78030 +#ifdef CONFIG_PAX_SEGMEXEC
78031 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78032 + break;
78033 +#endif
78034 +
78035 + BUG_ON(vma->vm_end > TASK_SIZE);
78036 newflags = vma->vm_flags | VM_LOCKED;
78037 if (!(flags & MCL_CURRENT))
78038 newflags &= ~VM_LOCKED;
78039 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78040 lock_limit >>= PAGE_SHIFT;
78041
78042 ret = -ENOMEM;
78043 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78044 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78045 capable(CAP_IPC_LOCK))
78046 ret = do_mlockall(flags);
78047 diff --git a/mm/mmap.c b/mm/mmap.c
78048 index 4b80cbf..cd3731c 100644
78049 --- a/mm/mmap.c
78050 +++ b/mm/mmap.c
78051 @@ -45,6 +45,16 @@
78052 #define arch_rebalance_pgtables(addr, len) (addr)
78053 #endif
78054
78055 +static inline void verify_mm_writelocked(struct mm_struct *mm)
78056 +{
78057 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78058 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78059 + up_read(&mm->mmap_sem);
78060 + BUG();
78061 + }
78062 +#endif
78063 +}
78064 +
78065 static void unmap_region(struct mm_struct *mm,
78066 struct vm_area_struct *vma, struct vm_area_struct *prev,
78067 unsigned long start, unsigned long end);
78068 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78069 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78070 *
78071 */
78072 -pgprot_t protection_map[16] = {
78073 +pgprot_t protection_map[16] __read_only = {
78074 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78075 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78076 };
78077
78078 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78079 {
78080 - return __pgprot(pgprot_val(protection_map[vm_flags &
78081 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78082 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78083 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78084 +
78085 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78086 + if (!nx_enabled &&
78087 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78088 + (vm_flags & (VM_READ | VM_WRITE)))
78089 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78090 +#endif
78091 +
78092 + return prot;
78093 }
78094 EXPORT_SYMBOL(vm_get_page_prot);
78095
78096 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78097 int sysctl_overcommit_ratio = 50; /* default is 50% */
78098 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78099 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78100 struct percpu_counter vm_committed_as;
78101
78102 /*
78103 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78104 struct vm_area_struct *next = vma->vm_next;
78105
78106 might_sleep();
78107 + BUG_ON(vma->vm_mirror);
78108 if (vma->vm_ops && vma->vm_ops->close)
78109 vma->vm_ops->close(vma);
78110 if (vma->vm_file) {
78111 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78112 * not page aligned -Ram Gupta
78113 */
78114 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78115 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78116 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78117 (mm->end_data - mm->start_data) > rlim)
78118 goto out;
78119 @@ -704,6 +726,12 @@ static int
78120 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78121 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78122 {
78123 +
78124 +#ifdef CONFIG_PAX_SEGMEXEC
78125 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78126 + return 0;
78127 +#endif
78128 +
78129 if (is_mergeable_vma(vma, file, vm_flags) &&
78130 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78131 if (vma->vm_pgoff == vm_pgoff)
78132 @@ -723,6 +751,12 @@ static int
78133 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78134 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78135 {
78136 +
78137 +#ifdef CONFIG_PAX_SEGMEXEC
78138 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78139 + return 0;
78140 +#endif
78141 +
78142 if (is_mergeable_vma(vma, file, vm_flags) &&
78143 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78144 pgoff_t vm_pglen;
78145 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78146 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78147 struct vm_area_struct *prev, unsigned long addr,
78148 unsigned long end, unsigned long vm_flags,
78149 - struct anon_vma *anon_vma, struct file *file,
78150 + struct anon_vma *anon_vma, struct file *file,
78151 pgoff_t pgoff, struct mempolicy *policy)
78152 {
78153 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78154 struct vm_area_struct *area, *next;
78155
78156 +#ifdef CONFIG_PAX_SEGMEXEC
78157 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78158 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78159 +
78160 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78161 +#endif
78162 +
78163 /*
78164 * We later require that vma->vm_flags == vm_flags,
78165 * so this tests vma->vm_flags & VM_SPECIAL, too.
78166 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78167 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78168 next = next->vm_next;
78169
78170 +#ifdef CONFIG_PAX_SEGMEXEC
78171 + if (prev)
78172 + prev_m = pax_find_mirror_vma(prev);
78173 + if (area)
78174 + area_m = pax_find_mirror_vma(area);
78175 + if (next)
78176 + next_m = pax_find_mirror_vma(next);
78177 +#endif
78178 +
78179 /*
78180 * Can it merge with the predecessor?
78181 */
78182 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78183 /* cases 1, 6 */
78184 vma_adjust(prev, prev->vm_start,
78185 next->vm_end, prev->vm_pgoff, NULL);
78186 - } else /* cases 2, 5, 7 */
78187 +
78188 +#ifdef CONFIG_PAX_SEGMEXEC
78189 + if (prev_m)
78190 + vma_adjust(prev_m, prev_m->vm_start,
78191 + next_m->vm_end, prev_m->vm_pgoff, NULL);
78192 +#endif
78193 +
78194 + } else { /* cases 2, 5, 7 */
78195 vma_adjust(prev, prev->vm_start,
78196 end, prev->vm_pgoff, NULL);
78197 +
78198 +#ifdef CONFIG_PAX_SEGMEXEC
78199 + if (prev_m)
78200 + vma_adjust(prev_m, prev_m->vm_start,
78201 + end_m, prev_m->vm_pgoff, NULL);
78202 +#endif
78203 +
78204 + }
78205 return prev;
78206 }
78207
78208 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78209 mpol_equal(policy, vma_policy(next)) &&
78210 can_vma_merge_before(next, vm_flags,
78211 anon_vma, file, pgoff+pglen)) {
78212 - if (prev && addr < prev->vm_end) /* case 4 */
78213 + if (prev && addr < prev->vm_end) { /* case 4 */
78214 vma_adjust(prev, prev->vm_start,
78215 addr, prev->vm_pgoff, NULL);
78216 - else /* cases 3, 8 */
78217 +
78218 +#ifdef CONFIG_PAX_SEGMEXEC
78219 + if (prev_m)
78220 + vma_adjust(prev_m, prev_m->vm_start,
78221 + addr_m, prev_m->vm_pgoff, NULL);
78222 +#endif
78223 +
78224 + } else { /* cases 3, 8 */
78225 vma_adjust(area, addr, next->vm_end,
78226 next->vm_pgoff - pglen, NULL);
78227 +
78228 +#ifdef CONFIG_PAX_SEGMEXEC
78229 + if (area_m)
78230 + vma_adjust(area_m, addr_m, next_m->vm_end,
78231 + next_m->vm_pgoff - pglen, NULL);
78232 +#endif
78233 +
78234 + }
78235 return area;
78236 }
78237
78238 @@ -898,14 +978,11 @@ none:
78239 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78240 struct file *file, long pages)
78241 {
78242 - const unsigned long stack_flags
78243 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78244 -
78245 if (file) {
78246 mm->shared_vm += pages;
78247 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78248 mm->exec_vm += pages;
78249 - } else if (flags & stack_flags)
78250 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78251 mm->stack_vm += pages;
78252 if (flags & (VM_RESERVED|VM_IO))
78253 mm->reserved_vm += pages;
78254 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78255 * (the exception is when the underlying filesystem is noexec
78256 * mounted, in which case we dont add PROT_EXEC.)
78257 */
78258 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78259 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78260 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78261 prot |= PROT_EXEC;
78262
78263 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78264 /* Obtain the address to map to. we verify (or select) it and ensure
78265 * that it represents a valid section of the address space.
78266 */
78267 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
78268 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78269 if (addr & ~PAGE_MASK)
78270 return addr;
78271
78272 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78273 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78274 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78275
78276 +#ifdef CONFIG_PAX_MPROTECT
78277 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78278 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78279 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78280 + gr_log_rwxmmap(file);
78281 +
78282 +#ifdef CONFIG_PAX_EMUPLT
78283 + vm_flags &= ~VM_EXEC;
78284 +#else
78285 + return -EPERM;
78286 +#endif
78287 +
78288 + }
78289 +
78290 + if (!(vm_flags & VM_EXEC))
78291 + vm_flags &= ~VM_MAYEXEC;
78292 +#else
78293 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78294 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78295 +#endif
78296 + else
78297 + vm_flags &= ~VM_MAYWRITE;
78298 + }
78299 +#endif
78300 +
78301 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78302 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78303 + vm_flags &= ~VM_PAGEEXEC;
78304 +#endif
78305 +
78306 if (flags & MAP_LOCKED)
78307 if (!can_do_mlock())
78308 return -EPERM;
78309 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78310 locked += mm->locked_vm;
78311 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78312 lock_limit >>= PAGE_SHIFT;
78313 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78314 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78315 return -EAGAIN;
78316 }
78317 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78318 if (error)
78319 return error;
78320
78321 + if (!gr_acl_handle_mmap(file, prot))
78322 + return -EACCES;
78323 +
78324 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78325 }
78326 EXPORT_SYMBOL(do_mmap_pgoff);
78327 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78328 */
78329 int vma_wants_writenotify(struct vm_area_struct *vma)
78330 {
78331 - unsigned int vm_flags = vma->vm_flags;
78332 + unsigned long vm_flags = vma->vm_flags;
78333
78334 /* If it was private or non-writable, the write bit is already clear */
78335 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78336 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78337 return 0;
78338
78339 /* The backer wishes to know when pages are first written to? */
78340 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78341 unsigned long charged = 0;
78342 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78343
78344 +#ifdef CONFIG_PAX_SEGMEXEC
78345 + struct vm_area_struct *vma_m = NULL;
78346 +#endif
78347 +
78348 + /*
78349 + * mm->mmap_sem is required to protect against another thread
78350 + * changing the mappings in case we sleep.
78351 + */
78352 + verify_mm_writelocked(mm);
78353 +
78354 /* Clear old maps */
78355 error = -ENOMEM;
78356 -munmap_back:
78357 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78358 if (vma && vma->vm_start < addr + len) {
78359 if (do_munmap(mm, addr, len))
78360 return -ENOMEM;
78361 - goto munmap_back;
78362 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78363 + BUG_ON(vma && vma->vm_start < addr + len);
78364 }
78365
78366 /* Check against address space limit. */
78367 @@ -1173,6 +1294,16 @@ munmap_back:
78368 goto unacct_error;
78369 }
78370
78371 +#ifdef CONFIG_PAX_SEGMEXEC
78372 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78373 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78374 + if (!vma_m) {
78375 + error = -ENOMEM;
78376 + goto free_vma;
78377 + }
78378 + }
78379 +#endif
78380 +
78381 vma->vm_mm = mm;
78382 vma->vm_start = addr;
78383 vma->vm_end = addr + len;
78384 @@ -1195,6 +1326,19 @@ munmap_back:
78385 error = file->f_op->mmap(file, vma);
78386 if (error)
78387 goto unmap_and_free_vma;
78388 +
78389 +#ifdef CONFIG_PAX_SEGMEXEC
78390 + if (vma_m && (vm_flags & VM_EXECUTABLE))
78391 + added_exe_file_vma(mm);
78392 +#endif
78393 +
78394 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78395 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78396 + vma->vm_flags |= VM_PAGEEXEC;
78397 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78398 + }
78399 +#endif
78400 +
78401 if (vm_flags & VM_EXECUTABLE)
78402 added_exe_file_vma(mm);
78403
78404 @@ -1218,6 +1362,11 @@ munmap_back:
78405 vma_link(mm, vma, prev, rb_link, rb_parent);
78406 file = vma->vm_file;
78407
78408 +#ifdef CONFIG_PAX_SEGMEXEC
78409 + if (vma_m)
78410 + pax_mirror_vma(vma_m, vma);
78411 +#endif
78412 +
78413 /* Once vma denies write, undo our temporary denial count */
78414 if (correct_wcount)
78415 atomic_inc(&inode->i_writecount);
78416 @@ -1226,6 +1375,7 @@ out:
78417
78418 mm->total_vm += len >> PAGE_SHIFT;
78419 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78420 + track_exec_limit(mm, addr, addr + len, vm_flags);
78421 if (vm_flags & VM_LOCKED) {
78422 /*
78423 * makes pages present; downgrades, drops, reacquires mmap_sem
78424 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78425 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78426 charged = 0;
78427 free_vma:
78428 +
78429 +#ifdef CONFIG_PAX_SEGMEXEC
78430 + if (vma_m)
78431 + kmem_cache_free(vm_area_cachep, vma_m);
78432 +#endif
78433 +
78434 kmem_cache_free(vm_area_cachep, vma);
78435 unacct_error:
78436 if (charged)
78437 @@ -1255,6 +1411,44 @@ unacct_error:
78438 return error;
78439 }
78440
78441 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78442 +{
78443 + if (!vma) {
78444 +#ifdef CONFIG_STACK_GROWSUP
78445 + if (addr > sysctl_heap_stack_gap)
78446 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78447 + else
78448 + vma = find_vma(current->mm, 0);
78449 + if (vma && (vma->vm_flags & VM_GROWSUP))
78450 + return false;
78451 +#endif
78452 + return true;
78453 + }
78454 +
78455 + if (addr + len > vma->vm_start)
78456 + return false;
78457 +
78458 + if (vma->vm_flags & VM_GROWSDOWN)
78459 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78460 +#ifdef CONFIG_STACK_GROWSUP
78461 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78462 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78463 +#endif
78464 +
78465 + return true;
78466 +}
78467 +
78468 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78469 +{
78470 + if (vma->vm_start < len)
78471 + return -ENOMEM;
78472 + if (!(vma->vm_flags & VM_GROWSDOWN))
78473 + return vma->vm_start - len;
78474 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
78475 + return vma->vm_start - len - sysctl_heap_stack_gap;
78476 + return -ENOMEM;
78477 +}
78478 +
78479 /* Get an address range which is currently unmapped.
78480 * For shmat() with addr=0.
78481 *
78482 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78483 if (flags & MAP_FIXED)
78484 return addr;
78485
78486 +#ifdef CONFIG_PAX_RANDMMAP
78487 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78488 +#endif
78489 +
78490 if (addr) {
78491 addr = PAGE_ALIGN(addr);
78492 - vma = find_vma(mm, addr);
78493 - if (TASK_SIZE - len >= addr &&
78494 - (!vma || addr + len <= vma->vm_start))
78495 - return addr;
78496 + if (TASK_SIZE - len >= addr) {
78497 + vma = find_vma(mm, addr);
78498 + if (check_heap_stack_gap(vma, addr, len))
78499 + return addr;
78500 + }
78501 }
78502 if (len > mm->cached_hole_size) {
78503 - start_addr = addr = mm->free_area_cache;
78504 + start_addr = addr = mm->free_area_cache;
78505 } else {
78506 - start_addr = addr = TASK_UNMAPPED_BASE;
78507 - mm->cached_hole_size = 0;
78508 + start_addr = addr = mm->mmap_base;
78509 + mm->cached_hole_size = 0;
78510 }
78511
78512 full_search:
78513 @@ -1303,34 +1502,40 @@ full_search:
78514 * Start a new search - just in case we missed
78515 * some holes.
78516 */
78517 - if (start_addr != TASK_UNMAPPED_BASE) {
78518 - addr = TASK_UNMAPPED_BASE;
78519 - start_addr = addr;
78520 + if (start_addr != mm->mmap_base) {
78521 + start_addr = addr = mm->mmap_base;
78522 mm->cached_hole_size = 0;
78523 goto full_search;
78524 }
78525 return -ENOMEM;
78526 }
78527 - if (!vma || addr + len <= vma->vm_start) {
78528 - /*
78529 - * Remember the place where we stopped the search:
78530 - */
78531 - mm->free_area_cache = addr + len;
78532 - return addr;
78533 - }
78534 + if (check_heap_stack_gap(vma, addr, len))
78535 + break;
78536 if (addr + mm->cached_hole_size < vma->vm_start)
78537 mm->cached_hole_size = vma->vm_start - addr;
78538 addr = vma->vm_end;
78539 }
78540 +
78541 + /*
78542 + * Remember the place where we stopped the search:
78543 + */
78544 + mm->free_area_cache = addr + len;
78545 + return addr;
78546 }
78547 #endif
78548
78549 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78550 {
78551 +
78552 +#ifdef CONFIG_PAX_SEGMEXEC
78553 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78554 + return;
78555 +#endif
78556 +
78557 /*
78558 * Is this a new hole at the lowest possible address?
78559 */
78560 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78561 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78562 mm->free_area_cache = addr;
78563 mm->cached_hole_size = ~0UL;
78564 }
78565 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78566 {
78567 struct vm_area_struct *vma;
78568 struct mm_struct *mm = current->mm;
78569 - unsigned long addr = addr0;
78570 + unsigned long base = mm->mmap_base, addr = addr0;
78571
78572 /* requested length too big for entire address space */
78573 if (len > TASK_SIZE)
78574 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78575 if (flags & MAP_FIXED)
78576 return addr;
78577
78578 +#ifdef CONFIG_PAX_RANDMMAP
78579 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78580 +#endif
78581 +
78582 /* requesting a specific address */
78583 if (addr) {
78584 addr = PAGE_ALIGN(addr);
78585 - vma = find_vma(mm, addr);
78586 - if (TASK_SIZE - len >= addr &&
78587 - (!vma || addr + len <= vma->vm_start))
78588 - return addr;
78589 + if (TASK_SIZE - len >= addr) {
78590 + vma = find_vma(mm, addr);
78591 + if (check_heap_stack_gap(vma, addr, len))
78592 + return addr;
78593 + }
78594 }
78595
78596 /* check if free_area_cache is useful for us */
78597 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78598 /* make sure it can fit in the remaining address space */
78599 if (addr > len) {
78600 vma = find_vma(mm, addr-len);
78601 - if (!vma || addr <= vma->vm_start)
78602 + if (check_heap_stack_gap(vma, addr - len, len))
78603 /* remember the address as a hint for next time */
78604 return (mm->free_area_cache = addr-len);
78605 }
78606 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78607 * return with success:
78608 */
78609 vma = find_vma(mm, addr);
78610 - if (!vma || addr+len <= vma->vm_start)
78611 + if (check_heap_stack_gap(vma, addr, len))
78612 /* remember the address as a hint for next time */
78613 return (mm->free_area_cache = addr);
78614
78615 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78616 mm->cached_hole_size = vma->vm_start - addr;
78617
78618 /* try just below the current vma->vm_start */
78619 - addr = vma->vm_start-len;
78620 - } while (len < vma->vm_start);
78621 + addr = skip_heap_stack_gap(vma, len);
78622 + } while (!IS_ERR_VALUE(addr));
78623
78624 bottomup:
78625 /*
78626 @@ -1414,13 +1624,21 @@ bottomup:
78627 * can happen with large stack limits and large mmap()
78628 * allocations.
78629 */
78630 + mm->mmap_base = TASK_UNMAPPED_BASE;
78631 +
78632 +#ifdef CONFIG_PAX_RANDMMAP
78633 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78634 + mm->mmap_base += mm->delta_mmap;
78635 +#endif
78636 +
78637 + mm->free_area_cache = mm->mmap_base;
78638 mm->cached_hole_size = ~0UL;
78639 - mm->free_area_cache = TASK_UNMAPPED_BASE;
78640 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78641 /*
78642 * Restore the topdown base:
78643 */
78644 - mm->free_area_cache = mm->mmap_base;
78645 + mm->mmap_base = base;
78646 + mm->free_area_cache = base;
78647 mm->cached_hole_size = ~0UL;
78648
78649 return addr;
78650 @@ -1429,6 +1647,12 @@ bottomup:
78651
78652 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78653 {
78654 +
78655 +#ifdef CONFIG_PAX_SEGMEXEC
78656 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78657 + return;
78658 +#endif
78659 +
78660 /*
78661 * Is this a new hole at the highest possible address?
78662 */
78663 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78664 mm->free_area_cache = addr;
78665
78666 /* dont allow allocations above current base */
78667 - if (mm->free_area_cache > mm->mmap_base)
78668 + if (mm->free_area_cache > mm->mmap_base) {
78669 mm->free_area_cache = mm->mmap_base;
78670 + mm->cached_hole_size = ~0UL;
78671 + }
78672 }
78673
78674 unsigned long
78675 @@ -1510,40 +1736,41 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78676
78677 EXPORT_SYMBOL(find_vma);
78678
78679 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78680 +/*
78681 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78682 + * Note: pprev is set to NULL when return value is NULL.
78683 + */
78684 struct vm_area_struct *
78685 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78686 struct vm_area_struct **pprev)
78687 {
78688 - struct vm_area_struct *vma = NULL, *prev = NULL;
78689 - struct rb_node *rb_node;
78690 - if (!mm)
78691 - goto out;
78692 + struct vm_area_struct *vma;
78693
78694 - /* Guard against addr being lower than the first VMA */
78695 - vma = mm->mmap;
78696 + vma = find_vma(mm, addr);
78697 + *pprev = vma ? vma->vm_prev : NULL;
78698 + return vma;
78699 +}
78700
78701 - /* Go through the RB tree quickly. */
78702 - rb_node = mm->mm_rb.rb_node;
78703 +#ifdef CONFIG_PAX_SEGMEXEC
78704 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78705 +{
78706 + struct vm_area_struct *vma_m;
78707
78708 - while (rb_node) {
78709 - struct vm_area_struct *vma_tmp;
78710 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78711 -
78712 - if (addr < vma_tmp->vm_end) {
78713 - rb_node = rb_node->rb_left;
78714 - } else {
78715 - prev = vma_tmp;
78716 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
78717 - break;
78718 - rb_node = rb_node->rb_right;
78719 - }
78720 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78721 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78722 + BUG_ON(vma->vm_mirror);
78723 + return NULL;
78724 }
78725 -
78726 -out:
78727 - *pprev = prev;
78728 - return prev ? prev->vm_next : vma;
78729 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78730 + vma_m = vma->vm_mirror;
78731 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78732 + BUG_ON(vma->vm_file != vma_m->vm_file);
78733 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78734 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78735 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78736 + return vma_m;
78737 }
78738 +#endif
78739
78740 /*
78741 * Verify that the stack growth is acceptable and
78742 @@ -1561,6 +1788,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78743 return -ENOMEM;
78744
78745 /* Stack limit test */
78746 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
78747 if (size > rlim[RLIMIT_STACK].rlim_cur)
78748 return -ENOMEM;
78749
78750 @@ -1570,6 +1798,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78751 unsigned long limit;
78752 locked = mm->locked_vm + grow;
78753 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78754 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78755 if (locked > limit && !capable(CAP_IPC_LOCK))
78756 return -ENOMEM;
78757 }
78758 @@ -1600,37 +1829,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78759 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78760 * vma is the last one with address > vma->vm_end. Have to extend vma.
78761 */
78762 +#ifndef CONFIG_IA64
78763 +static
78764 +#endif
78765 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78766 {
78767 int error;
78768 + bool locknext;
78769
78770 if (!(vma->vm_flags & VM_GROWSUP))
78771 return -EFAULT;
78772
78773 + /* Also guard against wrapping around to address 0. */
78774 + if (address < PAGE_ALIGN(address+1))
78775 + address = PAGE_ALIGN(address+1);
78776 + else
78777 + return -ENOMEM;
78778 +
78779 /*
78780 * We must make sure the anon_vma is allocated
78781 * so that the anon_vma locking is not a noop.
78782 */
78783 if (unlikely(anon_vma_prepare(vma)))
78784 return -ENOMEM;
78785 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78786 + if (locknext && anon_vma_prepare(vma->vm_next))
78787 + return -ENOMEM;
78788 anon_vma_lock(vma);
78789 + if (locknext)
78790 + anon_vma_lock(vma->vm_next);
78791
78792 /*
78793 * vma->vm_start/vm_end cannot change under us because the caller
78794 * is required to hold the mmap_sem in read mode. We need the
78795 - * anon_vma lock to serialize against concurrent expand_stacks.
78796 - * Also guard against wrapping around to address 0.
78797 + * anon_vma locks to serialize against concurrent expand_stacks
78798 + * and expand_upwards.
78799 */
78800 - if (address < PAGE_ALIGN(address+4))
78801 - address = PAGE_ALIGN(address+4);
78802 - else {
78803 - anon_vma_unlock(vma);
78804 - return -ENOMEM;
78805 - }
78806 error = 0;
78807
78808 /* Somebody else might have raced and expanded it already */
78809 - if (address > vma->vm_end) {
78810 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78811 + error = -ENOMEM;
78812 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78813 unsigned long size, grow;
78814
78815 size = address - vma->vm_start;
78816 @@ -1643,6 +1883,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78817 vma->vm_end = address;
78818 }
78819 }
78820 + if (locknext)
78821 + anon_vma_unlock(vma->vm_next);
78822 anon_vma_unlock(vma);
78823 return error;
78824 }
78825 @@ -1655,6 +1897,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78826 unsigned long address)
78827 {
78828 int error;
78829 + bool lockprev = false;
78830 + struct vm_area_struct *prev;
78831
78832 /*
78833 * We must make sure the anon_vma is allocated
78834 @@ -1668,6 +1912,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78835 if (error)
78836 return error;
78837
78838 + prev = vma->vm_prev;
78839 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78840 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78841 +#endif
78842 + if (lockprev && anon_vma_prepare(prev))
78843 + return -ENOMEM;
78844 + if (lockprev)
78845 + anon_vma_lock(prev);
78846 +
78847 anon_vma_lock(vma);
78848
78849 /*
78850 @@ -1677,9 +1930,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78851 */
78852
78853 /* Somebody else might have raced and expanded it already */
78854 - if (address < vma->vm_start) {
78855 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78856 + error = -ENOMEM;
78857 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78858 unsigned long size, grow;
78859
78860 +#ifdef CONFIG_PAX_SEGMEXEC
78861 + struct vm_area_struct *vma_m;
78862 +
78863 + vma_m = pax_find_mirror_vma(vma);
78864 +#endif
78865 +
78866 size = vma->vm_end - address;
78867 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78868
78869 @@ -1689,10 +1950,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78870 if (!error) {
78871 vma->vm_start = address;
78872 vma->vm_pgoff -= grow;
78873 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78874 +
78875 +#ifdef CONFIG_PAX_SEGMEXEC
78876 + if (vma_m) {
78877 + vma_m->vm_start -= grow << PAGE_SHIFT;
78878 + vma_m->vm_pgoff -= grow;
78879 + }
78880 +#endif
78881 +
78882 +
78883 }
78884 }
78885 }
78886 anon_vma_unlock(vma);
78887 + if (lockprev)
78888 + anon_vma_unlock(prev);
78889 return error;
78890 }
78891
78892 @@ -1768,6 +2041,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78893 do {
78894 long nrpages = vma_pages(vma);
78895
78896 +#ifdef CONFIG_PAX_SEGMEXEC
78897 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78898 + vma = remove_vma(vma);
78899 + continue;
78900 + }
78901 +#endif
78902 +
78903 mm->total_vm -= nrpages;
78904 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78905 vma = remove_vma(vma);
78906 @@ -1813,6 +2093,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78907 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78908 vma->vm_prev = NULL;
78909 do {
78910 +
78911 +#ifdef CONFIG_PAX_SEGMEXEC
78912 + if (vma->vm_mirror) {
78913 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78914 + vma->vm_mirror->vm_mirror = NULL;
78915 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
78916 + vma->vm_mirror = NULL;
78917 + }
78918 +#endif
78919 +
78920 rb_erase(&vma->vm_rb, &mm->mm_rb);
78921 mm->map_count--;
78922 tail_vma = vma;
78923 @@ -1840,10 +2130,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78924 struct mempolicy *pol;
78925 struct vm_area_struct *new;
78926
78927 +#ifdef CONFIG_PAX_SEGMEXEC
78928 + struct vm_area_struct *vma_m, *new_m = NULL;
78929 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78930 +#endif
78931 +
78932 if (is_vm_hugetlb_page(vma) && (addr &
78933 ~(huge_page_mask(hstate_vma(vma)))))
78934 return -EINVAL;
78935
78936 +#ifdef CONFIG_PAX_SEGMEXEC
78937 + vma_m = pax_find_mirror_vma(vma);
78938 +
78939 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78940 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78941 + if (mm->map_count >= sysctl_max_map_count-1)
78942 + return -ENOMEM;
78943 + } else
78944 +#endif
78945 +
78946 if (mm->map_count >= sysctl_max_map_count)
78947 return -ENOMEM;
78948
78949 @@ -1851,6 +2156,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78950 if (!new)
78951 return -ENOMEM;
78952
78953 +#ifdef CONFIG_PAX_SEGMEXEC
78954 + if (vma_m) {
78955 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78956 + if (!new_m) {
78957 + kmem_cache_free(vm_area_cachep, new);
78958 + return -ENOMEM;
78959 + }
78960 + }
78961 +#endif
78962 +
78963 /* most fields are the same, copy all, and then fixup */
78964 *new = *vma;
78965
78966 @@ -1861,8 +2176,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78967 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78968 }
78969
78970 +#ifdef CONFIG_PAX_SEGMEXEC
78971 + if (vma_m) {
78972 + *new_m = *vma_m;
78973 + new_m->vm_mirror = new;
78974 + new->vm_mirror = new_m;
78975 +
78976 + if (new_below)
78977 + new_m->vm_end = addr_m;
78978 + else {
78979 + new_m->vm_start = addr_m;
78980 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78981 + }
78982 + }
78983 +#endif
78984 +
78985 pol = mpol_dup(vma_policy(vma));
78986 if (IS_ERR(pol)) {
78987 +
78988 +#ifdef CONFIG_PAX_SEGMEXEC
78989 + if (new_m)
78990 + kmem_cache_free(vm_area_cachep, new_m);
78991 +#endif
78992 +
78993 kmem_cache_free(vm_area_cachep, new);
78994 return PTR_ERR(pol);
78995 }
78996 @@ -1883,6 +2219,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78997 else
78998 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78999
79000 +#ifdef CONFIG_PAX_SEGMEXEC
79001 + if (vma_m) {
79002 + mpol_get(pol);
79003 + vma_set_policy(new_m, pol);
79004 +
79005 + if (new_m->vm_file) {
79006 + get_file(new_m->vm_file);
79007 + if (vma_m->vm_flags & VM_EXECUTABLE)
79008 + added_exe_file_vma(mm);
79009 + }
79010 +
79011 + if (new_m->vm_ops && new_m->vm_ops->open)
79012 + new_m->vm_ops->open(new_m);
79013 +
79014 + if (new_below)
79015 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79016 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79017 + else
79018 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79019 + }
79020 +#endif
79021 +
79022 return 0;
79023 }
79024
79025 @@ -1891,11 +2249,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79026 * work. This now handles partial unmappings.
79027 * Jeremy Fitzhardinge <jeremy@goop.org>
79028 */
79029 +#ifdef CONFIG_PAX_SEGMEXEC
79030 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79031 {
79032 + int ret = __do_munmap(mm, start, len);
79033 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79034 + return ret;
79035 +
79036 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79037 +}
79038 +
79039 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79040 +#else
79041 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79042 +#endif
79043 +{
79044 unsigned long end;
79045 struct vm_area_struct *vma, *prev, *last;
79046
79047 + /*
79048 + * mm->mmap_sem is required to protect against another thread
79049 + * changing the mappings in case we sleep.
79050 + */
79051 + verify_mm_writelocked(mm);
79052 +
79053 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79054 return -EINVAL;
79055
79056 @@ -1959,6 +2336,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79057 /* Fix up all other VM information */
79058 remove_vma_list(mm, vma);
79059
79060 + track_exec_limit(mm, start, end, 0UL);
79061 +
79062 return 0;
79063 }
79064
79065 @@ -1971,22 +2350,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79066
79067 profile_munmap(addr);
79068
79069 +#ifdef CONFIG_PAX_SEGMEXEC
79070 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79071 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79072 + return -EINVAL;
79073 +#endif
79074 +
79075 down_write(&mm->mmap_sem);
79076 ret = do_munmap(mm, addr, len);
79077 up_write(&mm->mmap_sem);
79078 return ret;
79079 }
79080
79081 -static inline void verify_mm_writelocked(struct mm_struct *mm)
79082 -{
79083 -#ifdef CONFIG_DEBUG_VM
79084 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79085 - WARN_ON(1);
79086 - up_read(&mm->mmap_sem);
79087 - }
79088 -#endif
79089 -}
79090 -
79091 /*
79092 * this is really a simplified "do_mmap". it only handles
79093 * anonymous maps. eventually we may be able to do some
79094 @@ -2000,6 +2375,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79095 struct rb_node ** rb_link, * rb_parent;
79096 pgoff_t pgoff = addr >> PAGE_SHIFT;
79097 int error;
79098 + unsigned long charged;
79099
79100 len = PAGE_ALIGN(len);
79101 if (!len)
79102 @@ -2011,16 +2387,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79103
79104 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79105
79106 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79107 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79108 + flags &= ~VM_EXEC;
79109 +
79110 +#ifdef CONFIG_PAX_MPROTECT
79111 + if (mm->pax_flags & MF_PAX_MPROTECT)
79112 + flags &= ~VM_MAYEXEC;
79113 +#endif
79114 +
79115 + }
79116 +#endif
79117 +
79118 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79119 if (error & ~PAGE_MASK)
79120 return error;
79121
79122 + charged = len >> PAGE_SHIFT;
79123 +
79124 /*
79125 * mlock MCL_FUTURE?
79126 */
79127 if (mm->def_flags & VM_LOCKED) {
79128 unsigned long locked, lock_limit;
79129 - locked = len >> PAGE_SHIFT;
79130 + locked = charged;
79131 locked += mm->locked_vm;
79132 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79133 lock_limit >>= PAGE_SHIFT;
79134 @@ -2037,22 +2427,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79135 /*
79136 * Clear old maps. this also does some error checking for us
79137 */
79138 - munmap_back:
79139 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79140 if (vma && vma->vm_start < addr + len) {
79141 if (do_munmap(mm, addr, len))
79142 return -ENOMEM;
79143 - goto munmap_back;
79144 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79145 + BUG_ON(vma && vma->vm_start < addr + len);
79146 }
79147
79148 /* Check against address space limits *after* clearing old maps... */
79149 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79150 + if (!may_expand_vm(mm, charged))
79151 return -ENOMEM;
79152
79153 if (mm->map_count > sysctl_max_map_count)
79154 return -ENOMEM;
79155
79156 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
79157 + if (security_vm_enough_memory(charged))
79158 return -ENOMEM;
79159
79160 /* Can we just expand an old private anonymous mapping? */
79161 @@ -2066,7 +2456,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79162 */
79163 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79164 if (!vma) {
79165 - vm_unacct_memory(len >> PAGE_SHIFT);
79166 + vm_unacct_memory(charged);
79167 return -ENOMEM;
79168 }
79169
79170 @@ -2078,11 +2468,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79171 vma->vm_page_prot = vm_get_page_prot(flags);
79172 vma_link(mm, vma, prev, rb_link, rb_parent);
79173 out:
79174 - mm->total_vm += len >> PAGE_SHIFT;
79175 + mm->total_vm += charged;
79176 if (flags & VM_LOCKED) {
79177 if (!mlock_vma_pages_range(vma, addr, addr + len))
79178 - mm->locked_vm += (len >> PAGE_SHIFT);
79179 + mm->locked_vm += charged;
79180 }
79181 + track_exec_limit(mm, addr, addr + len, flags);
79182 return addr;
79183 }
79184
79185 @@ -2129,8 +2520,10 @@ void exit_mmap(struct mm_struct *mm)
79186 * Walk the list again, actually closing and freeing it,
79187 * with preemption enabled, without holding any MM locks.
79188 */
79189 - while (vma)
79190 + while (vma) {
79191 + vma->vm_mirror = NULL;
79192 vma = remove_vma(vma);
79193 + }
79194
79195 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79196 }
79197 @@ -2144,6 +2537,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79198 struct vm_area_struct * __vma, * prev;
79199 struct rb_node ** rb_link, * rb_parent;
79200
79201 +#ifdef CONFIG_PAX_SEGMEXEC
79202 + struct vm_area_struct *vma_m = NULL;
79203 +#endif
79204 +
79205 /*
79206 * The vm_pgoff of a purely anonymous vma should be irrelevant
79207 * until its first write fault, when page's anon_vma and index
79208 @@ -2166,7 +2563,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79209 if ((vma->vm_flags & VM_ACCOUNT) &&
79210 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79211 return -ENOMEM;
79212 +
79213 +#ifdef CONFIG_PAX_SEGMEXEC
79214 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79215 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79216 + if (!vma_m)
79217 + return -ENOMEM;
79218 + }
79219 +#endif
79220 +
79221 vma_link(mm, vma, prev, rb_link, rb_parent);
79222 +
79223 +#ifdef CONFIG_PAX_SEGMEXEC
79224 + if (vma_m)
79225 + pax_mirror_vma(vma_m, vma);
79226 +#endif
79227 +
79228 return 0;
79229 }
79230
79231 @@ -2184,6 +2596,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79232 struct rb_node **rb_link, *rb_parent;
79233 struct mempolicy *pol;
79234
79235 + BUG_ON(vma->vm_mirror);
79236 +
79237 /*
79238 * If anonymous vma has not yet been faulted, update new pgoff
79239 * to match new location, to increase its chance of merging.
79240 @@ -2227,6 +2641,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79241 return new_vma;
79242 }
79243
79244 +#ifdef CONFIG_PAX_SEGMEXEC
79245 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79246 +{
79247 + struct vm_area_struct *prev_m;
79248 + struct rb_node **rb_link_m, *rb_parent_m;
79249 + struct mempolicy *pol_m;
79250 +
79251 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79252 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79253 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79254 + *vma_m = *vma;
79255 + pol_m = vma_policy(vma_m);
79256 + mpol_get(pol_m);
79257 + vma_set_policy(vma_m, pol_m);
79258 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79259 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79260 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79261 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79262 + if (vma_m->vm_file)
79263 + get_file(vma_m->vm_file);
79264 + if (vma_m->vm_ops && vma_m->vm_ops->open)
79265 + vma_m->vm_ops->open(vma_m);
79266 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79267 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79268 + vma_m->vm_mirror = vma;
79269 + vma->vm_mirror = vma_m;
79270 +}
79271 +#endif
79272 +
79273 /*
79274 * Return true if the calling process may expand its vm space by the passed
79275 * number of pages
79276 @@ -2237,7 +2680,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79277 unsigned long lim;
79278
79279 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79280 -
79281 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79282 if (cur + npages > lim)
79283 return 0;
79284 return 1;
79285 @@ -2307,6 +2750,22 @@ int install_special_mapping(struct mm_struct *mm,
79286 vma->vm_start = addr;
79287 vma->vm_end = addr + len;
79288
79289 +#ifdef CONFIG_PAX_MPROTECT
79290 + if (mm->pax_flags & MF_PAX_MPROTECT) {
79291 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
79292 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79293 + return -EPERM;
79294 + if (!(vm_flags & VM_EXEC))
79295 + vm_flags &= ~VM_MAYEXEC;
79296 +#else
79297 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79298 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79299 +#endif
79300 + else
79301 + vm_flags &= ~VM_MAYWRITE;
79302 + }
79303 +#endif
79304 +
79305 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79306 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79307
79308 diff --git a/mm/mprotect.c b/mm/mprotect.c
79309 index 1737c7e..c7faeb4 100644
79310 --- a/mm/mprotect.c
79311 +++ b/mm/mprotect.c
79312 @@ -24,10 +24,16 @@
79313 #include <linux/mmu_notifier.h>
79314 #include <linux/migrate.h>
79315 #include <linux/perf_event.h>
79316 +
79317 +#ifdef CONFIG_PAX_MPROTECT
79318 +#include <linux/elf.h>
79319 +#endif
79320 +
79321 #include <asm/uaccess.h>
79322 #include <asm/pgtable.h>
79323 #include <asm/cacheflush.h>
79324 #include <asm/tlbflush.h>
79325 +#include <asm/mmu_context.h>
79326
79327 #ifndef pgprot_modify
79328 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79329 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79330 flush_tlb_range(vma, start, end);
79331 }
79332
79333 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79334 +/* called while holding the mmap semaphor for writing except stack expansion */
79335 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79336 +{
79337 + unsigned long oldlimit, newlimit = 0UL;
79338 +
79339 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79340 + return;
79341 +
79342 + spin_lock(&mm->page_table_lock);
79343 + oldlimit = mm->context.user_cs_limit;
79344 + if ((prot & VM_EXEC) && oldlimit < end)
79345 + /* USER_CS limit moved up */
79346 + newlimit = end;
79347 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79348 + /* USER_CS limit moved down */
79349 + newlimit = start;
79350 +
79351 + if (newlimit) {
79352 + mm->context.user_cs_limit = newlimit;
79353 +
79354 +#ifdef CONFIG_SMP
79355 + wmb();
79356 + cpus_clear(mm->context.cpu_user_cs_mask);
79357 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79358 +#endif
79359 +
79360 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79361 + }
79362 + spin_unlock(&mm->page_table_lock);
79363 + if (newlimit == end) {
79364 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
79365 +
79366 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
79367 + if (is_vm_hugetlb_page(vma))
79368 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79369 + else
79370 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79371 + }
79372 +}
79373 +#endif
79374 +
79375 int
79376 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79377 unsigned long start, unsigned long end, unsigned long newflags)
79378 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79379 int error;
79380 int dirty_accountable = 0;
79381
79382 +#ifdef CONFIG_PAX_SEGMEXEC
79383 + struct vm_area_struct *vma_m = NULL;
79384 + unsigned long start_m, end_m;
79385 +
79386 + start_m = start + SEGMEXEC_TASK_SIZE;
79387 + end_m = end + SEGMEXEC_TASK_SIZE;
79388 +#endif
79389 +
79390 if (newflags == oldflags) {
79391 *pprev = vma;
79392 return 0;
79393 }
79394
79395 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79396 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79397 +
79398 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79399 + return -ENOMEM;
79400 +
79401 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79402 + return -ENOMEM;
79403 + }
79404 +
79405 /*
79406 * If we make a private mapping writable we increase our commit;
79407 * but (without finer accounting) cannot reduce our commit if we
79408 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79409 }
79410 }
79411
79412 +#ifdef CONFIG_PAX_SEGMEXEC
79413 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79414 + if (start != vma->vm_start) {
79415 + error = split_vma(mm, vma, start, 1);
79416 + if (error)
79417 + goto fail;
79418 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79419 + *pprev = (*pprev)->vm_next;
79420 + }
79421 +
79422 + if (end != vma->vm_end) {
79423 + error = split_vma(mm, vma, end, 0);
79424 + if (error)
79425 + goto fail;
79426 + }
79427 +
79428 + if (pax_find_mirror_vma(vma)) {
79429 + error = __do_munmap(mm, start_m, end_m - start_m);
79430 + if (error)
79431 + goto fail;
79432 + } else {
79433 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79434 + if (!vma_m) {
79435 + error = -ENOMEM;
79436 + goto fail;
79437 + }
79438 + vma->vm_flags = newflags;
79439 + pax_mirror_vma(vma_m, vma);
79440 + }
79441 + }
79442 +#endif
79443 +
79444 /*
79445 * First try to merge with previous and/or next vma.
79446 */
79447 @@ -195,9 +293,21 @@ success:
79448 * vm_flags and vm_page_prot are protected by the mmap_sem
79449 * held in write mode.
79450 */
79451 +
79452 +#ifdef CONFIG_PAX_SEGMEXEC
79453 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79454 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79455 +#endif
79456 +
79457 vma->vm_flags = newflags;
79458 +
79459 +#ifdef CONFIG_PAX_MPROTECT
79460 + if (mm->binfmt && mm->binfmt->handle_mprotect)
79461 + mm->binfmt->handle_mprotect(vma, newflags);
79462 +#endif
79463 +
79464 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79465 - vm_get_page_prot(newflags));
79466 + vm_get_page_prot(vma->vm_flags));
79467
79468 if (vma_wants_writenotify(vma)) {
79469 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79470 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79471 end = start + len;
79472 if (end <= start)
79473 return -ENOMEM;
79474 +
79475 +#ifdef CONFIG_PAX_SEGMEXEC
79476 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79477 + if (end > SEGMEXEC_TASK_SIZE)
79478 + return -EINVAL;
79479 + } else
79480 +#endif
79481 +
79482 + if (end > TASK_SIZE)
79483 + return -EINVAL;
79484 +
79485 if (!arch_validate_prot(prot))
79486 return -EINVAL;
79487
79488 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79489 /*
79490 * Does the application expect PROT_READ to imply PROT_EXEC:
79491 */
79492 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79493 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79494 prot |= PROT_EXEC;
79495
79496 vm_flags = calc_vm_prot_bits(prot);
79497 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79498 if (start > vma->vm_start)
79499 prev = vma;
79500
79501 +#ifdef CONFIG_PAX_MPROTECT
79502 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79503 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
79504 +#endif
79505 +
79506 for (nstart = start ; ; ) {
79507 unsigned long newflags;
79508
79509 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79510
79511 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79512 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79513 + if (prot & (PROT_WRITE | PROT_EXEC))
79514 + gr_log_rwxmprotect(vma->vm_file);
79515 +
79516 + error = -EACCES;
79517 + goto out;
79518 + }
79519 +
79520 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79521 error = -EACCES;
79522 goto out;
79523 }
79524 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79525 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79526 if (error)
79527 goto out;
79528 +
79529 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
79530 +
79531 nstart = tmp;
79532
79533 if (nstart < prev->vm_end)
79534 diff --git a/mm/mremap.c b/mm/mremap.c
79535 index 3e98d79..1706cec 100644
79536 --- a/mm/mremap.c
79537 +++ b/mm/mremap.c
79538 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79539 continue;
79540 pte = ptep_clear_flush(vma, old_addr, old_pte);
79541 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79542 +
79543 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79544 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79545 + pte = pte_exprotect(pte);
79546 +#endif
79547 +
79548 set_pte_at(mm, new_addr, new_pte, pte);
79549 }
79550
79551 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79552 if (is_vm_hugetlb_page(vma))
79553 goto Einval;
79554
79555 +#ifdef CONFIG_PAX_SEGMEXEC
79556 + if (pax_find_mirror_vma(vma))
79557 + goto Einval;
79558 +#endif
79559 +
79560 /* We can't remap across vm area boundaries */
79561 if (old_len > vma->vm_end - addr)
79562 goto Efault;
79563 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79564 unsigned long ret = -EINVAL;
79565 unsigned long charged = 0;
79566 unsigned long map_flags;
79567 + unsigned long pax_task_size = TASK_SIZE;
79568
79569 if (new_addr & ~PAGE_MASK)
79570 goto out;
79571
79572 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79573 +#ifdef CONFIG_PAX_SEGMEXEC
79574 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79575 + pax_task_size = SEGMEXEC_TASK_SIZE;
79576 +#endif
79577 +
79578 + pax_task_size -= PAGE_SIZE;
79579 +
79580 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79581 goto out;
79582
79583 /* Check if the location we're moving into overlaps the
79584 * old location at all, and fail if it does.
79585 */
79586 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
79587 - goto out;
79588 -
79589 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
79590 + if (addr + old_len > new_addr && new_addr + new_len > addr)
79591 goto out;
79592
79593 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79594 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79595 struct vm_area_struct *vma;
79596 unsigned long ret = -EINVAL;
79597 unsigned long charged = 0;
79598 + unsigned long pax_task_size = TASK_SIZE;
79599
79600 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79601 goto out;
79602 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79603 if (!new_len)
79604 goto out;
79605
79606 +#ifdef CONFIG_PAX_SEGMEXEC
79607 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79608 + pax_task_size = SEGMEXEC_TASK_SIZE;
79609 +#endif
79610 +
79611 + pax_task_size -= PAGE_SIZE;
79612 +
79613 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79614 + old_len > pax_task_size || addr > pax_task_size-old_len)
79615 + goto out;
79616 +
79617 if (flags & MREMAP_FIXED) {
79618 if (flags & MREMAP_MAYMOVE)
79619 ret = mremap_to(addr, old_len, new_addr, new_len);
79620 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79621 addr + new_len);
79622 }
79623 ret = addr;
79624 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79625 goto out;
79626 }
79627 }
79628 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79629 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79630 if (ret)
79631 goto out;
79632 +
79633 + map_flags = vma->vm_flags;
79634 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79635 + if (!(ret & ~PAGE_MASK)) {
79636 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79637 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79638 + }
79639 }
79640 out:
79641 if (ret & ~PAGE_MASK)
79642 diff --git a/mm/nommu.c b/mm/nommu.c
79643 index 406e8d4..53970d3 100644
79644 --- a/mm/nommu.c
79645 +++ b/mm/nommu.c
79646 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79647 int sysctl_overcommit_ratio = 50; /* default is 50% */
79648 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79649 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79650 -int heap_stack_gap = 0;
79651
79652 atomic_long_t mmap_pages_allocated;
79653
79654 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79655 EXPORT_SYMBOL(find_vma);
79656
79657 /*
79658 - * find a VMA
79659 - * - we don't extend stack VMAs under NOMMU conditions
79660 - */
79661 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79662 -{
79663 - return find_vma(mm, addr);
79664 -}
79665 -
79666 -/*
79667 * expand a stack to a given address
79668 * - not supported under NOMMU conditions
79669 */
79670 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79671 index 3ecab7e..594a471 100644
79672 --- a/mm/page_alloc.c
79673 +++ b/mm/page_alloc.c
79674 @@ -289,7 +289,7 @@ out:
79675 * This usage means that zero-order pages may not be compound.
79676 */
79677
79678 -static void free_compound_page(struct page *page)
79679 +void free_compound_page(struct page *page)
79680 {
79681 __free_pages_ok(page, compound_order(page));
79682 }
79683 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79684 int bad = 0;
79685 int wasMlocked = __TestClearPageMlocked(page);
79686
79687 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79688 + unsigned long index = 1UL << order;
79689 +#endif
79690 +
79691 kmemcheck_free_shadow(page, order);
79692
79693 for (i = 0 ; i < (1 << order) ; ++i)
79694 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79695 debug_check_no_obj_freed(page_address(page),
79696 PAGE_SIZE << order);
79697 }
79698 +
79699 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79700 + for (; index; --index)
79701 + sanitize_highpage(page + index - 1);
79702 +#endif
79703 +
79704 arch_free_page(page, order);
79705 kernel_map_pages(page, 1 << order, 0);
79706
79707 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79708 arch_alloc_page(page, order);
79709 kernel_map_pages(page, 1 << order, 1);
79710
79711 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
79712 if (gfp_flags & __GFP_ZERO)
79713 prep_zero_page(page, order, gfp_flags);
79714 +#endif
79715
79716 if (order && (gfp_flags & __GFP_COMP))
79717 prep_compound_page(page, order);
79718 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79719 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79720 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79721 }
79722 +
79723 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79724 + sanitize_highpage(page);
79725 +#endif
79726 +
79727 arch_free_page(page, 0);
79728 kernel_map_pages(page, 1, 0);
79729
79730 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
79731 int cpu;
79732 struct zone *zone;
79733
79734 + pax_track_stack();
79735 +
79736 for_each_populated_zone(zone) {
79737 show_node(zone);
79738 printk("%s per-cpu:\n", zone->name);
79739 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79740 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79741 }
79742 #else
79743 -static void inline setup_usemap(struct pglist_data *pgdat,
79744 +static inline void setup_usemap(struct pglist_data *pgdat,
79745 struct zone *zone, unsigned long zonesize) {}
79746 #endif /* CONFIG_SPARSEMEM */
79747
79748 diff --git a/mm/percpu.c b/mm/percpu.c
79749 index c90614a..5f7b7b8 100644
79750 --- a/mm/percpu.c
79751 +++ b/mm/percpu.c
79752 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79753 static unsigned int pcpu_high_unit_cpu __read_mostly;
79754
79755 /* the address of the first chunk which starts with the kernel static area */
79756 -void *pcpu_base_addr __read_mostly;
79757 +void *pcpu_base_addr __read_only;
79758 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79759
79760 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79761 diff --git a/mm/rmap.c b/mm/rmap.c
79762 index dd43373..d848cd7 100644
79763 --- a/mm/rmap.c
79764 +++ b/mm/rmap.c
79765 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79766 /* page_table_lock to protect against threads */
79767 spin_lock(&mm->page_table_lock);
79768 if (likely(!vma->anon_vma)) {
79769 +
79770 +#ifdef CONFIG_PAX_SEGMEXEC
79771 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79772 +
79773 + if (vma_m) {
79774 + BUG_ON(vma_m->anon_vma);
79775 + vma_m->anon_vma = anon_vma;
79776 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79777 + }
79778 +#endif
79779 +
79780 vma->anon_vma = anon_vma;
79781 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79782 allocated = NULL;
79783 diff --git a/mm/shmem.c b/mm/shmem.c
79784 index 3e0005b..1d659a8 100644
79785 --- a/mm/shmem.c
79786 +++ b/mm/shmem.c
79787 @@ -31,7 +31,7 @@
79788 #include <linux/swap.h>
79789 #include <linux/ima.h>
79790
79791 -static struct vfsmount *shm_mnt;
79792 +struct vfsmount *shm_mnt;
79793
79794 #ifdef CONFIG_SHMEM
79795 /*
79796 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79797 goto unlock;
79798 }
79799 entry = shmem_swp_entry(info, index, NULL);
79800 + if (!entry)
79801 + goto unlock;
79802 if (entry->val) {
79803 /*
79804 * The more uptodate page coming down from a stacked
79805 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79806 struct vm_area_struct pvma;
79807 struct page *page;
79808
79809 + pax_track_stack();
79810 +
79811 spol = mpol_cond_copy(&mpol,
79812 mpol_shared_policy_lookup(&info->policy, idx));
79813
79814 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79815
79816 info = SHMEM_I(inode);
79817 inode->i_size = len-1;
79818 - if (len <= (char *)inode - (char *)info) {
79819 + if (len <= (char *)inode - (char *)info && len <= 64) {
79820 /* do it inline */
79821 memcpy(info, symname, len);
79822 inode->i_op = &shmem_symlink_inline_operations;
79823 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79824 int err = -ENOMEM;
79825
79826 /* Round up to L1_CACHE_BYTES to resist false sharing */
79827 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79828 - L1_CACHE_BYTES), GFP_KERNEL);
79829 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79830 if (!sbinfo)
79831 return -ENOMEM;
79832
79833 diff --git a/mm/slab.c b/mm/slab.c
79834 index c8d466a..909e01e 100644
79835 --- a/mm/slab.c
79836 +++ b/mm/slab.c
79837 @@ -174,7 +174,7 @@
79838
79839 /* Legal flag mask for kmem_cache_create(). */
79840 #if DEBUG
79841 -# define CREATE_MASK (SLAB_RED_ZONE | \
79842 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79843 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79844 SLAB_CACHE_DMA | \
79845 SLAB_STORE_USER | \
79846 @@ -182,7 +182,7 @@
79847 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79848 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79849 #else
79850 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79851 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79852 SLAB_CACHE_DMA | \
79853 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79854 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79855 @@ -308,7 +308,7 @@ struct kmem_list3 {
79856 * Need this for bootstrapping a per node allocator.
79857 */
79858 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79859 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79860 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79861 #define CACHE_CACHE 0
79862 #define SIZE_AC MAX_NUMNODES
79863 #define SIZE_L3 (2 * MAX_NUMNODES)
79864 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79865 if ((x)->max_freeable < i) \
79866 (x)->max_freeable = i; \
79867 } while (0)
79868 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79869 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79870 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79871 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79872 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79873 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79874 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79875 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79876 #else
79877 #define STATS_INC_ACTIVE(x) do { } while (0)
79878 #define STATS_DEC_ACTIVE(x) do { } while (0)
79879 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79880 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79881 */
79882 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79883 - const struct slab *slab, void *obj)
79884 + const struct slab *slab, const void *obj)
79885 {
79886 u32 offset = (obj - slab->s_mem);
79887 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79888 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79889 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79890 sizes[INDEX_AC].cs_size,
79891 ARCH_KMALLOC_MINALIGN,
79892 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79893 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79894 NULL);
79895
79896 if (INDEX_AC != INDEX_L3) {
79897 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79898 kmem_cache_create(names[INDEX_L3].name,
79899 sizes[INDEX_L3].cs_size,
79900 ARCH_KMALLOC_MINALIGN,
79901 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79902 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79903 NULL);
79904 }
79905
79906 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79907 sizes->cs_cachep = kmem_cache_create(names->name,
79908 sizes->cs_size,
79909 ARCH_KMALLOC_MINALIGN,
79910 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79911 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79912 NULL);
79913 }
79914 #ifdef CONFIG_ZONE_DMA
79915 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79916 }
79917 /* cpu stats */
79918 {
79919 - unsigned long allochit = atomic_read(&cachep->allochit);
79920 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79921 - unsigned long freehit = atomic_read(&cachep->freehit);
79922 - unsigned long freemiss = atomic_read(&cachep->freemiss);
79923 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79924 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79925 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79926 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79927
79928 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79929 allochit, allocmiss, freehit, freemiss);
79930 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79931
79932 static int __init slab_proc_init(void)
79933 {
79934 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79935 + mode_t gr_mode = S_IRUGO;
79936 +
79937 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79938 + gr_mode = S_IRUSR;
79939 +#endif
79940 +
79941 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79942 #ifdef CONFIG_DEBUG_SLAB_LEAK
79943 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79944 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79945 #endif
79946 return 0;
79947 }
79948 module_init(slab_proc_init);
79949 #endif
79950
79951 +void check_object_size(const void *ptr, unsigned long n, bool to)
79952 +{
79953 +
79954 +#ifdef CONFIG_PAX_USERCOPY
79955 + struct page *page;
79956 + struct kmem_cache *cachep = NULL;
79957 + struct slab *slabp;
79958 + unsigned int objnr;
79959 + unsigned long offset;
79960 + const char *type;
79961 +
79962 + if (!n)
79963 + return;
79964 +
79965 + type = "<null>";
79966 + if (ZERO_OR_NULL_PTR(ptr))
79967 + goto report;
79968 +
79969 + if (!virt_addr_valid(ptr))
79970 + return;
79971 +
79972 + page = virt_to_head_page(ptr);
79973 +
79974 + type = "<process stack>";
79975 + if (!PageSlab(page)) {
79976 + if (object_is_on_stack(ptr, n) == -1)
79977 + goto report;
79978 + return;
79979 + }
79980 +
79981 + cachep = page_get_cache(page);
79982 + type = cachep->name;
79983 + if (!(cachep->flags & SLAB_USERCOPY))
79984 + goto report;
79985 +
79986 + slabp = page_get_slab(page);
79987 + objnr = obj_to_index(cachep, slabp, ptr);
79988 + BUG_ON(objnr >= cachep->num);
79989 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79990 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79991 + return;
79992 +
79993 +report:
79994 + pax_report_usercopy(ptr, n, to, type);
79995 +#endif
79996 +
79997 +}
79998 +EXPORT_SYMBOL(check_object_size);
79999 +
80000 /**
80001 * ksize - get the actual amount of memory allocated for a given object
80002 * @objp: Pointer to the object
80003 diff --git a/mm/slob.c b/mm/slob.c
80004 index 837ebd6..0bd23bc 100644
80005 --- a/mm/slob.c
80006 +++ b/mm/slob.c
80007 @@ -29,7 +29,7 @@
80008 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80009 * alloc_pages() directly, allocating compound pages so the page order
80010 * does not have to be separately tracked, and also stores the exact
80011 - * allocation size in page->private so that it can be used to accurately
80012 + * allocation size in slob_page->size so that it can be used to accurately
80013 * provide ksize(). These objects are detected in kfree() because slob_page()
80014 * is false for them.
80015 *
80016 @@ -58,6 +58,7 @@
80017 */
80018
80019 #include <linux/kernel.h>
80020 +#include <linux/sched.h>
80021 #include <linux/slab.h>
80022 #include <linux/mm.h>
80023 #include <linux/swap.h> /* struct reclaim_state */
80024 @@ -100,7 +101,8 @@ struct slob_page {
80025 unsigned long flags; /* mandatory */
80026 atomic_t _count; /* mandatory */
80027 slobidx_t units; /* free units left in page */
80028 - unsigned long pad[2];
80029 + unsigned long pad[1];
80030 + unsigned long size; /* size when >=PAGE_SIZE */
80031 slob_t *free; /* first free slob_t in page */
80032 struct list_head list; /* linked list of free pages */
80033 };
80034 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80035 */
80036 static inline int is_slob_page(struct slob_page *sp)
80037 {
80038 - return PageSlab((struct page *)sp);
80039 + return PageSlab((struct page *)sp) && !sp->size;
80040 }
80041
80042 static inline void set_slob_page(struct slob_page *sp)
80043 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80044
80045 static inline struct slob_page *slob_page(const void *addr)
80046 {
80047 - return (struct slob_page *)virt_to_page(addr);
80048 + return (struct slob_page *)virt_to_head_page(addr);
80049 }
80050
80051 /*
80052 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80053 /*
80054 * Return the size of a slob block.
80055 */
80056 -static slobidx_t slob_units(slob_t *s)
80057 +static slobidx_t slob_units(const slob_t *s)
80058 {
80059 if (s->units > 0)
80060 return s->units;
80061 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80062 /*
80063 * Return the next free slob block pointer after this one.
80064 */
80065 -static slob_t *slob_next(slob_t *s)
80066 +static slob_t *slob_next(const slob_t *s)
80067 {
80068 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80069 slobidx_t next;
80070 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80071 /*
80072 * Returns true if s is the last free block in its page.
80073 */
80074 -static int slob_last(slob_t *s)
80075 +static int slob_last(const slob_t *s)
80076 {
80077 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80078 }
80079 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80080 if (!page)
80081 return NULL;
80082
80083 + set_slob_page(page);
80084 return page_address(page);
80085 }
80086
80087 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80088 if (!b)
80089 return NULL;
80090 sp = slob_page(b);
80091 - set_slob_page(sp);
80092
80093 spin_lock_irqsave(&slob_lock, flags);
80094 sp->units = SLOB_UNITS(PAGE_SIZE);
80095 sp->free = b;
80096 + sp->size = 0;
80097 INIT_LIST_HEAD(&sp->list);
80098 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80099 set_slob_page_free(sp, slob_list);
80100 @@ -475,10 +478,9 @@ out:
80101 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80102 #endif
80103
80104 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80105 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80106 {
80107 - unsigned int *m;
80108 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80109 + slob_t *m;
80110 void *ret;
80111
80112 lockdep_trace_alloc(gfp);
80113 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80114
80115 if (!m)
80116 return NULL;
80117 - *m = size;
80118 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80119 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80120 + m[0].units = size;
80121 + m[1].units = align;
80122 ret = (void *)m + align;
80123
80124 trace_kmalloc_node(_RET_IP_, ret,
80125 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80126
80127 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80128 if (ret) {
80129 - struct page *page;
80130 - page = virt_to_page(ret);
80131 - page->private = size;
80132 + struct slob_page *sp;
80133 + sp = slob_page(ret);
80134 + sp->size = size;
80135 }
80136
80137 trace_kmalloc_node(_RET_IP_, ret,
80138 size, PAGE_SIZE << order, gfp, node);
80139 }
80140
80141 - kmemleak_alloc(ret, size, 1, gfp);
80142 + return ret;
80143 +}
80144 +
80145 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80146 +{
80147 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80148 + void *ret = __kmalloc_node_align(size, gfp, node, align);
80149 +
80150 + if (!ZERO_OR_NULL_PTR(ret))
80151 + kmemleak_alloc(ret, size, 1, gfp);
80152 return ret;
80153 }
80154 EXPORT_SYMBOL(__kmalloc_node);
80155 @@ -528,13 +542,92 @@ void kfree(const void *block)
80156 sp = slob_page(block);
80157 if (is_slob_page(sp)) {
80158 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80159 - unsigned int *m = (unsigned int *)(block - align);
80160 - slob_free(m, *m + align);
80161 - } else
80162 + slob_t *m = (slob_t *)(block - align);
80163 + slob_free(m, m[0].units + align);
80164 + } else {
80165 + clear_slob_page(sp);
80166 + free_slob_page(sp);
80167 + sp->size = 0;
80168 put_page(&sp->page);
80169 + }
80170 }
80171 EXPORT_SYMBOL(kfree);
80172
80173 +void check_object_size(const void *ptr, unsigned long n, bool to)
80174 +{
80175 +
80176 +#ifdef CONFIG_PAX_USERCOPY
80177 + struct slob_page *sp;
80178 + const slob_t *free;
80179 + const void *base;
80180 + unsigned long flags;
80181 + const char *type;
80182 +
80183 + if (!n)
80184 + return;
80185 +
80186 + type = "<null>";
80187 + if (ZERO_OR_NULL_PTR(ptr))
80188 + goto report;
80189 +
80190 + if (!virt_addr_valid(ptr))
80191 + return;
80192 +
80193 + type = "<process stack>";
80194 + sp = slob_page(ptr);
80195 + if (!PageSlab((struct page *)sp)) {
80196 + if (object_is_on_stack(ptr, n) == -1)
80197 + goto report;
80198 + return;
80199 + }
80200 +
80201 + type = "<slob>";
80202 + if (sp->size) {
80203 + base = page_address(&sp->page);
80204 + if (base <= ptr && n <= sp->size - (ptr - base))
80205 + return;
80206 + goto report;
80207 + }
80208 +
80209 + /* some tricky double walking to find the chunk */
80210 + spin_lock_irqsave(&slob_lock, flags);
80211 + base = (void *)((unsigned long)ptr & PAGE_MASK);
80212 + free = sp->free;
80213 +
80214 + while (!slob_last(free) && (void *)free <= ptr) {
80215 + base = free + slob_units(free);
80216 + free = slob_next(free);
80217 + }
80218 +
80219 + while (base < (void *)free) {
80220 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80221 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
80222 + int offset;
80223 +
80224 + if (ptr < base + align)
80225 + break;
80226 +
80227 + offset = ptr - base - align;
80228 + if (offset >= m) {
80229 + base += size;
80230 + continue;
80231 + }
80232 +
80233 + if (n > m - offset)
80234 + break;
80235 +
80236 + spin_unlock_irqrestore(&slob_lock, flags);
80237 + return;
80238 + }
80239 +
80240 + spin_unlock_irqrestore(&slob_lock, flags);
80241 +report:
80242 + pax_report_usercopy(ptr, n, to, type);
80243 +#endif
80244 +
80245 +}
80246 +EXPORT_SYMBOL(check_object_size);
80247 +
80248 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80249 size_t ksize(const void *block)
80250 {
80251 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
80252 sp = slob_page(block);
80253 if (is_slob_page(sp)) {
80254 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80255 - unsigned int *m = (unsigned int *)(block - align);
80256 - return SLOB_UNITS(*m) * SLOB_UNIT;
80257 + slob_t *m = (slob_t *)(block - align);
80258 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80259 } else
80260 - return sp->page.private;
80261 + return sp->size;
80262 }
80263 EXPORT_SYMBOL(ksize);
80264
80265 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80266 {
80267 struct kmem_cache *c;
80268
80269 +#ifdef CONFIG_PAX_USERCOPY
80270 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
80271 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80272 +#else
80273 c = slob_alloc(sizeof(struct kmem_cache),
80274 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80275 +#endif
80276
80277 if (c) {
80278 c->name = name;
80279 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80280 {
80281 void *b;
80282
80283 +#ifdef CONFIG_PAX_USERCOPY
80284 + b = __kmalloc_node_align(c->size, flags, node, c->align);
80285 +#else
80286 if (c->size < PAGE_SIZE) {
80287 b = slob_alloc(c->size, flags, c->align, node);
80288 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80289 SLOB_UNITS(c->size) * SLOB_UNIT,
80290 flags, node);
80291 } else {
80292 + struct slob_page *sp;
80293 +
80294 b = slob_new_pages(flags, get_order(c->size), node);
80295 + sp = slob_page(b);
80296 + sp->size = c->size;
80297 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80298 PAGE_SIZE << get_order(c->size),
80299 flags, node);
80300 }
80301 +#endif
80302
80303 if (c->ctor)
80304 c->ctor(b);
80305 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80306
80307 static void __kmem_cache_free(void *b, int size)
80308 {
80309 - if (size < PAGE_SIZE)
80310 + struct slob_page *sp = slob_page(b);
80311 +
80312 + if (is_slob_page(sp))
80313 slob_free(b, size);
80314 - else
80315 + else {
80316 + clear_slob_page(sp);
80317 + free_slob_page(sp);
80318 + sp->size = 0;
80319 slob_free_pages(b, get_order(size));
80320 + }
80321 }
80322
80323 static void kmem_rcu_free(struct rcu_head *head)
80324 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80325
80326 void kmem_cache_free(struct kmem_cache *c, void *b)
80327 {
80328 + int size = c->size;
80329 +
80330 +#ifdef CONFIG_PAX_USERCOPY
80331 + if (size + c->align < PAGE_SIZE) {
80332 + size += c->align;
80333 + b -= c->align;
80334 + }
80335 +#endif
80336 +
80337 kmemleak_free_recursive(b, c->flags);
80338 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80339 struct slob_rcu *slob_rcu;
80340 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80341 + slob_rcu = b + (size - sizeof(struct slob_rcu));
80342 INIT_RCU_HEAD(&slob_rcu->head);
80343 - slob_rcu->size = c->size;
80344 + slob_rcu->size = size;
80345 call_rcu(&slob_rcu->head, kmem_rcu_free);
80346 } else {
80347 - __kmem_cache_free(b, c->size);
80348 + __kmem_cache_free(b, size);
80349 }
80350
80351 +#ifdef CONFIG_PAX_USERCOPY
80352 + trace_kfree(_RET_IP_, b);
80353 +#else
80354 trace_kmem_cache_free(_RET_IP_, b);
80355 +#endif
80356 +
80357 }
80358 EXPORT_SYMBOL(kmem_cache_free);
80359
80360 diff --git a/mm/slub.c b/mm/slub.c
80361 index 4996fc7..87e01d0 100644
80362 --- a/mm/slub.c
80363 +++ b/mm/slub.c
80364 @@ -201,7 +201,7 @@ struct track {
80365
80366 enum track_item { TRACK_ALLOC, TRACK_FREE };
80367
80368 -#ifdef CONFIG_SLUB_DEBUG
80369 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80370 static int sysfs_slab_add(struct kmem_cache *);
80371 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80372 static void sysfs_slab_remove(struct kmem_cache *);
80373 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80374 if (!t->addr)
80375 return;
80376
80377 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80378 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80379 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80380 }
80381
80382 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80383
80384 page = virt_to_head_page(x);
80385
80386 + BUG_ON(!PageSlab(page));
80387 +
80388 slab_free(s, page, x, _RET_IP_);
80389
80390 trace_kmem_cache_free(_RET_IP_, x);
80391 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
80392 * Merge control. If this is set then no merging of slab caches will occur.
80393 * (Could be removed. This was introduced to pacify the merge skeptics.)
80394 */
80395 -static int slub_nomerge;
80396 +static int slub_nomerge = 1;
80397
80398 /*
80399 * Calculate the order of allocation given an slab object size.
80400 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80401 * list to avoid pounding the page allocator excessively.
80402 */
80403 set_min_partial(s, ilog2(s->size));
80404 - s->refcount = 1;
80405 + atomic_set(&s->refcount, 1);
80406 #ifdef CONFIG_NUMA
80407 s->remote_node_defrag_ratio = 1000;
80408 #endif
80409 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80410 void kmem_cache_destroy(struct kmem_cache *s)
80411 {
80412 down_write(&slub_lock);
80413 - s->refcount--;
80414 - if (!s->refcount) {
80415 + if (atomic_dec_and_test(&s->refcount)) {
80416 list_del(&s->list);
80417 up_write(&slub_lock);
80418 if (kmem_cache_close(s)) {
80419 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80420 __setup("slub_nomerge", setup_slub_nomerge);
80421
80422 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80423 - const char *name, int size, gfp_t gfp_flags)
80424 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80425 {
80426 - unsigned int flags = 0;
80427 -
80428 if (gfp_flags & SLUB_DMA)
80429 - flags = SLAB_CACHE_DMA;
80430 + flags |= SLAB_CACHE_DMA;
80431
80432 /*
80433 * This function is called with IRQs disabled during early-boot on
80434 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80435 EXPORT_SYMBOL(__kmalloc_node);
80436 #endif
80437
80438 +void check_object_size(const void *ptr, unsigned long n, bool to)
80439 +{
80440 +
80441 +#ifdef CONFIG_PAX_USERCOPY
80442 + struct page *page;
80443 + struct kmem_cache *s = NULL;
80444 + unsigned long offset;
80445 + const char *type;
80446 +
80447 + if (!n)
80448 + return;
80449 +
80450 + type = "<null>";
80451 + if (ZERO_OR_NULL_PTR(ptr))
80452 + goto report;
80453 +
80454 + if (!virt_addr_valid(ptr))
80455 + return;
80456 +
80457 + page = get_object_page(ptr);
80458 +
80459 + type = "<process stack>";
80460 + if (!page) {
80461 + if (object_is_on_stack(ptr, n) == -1)
80462 + goto report;
80463 + return;
80464 + }
80465 +
80466 + s = page->slab;
80467 + type = s->name;
80468 + if (!(s->flags & SLAB_USERCOPY))
80469 + goto report;
80470 +
80471 + offset = (ptr - page_address(page)) % s->size;
80472 + if (offset <= s->objsize && n <= s->objsize - offset)
80473 + return;
80474 +
80475 +report:
80476 + pax_report_usercopy(ptr, n, to, type);
80477 +#endif
80478 +
80479 +}
80480 +EXPORT_SYMBOL(check_object_size);
80481 +
80482 size_t ksize(const void *object)
80483 {
80484 struct page *page;
80485 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80486 * kmem_cache_open for slab_state == DOWN.
80487 */
80488 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80489 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
80490 - kmalloc_caches[0].refcount = -1;
80491 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80492 + atomic_set(&kmalloc_caches[0].refcount, -1);
80493 caches++;
80494
80495 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80496 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80497 /* Caches that are not of the two-to-the-power-of size */
80498 if (KMALLOC_MIN_SIZE <= 32) {
80499 create_kmalloc_cache(&kmalloc_caches[1],
80500 - "kmalloc-96", 96, GFP_NOWAIT);
80501 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80502 caches++;
80503 }
80504 if (KMALLOC_MIN_SIZE <= 64) {
80505 create_kmalloc_cache(&kmalloc_caches[2],
80506 - "kmalloc-192", 192, GFP_NOWAIT);
80507 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80508 caches++;
80509 }
80510
80511 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80512 create_kmalloc_cache(&kmalloc_caches[i],
80513 - "kmalloc", 1 << i, GFP_NOWAIT);
80514 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80515 caches++;
80516 }
80517
80518 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80519 /*
80520 * We may have set a slab to be unmergeable during bootstrap.
80521 */
80522 - if (s->refcount < 0)
80523 + if (atomic_read(&s->refcount) < 0)
80524 return 1;
80525
80526 return 0;
80527 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80528 if (s) {
80529 int cpu;
80530
80531 - s->refcount++;
80532 + atomic_inc(&s->refcount);
80533 /*
80534 * Adjust the object sizes so that we clear
80535 * the complete object on kzalloc.
80536 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80537
80538 if (sysfs_slab_alias(s, name)) {
80539 down_write(&slub_lock);
80540 - s->refcount--;
80541 + atomic_dec(&s->refcount);
80542 up_write(&slub_lock);
80543 goto err;
80544 }
80545 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80546
80547 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80548 {
80549 - return sprintf(buf, "%d\n", s->refcount - 1);
80550 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80551 }
80552 SLAB_ATTR_RO(aliases);
80553
80554 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80555 kfree(s);
80556 }
80557
80558 -static struct sysfs_ops slab_sysfs_ops = {
80559 +static const struct sysfs_ops slab_sysfs_ops = {
80560 .show = slab_attr_show,
80561 .store = slab_attr_store,
80562 };
80563 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80564 return 0;
80565 }
80566
80567 -static struct kset_uevent_ops slab_uevent_ops = {
80568 +static const struct kset_uevent_ops slab_uevent_ops = {
80569 .filter = uevent_filter,
80570 };
80571
80572 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80573 return name;
80574 }
80575
80576 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80577 static int sysfs_slab_add(struct kmem_cache *s)
80578 {
80579 int err;
80580 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80581 kobject_del(&s->kobj);
80582 kobject_put(&s->kobj);
80583 }
80584 +#endif
80585
80586 /*
80587 * Need to buffer aliases during bootup until sysfs becomes
80588 @@ -4632,6 +4677,7 @@ struct saved_alias {
80589
80590 static struct saved_alias *alias_list;
80591
80592 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80593 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80594 {
80595 struct saved_alias *al;
80596 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80597 alias_list = al;
80598 return 0;
80599 }
80600 +#endif
80601
80602 static int __init slab_sysfs_init(void)
80603 {
80604 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80605
80606 static int __init slab_proc_init(void)
80607 {
80608 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80609 + mode_t gr_mode = S_IRUGO;
80610 +
80611 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80612 + gr_mode = S_IRUSR;
80613 +#endif
80614 +
80615 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80616 return 0;
80617 }
80618 module_init(slab_proc_init);
80619 diff --git a/mm/swap.c b/mm/swap.c
80620 index 308e57d..5de19c0 100644
80621 --- a/mm/swap.c
80622 +++ b/mm/swap.c
80623 @@ -30,6 +30,7 @@
80624 #include <linux/notifier.h>
80625 #include <linux/backing-dev.h>
80626 #include <linux/memcontrol.h>
80627 +#include <linux/hugetlb.h>
80628
80629 #include "internal.h"
80630
80631 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80632 compound_page_dtor *dtor;
80633
80634 dtor = get_compound_page_dtor(page);
80635 + if (!PageHuge(page))
80636 + BUG_ON(dtor != free_compound_page);
80637 (*dtor)(page);
80638 }
80639 }
80640 diff --git a/mm/util.c b/mm/util.c
80641 index e48b493..24a601d 100644
80642 --- a/mm/util.c
80643 +++ b/mm/util.c
80644 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80645 void arch_pick_mmap_layout(struct mm_struct *mm)
80646 {
80647 mm->mmap_base = TASK_UNMAPPED_BASE;
80648 +
80649 +#ifdef CONFIG_PAX_RANDMMAP
80650 + if (mm->pax_flags & MF_PAX_RANDMMAP)
80651 + mm->mmap_base += mm->delta_mmap;
80652 +#endif
80653 +
80654 mm->get_unmapped_area = arch_get_unmapped_area;
80655 mm->unmap_area = arch_unmap_area;
80656 }
80657 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80658 index f34ffd0..e60c44f 100644
80659 --- a/mm/vmalloc.c
80660 +++ b/mm/vmalloc.c
80661 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80662
80663 pte = pte_offset_kernel(pmd, addr);
80664 do {
80665 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80666 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80667 +
80668 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80669 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80670 + BUG_ON(!pte_exec(*pte));
80671 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80672 + continue;
80673 + }
80674 +#endif
80675 +
80676 + {
80677 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80678 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80679 + }
80680 } while (pte++, addr += PAGE_SIZE, addr != end);
80681 }
80682
80683 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80684 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80685 {
80686 pte_t *pte;
80687 + int ret = -ENOMEM;
80688
80689 /*
80690 * nr is a running index into the array which helps higher level
80691 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80692 pte = pte_alloc_kernel(pmd, addr);
80693 if (!pte)
80694 return -ENOMEM;
80695 +
80696 + pax_open_kernel();
80697 do {
80698 struct page *page = pages[*nr];
80699
80700 - if (WARN_ON(!pte_none(*pte)))
80701 - return -EBUSY;
80702 - if (WARN_ON(!page))
80703 - return -ENOMEM;
80704 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80705 + if (!(pgprot_val(prot) & _PAGE_NX))
80706 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80707 + else
80708 +#endif
80709 +
80710 + if (WARN_ON(!pte_none(*pte))) {
80711 + ret = -EBUSY;
80712 + goto out;
80713 + }
80714 + if (WARN_ON(!page)) {
80715 + ret = -ENOMEM;
80716 + goto out;
80717 + }
80718 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80719 (*nr)++;
80720 } while (pte++, addr += PAGE_SIZE, addr != end);
80721 - return 0;
80722 + ret = 0;
80723 +out:
80724 + pax_close_kernel();
80725 + return ret;
80726 }
80727
80728 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80729 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80730 * and fall back on vmalloc() if that fails. Others
80731 * just put it in the vmalloc space.
80732 */
80733 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80734 +#ifdef CONFIG_MODULES
80735 +#ifdef MODULES_VADDR
80736 unsigned long addr = (unsigned long)x;
80737 if (addr >= MODULES_VADDR && addr < MODULES_END)
80738 return 1;
80739 #endif
80740 +
80741 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80742 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80743 + return 1;
80744 +#endif
80745 +
80746 +#endif
80747 +
80748 return is_vmalloc_addr(x);
80749 }
80750
80751 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80752
80753 if (!pgd_none(*pgd)) {
80754 pud_t *pud = pud_offset(pgd, addr);
80755 +#ifdef CONFIG_X86
80756 + if (!pud_large(*pud))
80757 +#endif
80758 if (!pud_none(*pud)) {
80759 pmd_t *pmd = pmd_offset(pud, addr);
80760 +#ifdef CONFIG_X86
80761 + if (!pmd_large(*pmd))
80762 +#endif
80763 if (!pmd_none(*pmd)) {
80764 pte_t *ptep, pte;
80765
80766 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80767 struct rb_node *tmp;
80768
80769 while (*p) {
80770 - struct vmap_area *tmp;
80771 + struct vmap_area *varea;
80772
80773 parent = *p;
80774 - tmp = rb_entry(parent, struct vmap_area, rb_node);
80775 - if (va->va_start < tmp->va_end)
80776 + varea = rb_entry(parent, struct vmap_area, rb_node);
80777 + if (va->va_start < varea->va_end)
80778 p = &(*p)->rb_left;
80779 - else if (va->va_end > tmp->va_start)
80780 + else if (va->va_end > varea->va_start)
80781 p = &(*p)->rb_right;
80782 else
80783 BUG();
80784 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80785 struct vm_struct *area;
80786
80787 BUG_ON(in_interrupt());
80788 +
80789 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80790 + if (flags & VM_KERNEXEC) {
80791 + if (start != VMALLOC_START || end != VMALLOC_END)
80792 + return NULL;
80793 + start = (unsigned long)MODULES_EXEC_VADDR;
80794 + end = (unsigned long)MODULES_EXEC_END;
80795 + }
80796 +#endif
80797 +
80798 if (flags & VM_IOREMAP) {
80799 int bit = fls(size);
80800
80801 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80802 if (count > totalram_pages)
80803 return NULL;
80804
80805 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80806 + if (!(pgprot_val(prot) & _PAGE_NX))
80807 + flags |= VM_KERNEXEC;
80808 +#endif
80809 +
80810 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80811 __builtin_return_address(0));
80812 if (!area)
80813 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80814 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80815 return NULL;
80816
80817 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80818 + if (!(pgprot_val(prot) & _PAGE_NX))
80819 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80820 + VMALLOC_START, VMALLOC_END, node,
80821 + gfp_mask, caller);
80822 + else
80823 +#endif
80824 +
80825 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80826 VMALLOC_START, VMALLOC_END, node,
80827 gfp_mask, caller);
80828 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80829 return addr;
80830 }
80831
80832 +#undef __vmalloc
80833 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80834 {
80835 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80836 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80837 * For tight control over page level allocator and protection flags
80838 * use __vmalloc() instead.
80839 */
80840 +#undef vmalloc
80841 void *vmalloc(unsigned long size)
80842 {
80843 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80844 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80845 * The resulting memory area is zeroed so it can be mapped to userspace
80846 * without leaking data.
80847 */
80848 +#undef vmalloc_user
80849 void *vmalloc_user(unsigned long size)
80850 {
80851 struct vm_struct *area;
80852 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80853 * For tight control over page level allocator and protection flags
80854 * use __vmalloc() instead.
80855 */
80856 +#undef vmalloc_node
80857 void *vmalloc_node(unsigned long size, int node)
80858 {
80859 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80860 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80861 * For tight control over page level allocator and protection flags
80862 * use __vmalloc() instead.
80863 */
80864 -
80865 +#undef vmalloc_exec
80866 void *vmalloc_exec(unsigned long size)
80867 {
80868 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80869 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80870 -1, __builtin_return_address(0));
80871 }
80872
80873 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80874 * Allocate enough 32bit PA addressable pages to cover @size from the
80875 * page level allocator and map them into contiguous kernel virtual space.
80876 */
80877 +#undef vmalloc_32
80878 void *vmalloc_32(unsigned long size)
80879 {
80880 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80881 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80882 * The resulting memory area is 32bit addressable and zeroed so it can be
80883 * mapped to userspace without leaking data.
80884 */
80885 +#undef vmalloc_32_user
80886 void *vmalloc_32_user(unsigned long size)
80887 {
80888 struct vm_struct *area;
80889 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80890 unsigned long uaddr = vma->vm_start;
80891 unsigned long usize = vma->vm_end - vma->vm_start;
80892
80893 + BUG_ON(vma->vm_mirror);
80894 +
80895 if ((PAGE_SIZE-1) & (unsigned long)addr)
80896 return -EINVAL;
80897
80898 diff --git a/mm/vmstat.c b/mm/vmstat.c
80899 index 42d76c6..5643dc4 100644
80900 --- a/mm/vmstat.c
80901 +++ b/mm/vmstat.c
80902 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80903 *
80904 * vm_stat contains the global counters
80905 */
80906 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80907 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80908 EXPORT_SYMBOL(vm_stat);
80909
80910 #ifdef CONFIG_SMP
80911 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80912 v = p->vm_stat_diff[i];
80913 p->vm_stat_diff[i] = 0;
80914 local_irq_restore(flags);
80915 - atomic_long_add(v, &zone->vm_stat[i]);
80916 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80917 global_diff[i] += v;
80918 #ifdef CONFIG_NUMA
80919 /* 3 seconds idle till flush */
80920 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80921
80922 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80923 if (global_diff[i])
80924 - atomic_long_add(global_diff[i], &vm_stat[i]);
80925 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80926 }
80927
80928 #endif
80929 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80930 start_cpu_timer(cpu);
80931 #endif
80932 #ifdef CONFIG_PROC_FS
80933 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80934 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80935 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80936 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80937 + {
80938 + mode_t gr_mode = S_IRUGO;
80939 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80940 + gr_mode = S_IRUSR;
80941 +#endif
80942 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80943 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80944 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80945 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80946 +#else
80947 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80948 +#endif
80949 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80950 + }
80951 #endif
80952 return 0;
80953 }
80954 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80955 index a29c5ab..6143f20 100644
80956 --- a/net/8021q/vlan.c
80957 +++ b/net/8021q/vlan.c
80958 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80959 err = -EPERM;
80960 if (!capable(CAP_NET_ADMIN))
80961 break;
80962 - if ((args.u.name_type >= 0) &&
80963 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80964 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80965 struct vlan_net *vn;
80966
80967 vn = net_generic(net, vlan_net_id);
80968 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80969 index a2d2984..f9eb711 100644
80970 --- a/net/9p/trans_fd.c
80971 +++ b/net/9p/trans_fd.c
80972 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80973 oldfs = get_fs();
80974 set_fs(get_ds());
80975 /* The cast to a user pointer is valid due to the set_fs() */
80976 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80977 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80978 set_fs(oldfs);
80979
80980 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80981 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80982 index 02cc7e7..4514f1b 100644
80983 --- a/net/atm/atm_misc.c
80984 +++ b/net/atm/atm_misc.c
80985 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80986 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80987 return 1;
80988 atm_return(vcc,truesize);
80989 - atomic_inc(&vcc->stats->rx_drop);
80990 + atomic_inc_unchecked(&vcc->stats->rx_drop);
80991 return 0;
80992 }
80993
80994 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80995 }
80996 }
80997 atm_return(vcc,guess);
80998 - atomic_inc(&vcc->stats->rx_drop);
80999 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81000 return NULL;
81001 }
81002
81003 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81004
81005 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81006 {
81007 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81008 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81009 __SONET_ITEMS
81010 #undef __HANDLE_ITEM
81011 }
81012 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81013
81014 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81015 {
81016 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81017 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81018 __SONET_ITEMS
81019 #undef __HANDLE_ITEM
81020 }
81021 diff --git a/net/atm/lec.h b/net/atm/lec.h
81022 index 9d14d19..5c145f3 100644
81023 --- a/net/atm/lec.h
81024 +++ b/net/atm/lec.h
81025 @@ -48,7 +48,7 @@ struct lane2_ops {
81026 const u8 *tlvs, u32 sizeoftlvs);
81027 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81028 const u8 *tlvs, u32 sizeoftlvs);
81029 -};
81030 +} __no_const;
81031
81032 /*
81033 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81034 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81035 index 0919a88..a23d54e 100644
81036 --- a/net/atm/mpc.h
81037 +++ b/net/atm/mpc.h
81038 @@ -33,7 +33,7 @@ struct mpoa_client {
81039 struct mpc_parameters parameters; /* parameters for this client */
81040
81041 const struct net_device_ops *old_ops;
81042 - struct net_device_ops new_ops;
81043 + net_device_ops_no_const new_ops;
81044 };
81045
81046
81047 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81048 index 4504a4b..1733f1e 100644
81049 --- a/net/atm/mpoa_caches.c
81050 +++ b/net/atm/mpoa_caches.c
81051 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81052 struct timeval now;
81053 struct k_message msg;
81054
81055 + pax_track_stack();
81056 +
81057 do_gettimeofday(&now);
81058
81059 write_lock_irq(&client->egress_lock);
81060 diff --git a/net/atm/proc.c b/net/atm/proc.c
81061 index ab8419a..aa91497 100644
81062 --- a/net/atm/proc.c
81063 +++ b/net/atm/proc.c
81064 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81065 const struct k_atm_aal_stats *stats)
81066 {
81067 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81068 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81069 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81070 - atomic_read(&stats->rx_drop));
81071 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81072 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81073 + atomic_read_unchecked(&stats->rx_drop));
81074 }
81075
81076 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81077 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81078 {
81079 struct sock *sk = sk_atm(vcc);
81080
81081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81082 + seq_printf(seq, "%p ", NULL);
81083 +#else
81084 seq_printf(seq, "%p ", vcc);
81085 +#endif
81086 +
81087 if (!vcc->dev)
81088 seq_printf(seq, "Unassigned ");
81089 else
81090 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81091 {
81092 if (!vcc->dev)
81093 seq_printf(seq, sizeof(void *) == 4 ?
81094 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81095 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81096 +#else
81097 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81098 +#endif
81099 else
81100 seq_printf(seq, "%3d %3d %5d ",
81101 vcc->dev->number, vcc->vpi, vcc->vci);
81102 diff --git a/net/atm/resources.c b/net/atm/resources.c
81103 index 56b7322..c48b84e 100644
81104 --- a/net/atm/resources.c
81105 +++ b/net/atm/resources.c
81106 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81107 static void copy_aal_stats(struct k_atm_aal_stats *from,
81108 struct atm_aal_stats *to)
81109 {
81110 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81111 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81112 __AAL_STAT_ITEMS
81113 #undef __HANDLE_ITEM
81114 }
81115 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81116 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81117 struct atm_aal_stats *to)
81118 {
81119 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81120 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81121 __AAL_STAT_ITEMS
81122 #undef __HANDLE_ITEM
81123 }
81124 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81125 index 8567d47..bba2292 100644
81126 --- a/net/bridge/br_private.h
81127 +++ b/net/bridge/br_private.h
81128 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81129
81130 #ifdef CONFIG_SYSFS
81131 /* br_sysfs_if.c */
81132 -extern struct sysfs_ops brport_sysfs_ops;
81133 +extern const struct sysfs_ops brport_sysfs_ops;
81134 extern int br_sysfs_addif(struct net_bridge_port *p);
81135
81136 /* br_sysfs_br.c */
81137 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81138 index 9a52ac5..c97538e 100644
81139 --- a/net/bridge/br_stp_if.c
81140 +++ b/net/bridge/br_stp_if.c
81141 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81142 char *envp[] = { NULL };
81143
81144 if (br->stp_enabled == BR_USER_STP) {
81145 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81146 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81147 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81148 br->dev->name, r);
81149
81150 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81151 index 820643a..ce77fb3 100644
81152 --- a/net/bridge/br_sysfs_if.c
81153 +++ b/net/bridge/br_sysfs_if.c
81154 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81155 return ret;
81156 }
81157
81158 -struct sysfs_ops brport_sysfs_ops = {
81159 +const struct sysfs_ops brport_sysfs_ops = {
81160 .show = brport_show,
81161 .store = brport_store,
81162 };
81163 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81164 index d73d47f..72df42a 100644
81165 --- a/net/bridge/netfilter/ebtables.c
81166 +++ b/net/bridge/netfilter/ebtables.c
81167 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81168 unsigned int entries_size, nentries;
81169 char *entries;
81170
81171 + pax_track_stack();
81172 +
81173 if (cmd == EBT_SO_GET_ENTRIES) {
81174 entries_size = t->private->entries_size;
81175 nentries = t->private->nentries;
81176 diff --git a/net/can/bcm.c b/net/can/bcm.c
81177 index 2ffd2e0..72a7486 100644
81178 --- a/net/can/bcm.c
81179 +++ b/net/can/bcm.c
81180 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81181 struct bcm_sock *bo = bcm_sk(sk);
81182 struct bcm_op *op;
81183
81184 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81185 + seq_printf(m, ">>> socket %p", NULL);
81186 + seq_printf(m, " / sk %p", NULL);
81187 + seq_printf(m, " / bo %p", NULL);
81188 +#else
81189 seq_printf(m, ">>> socket %p", sk->sk_socket);
81190 seq_printf(m, " / sk %p", sk);
81191 seq_printf(m, " / bo %p", bo);
81192 +#endif
81193 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81194 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81195 seq_printf(m, " <<<\n");
81196 diff --git a/net/compat.c b/net/compat.c
81197 index 9559afc..ccd74e1 100644
81198 --- a/net/compat.c
81199 +++ b/net/compat.c
81200 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81201 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81202 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81203 return -EFAULT;
81204 - kmsg->msg_name = compat_ptr(tmp1);
81205 - kmsg->msg_iov = compat_ptr(tmp2);
81206 - kmsg->msg_control = compat_ptr(tmp3);
81207 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81208 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81209 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81210 return 0;
81211 }
81212
81213 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81214 kern_msg->msg_name = NULL;
81215
81216 tot_len = iov_from_user_compat_to_kern(kern_iov,
81217 - (struct compat_iovec __user *)kern_msg->msg_iov,
81218 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
81219 kern_msg->msg_iovlen);
81220 if (tot_len >= 0)
81221 kern_msg->msg_iov = kern_iov;
81222 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81223
81224 #define CMSG_COMPAT_FIRSTHDR(msg) \
81225 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81226 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81227 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81228 (struct compat_cmsghdr __user *)NULL)
81229
81230 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81231 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81232 (ucmlen) <= (unsigned long) \
81233 ((mhdr)->msg_controllen - \
81234 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81235 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81236
81237 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81238 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81239 {
81240 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81241 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81242 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81243 msg->msg_controllen)
81244 return NULL;
81245 return (struct compat_cmsghdr __user *)ptr;
81246 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81247 {
81248 struct compat_timeval ctv;
81249 struct compat_timespec cts[3];
81250 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81251 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81252 struct compat_cmsghdr cmhdr;
81253 int cmlen;
81254
81255 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81256
81257 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81258 {
81259 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81260 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81261 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81262 int fdnum = scm->fp->count;
81263 struct file **fp = scm->fp->fp;
81264 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81265 len = sizeof(ktime);
81266 old_fs = get_fs();
81267 set_fs(KERNEL_DS);
81268 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81269 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81270 set_fs(old_fs);
81271
81272 if (!err) {
81273 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81274 case MCAST_JOIN_GROUP:
81275 case MCAST_LEAVE_GROUP:
81276 {
81277 - struct compat_group_req __user *gr32 = (void *)optval;
81278 + struct compat_group_req __user *gr32 = (void __user *)optval;
81279 struct group_req __user *kgr =
81280 compat_alloc_user_space(sizeof(struct group_req));
81281 u32 interface;
81282 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81283 case MCAST_BLOCK_SOURCE:
81284 case MCAST_UNBLOCK_SOURCE:
81285 {
81286 - struct compat_group_source_req __user *gsr32 = (void *)optval;
81287 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81288 struct group_source_req __user *kgsr = compat_alloc_user_space(
81289 sizeof(struct group_source_req));
81290 u32 interface;
81291 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81292 }
81293 case MCAST_MSFILTER:
81294 {
81295 - struct compat_group_filter __user *gf32 = (void *)optval;
81296 + struct compat_group_filter __user *gf32 = (void __user *)optval;
81297 struct group_filter __user *kgf;
81298 u32 interface, fmode, numsrc;
81299
81300 diff --git a/net/core/dev.c b/net/core/dev.c
81301 index 84a0705..575db4c 100644
81302 --- a/net/core/dev.c
81303 +++ b/net/core/dev.c
81304 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81305 if (no_module && capable(CAP_NET_ADMIN))
81306 no_module = request_module("netdev-%s", name);
81307 if (no_module && capable(CAP_SYS_MODULE)) {
81308 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
81309 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
81310 +#else
81311 if (!request_module("%s", name))
81312 pr_err("Loading kernel module for a network device "
81313 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81314 "instead\n", name);
81315 +#endif
81316 }
81317 }
81318 EXPORT_SYMBOL(dev_load);
81319 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81320
81321 struct dev_gso_cb {
81322 void (*destructor)(struct sk_buff *skb);
81323 -};
81324 +} __no_const;
81325
81326 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81327
81328 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81329 }
81330 EXPORT_SYMBOL(netif_rx_ni);
81331
81332 -static void net_tx_action(struct softirq_action *h)
81333 +static void net_tx_action(void)
81334 {
81335 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81336
81337 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81338 EXPORT_SYMBOL(netif_napi_del);
81339
81340
81341 -static void net_rx_action(struct softirq_action *h)
81342 +static void net_rx_action(void)
81343 {
81344 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81345 unsigned long time_limit = jiffies + 2;
81346 diff --git a/net/core/flow.c b/net/core/flow.c
81347 index 9601587..8c4824e 100644
81348 --- a/net/core/flow.c
81349 +++ b/net/core/flow.c
81350 @@ -35,11 +35,11 @@ struct flow_cache_entry {
81351 atomic_t *object_ref;
81352 };
81353
81354 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
81355 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81356
81357 static u32 flow_hash_shift;
81358 #define flow_hash_size (1 << flow_hash_shift)
81359 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81360 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81361
81362 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81363
81364 @@ -52,7 +52,7 @@ struct flow_percpu_info {
81365 u32 hash_rnd;
81366 int count;
81367 };
81368 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81369 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81370
81371 #define flow_hash_rnd_recalc(cpu) \
81372 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81373 @@ -69,7 +69,7 @@ struct flow_flush_info {
81374 atomic_t cpuleft;
81375 struct completion completion;
81376 };
81377 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81378 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81379
81380 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81381
81382 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81383 if (fle->family == family &&
81384 fle->dir == dir &&
81385 flow_key_compare(key, &fle->key) == 0) {
81386 - if (fle->genid == atomic_read(&flow_cache_genid)) {
81387 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81388 void *ret = fle->object;
81389
81390 if (ret)
81391 @@ -228,7 +228,7 @@ nocache:
81392 err = resolver(net, key, family, dir, &obj, &obj_ref);
81393
81394 if (fle && !err) {
81395 - fle->genid = atomic_read(&flow_cache_genid);
81396 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
81397
81398 if (fle->object)
81399 atomic_dec(fle->object_ref);
81400 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81401
81402 fle = flow_table(cpu)[i];
81403 for (; fle; fle = fle->next) {
81404 - unsigned genid = atomic_read(&flow_cache_genid);
81405 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81406
81407 if (!fle->object || fle->genid == genid)
81408 continue;
81409 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81410 index d4fd895..ac9b1e6 100644
81411 --- a/net/core/rtnetlink.c
81412 +++ b/net/core/rtnetlink.c
81413 @@ -57,7 +57,7 @@ struct rtnl_link
81414 {
81415 rtnl_doit_func doit;
81416 rtnl_dumpit_func dumpit;
81417 -};
81418 +} __no_const;
81419
81420 static DEFINE_MUTEX(rtnl_mutex);
81421
81422 diff --git a/net/core/scm.c b/net/core/scm.c
81423 index d98eafc..1a190a9 100644
81424 --- a/net/core/scm.c
81425 +++ b/net/core/scm.c
81426 @@ -191,7 +191,7 @@ error:
81427 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81428 {
81429 struct cmsghdr __user *cm
81430 - = (__force struct cmsghdr __user *)msg->msg_control;
81431 + = (struct cmsghdr __force_user *)msg->msg_control;
81432 struct cmsghdr cmhdr;
81433 int cmlen = CMSG_LEN(len);
81434 int err;
81435 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81436 err = -EFAULT;
81437 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81438 goto out;
81439 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81440 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81441 goto out;
81442 cmlen = CMSG_SPACE(len);
81443 if (msg->msg_controllen < cmlen)
81444 @@ -229,7 +229,7 @@ out:
81445 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81446 {
81447 struct cmsghdr __user *cm
81448 - = (__force struct cmsghdr __user*)msg->msg_control;
81449 + = (struct cmsghdr __force_user *)msg->msg_control;
81450
81451 int fdmax = 0;
81452 int fdnum = scm->fp->count;
81453 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81454 if (fdnum < fdmax)
81455 fdmax = fdnum;
81456
81457 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81458 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81459 i++, cmfptr++)
81460 {
81461 int new_fd;
81462 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81463 index 45329d7..626aaa6 100644
81464 --- a/net/core/secure_seq.c
81465 +++ b/net/core/secure_seq.c
81466 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81467 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81468
81469 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81470 - __be16 dport)
81471 + __be16 dport)
81472 {
81473 u32 secret[MD5_MESSAGE_BYTES / 4];
81474 u32 hash[MD5_DIGEST_WORDS];
81475 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81476 secret[i] = net_secret[i];
81477
81478 md5_transform(hash, secret);
81479 -
81480 return hash[0];
81481 }
81482 #endif
81483 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81484 index 025f924..70a71c4 100644
81485 --- a/net/core/skbuff.c
81486 +++ b/net/core/skbuff.c
81487 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81488 struct sk_buff *frag_iter;
81489 struct sock *sk = skb->sk;
81490
81491 + pax_track_stack();
81492 +
81493 /*
81494 * __skb_splice_bits() only fails if the output has no room left,
81495 * so no point in going over the frag_list for the error case.
81496 diff --git a/net/core/sock.c b/net/core/sock.c
81497 index 6605e75..3acebda 100644
81498 --- a/net/core/sock.c
81499 +++ b/net/core/sock.c
81500 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81501 break;
81502
81503 case SO_PEERCRED:
81504 + {
81505 + struct ucred peercred;
81506 if (len > sizeof(sk->sk_peercred))
81507 len = sizeof(sk->sk_peercred);
81508 - if (copy_to_user(optval, &sk->sk_peercred, len))
81509 + peercred = sk->sk_peercred;
81510 + if (copy_to_user(optval, &peercred, len))
81511 return -EFAULT;
81512 goto lenout;
81513 + }
81514
81515 case SO_PEERNAME:
81516 {
81517 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81518 */
81519 smp_wmb();
81520 atomic_set(&sk->sk_refcnt, 1);
81521 - atomic_set(&sk->sk_drops, 0);
81522 + atomic_set_unchecked(&sk->sk_drops, 0);
81523 }
81524 EXPORT_SYMBOL(sock_init_data);
81525
81526 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81527 index 2036568..c55883d 100644
81528 --- a/net/decnet/sysctl_net_decnet.c
81529 +++ b/net/decnet/sysctl_net_decnet.c
81530 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81531
81532 if (len > *lenp) len = *lenp;
81533
81534 - if (copy_to_user(buffer, addr, len))
81535 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
81536 return -EFAULT;
81537
81538 *lenp = len;
81539 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81540
81541 if (len > *lenp) len = *lenp;
81542
81543 - if (copy_to_user(buffer, devname, len))
81544 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
81545 return -EFAULT;
81546
81547 *lenp = len;
81548 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81549 index 39a2d29..f39c0fe 100644
81550 --- a/net/econet/Kconfig
81551 +++ b/net/econet/Kconfig
81552 @@ -4,7 +4,7 @@
81553
81554 config ECONET
81555 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81556 - depends on EXPERIMENTAL && INET
81557 + depends on EXPERIMENTAL && INET && BROKEN
81558 ---help---
81559 Econet is a fairly old and slow networking protocol mainly used by
81560 Acorn computers to access file and print servers. It uses native
81561 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81562 index a413b1b..380849c 100644
81563 --- a/net/ieee802154/dgram.c
81564 +++ b/net/ieee802154/dgram.c
81565 @@ -318,7 +318,7 @@ out:
81566 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81567 {
81568 if (sock_queue_rcv_skb(sk, skb) < 0) {
81569 - atomic_inc(&sk->sk_drops);
81570 + atomic_inc_unchecked(&sk->sk_drops);
81571 kfree_skb(skb);
81572 return NET_RX_DROP;
81573 }
81574 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81575 index 30e74ee..bfc6ee0 100644
81576 --- a/net/ieee802154/raw.c
81577 +++ b/net/ieee802154/raw.c
81578 @@ -206,7 +206,7 @@ out:
81579 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81580 {
81581 if (sock_queue_rcv_skb(sk, skb) < 0) {
81582 - atomic_inc(&sk->sk_drops);
81583 + atomic_inc_unchecked(&sk->sk_drops);
81584 kfree_skb(skb);
81585 return NET_RX_DROP;
81586 }
81587 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81588 index dba56d2..acee5d6 100644
81589 --- a/net/ipv4/inet_diag.c
81590 +++ b/net/ipv4/inet_diag.c
81591 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81592 r->idiag_retrans = 0;
81593
81594 r->id.idiag_if = sk->sk_bound_dev_if;
81595 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81596 + r->id.idiag_cookie[0] = 0;
81597 + r->id.idiag_cookie[1] = 0;
81598 +#else
81599 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81600 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81601 +#endif
81602
81603 r->id.idiag_sport = inet->sport;
81604 r->id.idiag_dport = inet->dport;
81605 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81606 r->idiag_family = tw->tw_family;
81607 r->idiag_retrans = 0;
81608 r->id.idiag_if = tw->tw_bound_dev_if;
81609 +
81610 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81611 + r->id.idiag_cookie[0] = 0;
81612 + r->id.idiag_cookie[1] = 0;
81613 +#else
81614 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81615 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81616 +#endif
81617 +
81618 r->id.idiag_sport = tw->tw_sport;
81619 r->id.idiag_dport = tw->tw_dport;
81620 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81621 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81622 if (sk == NULL)
81623 goto unlock;
81624
81625 +#ifndef CONFIG_GRKERNSEC_HIDESYM
81626 err = -ESTALE;
81627 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81628 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81629 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81630 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81631 goto out;
81632 +#endif
81633
81634 err = -ENOMEM;
81635 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81636 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81637 r->idiag_retrans = req->retrans;
81638
81639 r->id.idiag_if = sk->sk_bound_dev_if;
81640 +
81641 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81642 + r->id.idiag_cookie[0] = 0;
81643 + r->id.idiag_cookie[1] = 0;
81644 +#else
81645 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81646 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81647 +#endif
81648
81649 tmo = req->expires - jiffies;
81650 if (tmo < 0)
81651 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81652 index d717267..56de7e7 100644
81653 --- a/net/ipv4/inet_hashtables.c
81654 +++ b/net/ipv4/inet_hashtables.c
81655 @@ -18,12 +18,15 @@
81656 #include <linux/sched.h>
81657 #include <linux/slab.h>
81658 #include <linux/wait.h>
81659 +#include <linux/security.h>
81660
81661 #include <net/inet_connection_sock.h>
81662 #include <net/inet_hashtables.h>
81663 #include <net/secure_seq.h>
81664 #include <net/ip.h>
81665
81666 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81667 +
81668 /*
81669 * Allocate and initialize a new local port bind bucket.
81670 * The bindhash mutex for snum's hash chain must be held here.
81671 @@ -491,6 +494,8 @@ ok:
81672 }
81673 spin_unlock(&head->lock);
81674
81675 + gr_update_task_in_ip_table(current, inet_sk(sk));
81676 +
81677 if (tw) {
81678 inet_twsk_deschedule(tw, death_row);
81679 inet_twsk_put(tw);
81680 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81681 index 13b229f..6956484 100644
81682 --- a/net/ipv4/inetpeer.c
81683 +++ b/net/ipv4/inetpeer.c
81684 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81685 struct inet_peer *p, *n;
81686 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81687
81688 + pax_track_stack();
81689 +
81690 /* Look up for the address quickly. */
81691 read_lock_bh(&peer_pool_lock);
81692 p = lookup(daddr, NULL);
81693 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81694 return NULL;
81695 n->v4daddr = daddr;
81696 atomic_set(&n->refcnt, 1);
81697 - atomic_set(&n->rid, 0);
81698 + atomic_set_unchecked(&n->rid, 0);
81699 n->ip_id_count = secure_ip_id(daddr);
81700 n->tcp_ts_stamp = 0;
81701
81702 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81703 index d3fe10b..feeafc9 100644
81704 --- a/net/ipv4/ip_fragment.c
81705 +++ b/net/ipv4/ip_fragment.c
81706 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81707 return 0;
81708
81709 start = qp->rid;
81710 - end = atomic_inc_return(&peer->rid);
81711 + end = atomic_inc_return_unchecked(&peer->rid);
81712 qp->rid = end;
81713
81714 rc = qp->q.fragments && (end - start) > max;
81715 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81716 index e982b5c..f079d75 100644
81717 --- a/net/ipv4/ip_sockglue.c
81718 +++ b/net/ipv4/ip_sockglue.c
81719 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81720 int val;
81721 int len;
81722
81723 + pax_track_stack();
81724 +
81725 if (level != SOL_IP)
81726 return -EOPNOTSUPP;
81727
81728 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81729 if (sk->sk_type != SOCK_STREAM)
81730 return -ENOPROTOOPT;
81731
81732 - msg.msg_control = optval;
81733 + msg.msg_control = (void __force_kernel *)optval;
81734 msg.msg_controllen = len;
81735 msg.msg_flags = 0;
81736
81737 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81738 index f8d04c2..c1188f2 100644
81739 --- a/net/ipv4/ipconfig.c
81740 +++ b/net/ipv4/ipconfig.c
81741 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81742
81743 mm_segment_t oldfs = get_fs();
81744 set_fs(get_ds());
81745 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81746 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81747 set_fs(oldfs);
81748 return res;
81749 }
81750 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81751
81752 mm_segment_t oldfs = get_fs();
81753 set_fs(get_ds());
81754 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81755 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81756 set_fs(oldfs);
81757 return res;
81758 }
81759 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81760
81761 mm_segment_t oldfs = get_fs();
81762 set_fs(get_ds());
81763 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81764 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81765 set_fs(oldfs);
81766 return res;
81767 }
81768 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81769 index c8b0cc3..4da5ae2 100644
81770 --- a/net/ipv4/netfilter/arp_tables.c
81771 +++ b/net/ipv4/netfilter/arp_tables.c
81772 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81773 private = &tmp;
81774 }
81775 #endif
81776 + memset(&info, 0, sizeof(info));
81777 info.valid_hooks = t->valid_hooks;
81778 memcpy(info.hook_entry, private->hook_entry,
81779 sizeof(info.hook_entry));
81780 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81781 index c156db2..e772975 100644
81782 --- a/net/ipv4/netfilter/ip_queue.c
81783 +++ b/net/ipv4/netfilter/ip_queue.c
81784 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81785
81786 if (v->data_len < sizeof(*user_iph))
81787 return 0;
81788 + if (v->data_len > 65535)
81789 + return -EMSGSIZE;
81790 +
81791 diff = v->data_len - e->skb->len;
81792 if (diff < 0) {
81793 if (pskb_trim(e->skb, v->data_len))
81794 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81795 static inline void
81796 __ipq_rcv_skb(struct sk_buff *skb)
81797 {
81798 - int status, type, pid, flags, nlmsglen, skblen;
81799 + int status, type, pid, flags;
81800 + unsigned int nlmsglen, skblen;
81801 struct nlmsghdr *nlh;
81802
81803 skblen = skb->len;
81804 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81805 index 0606db1..02e7e4c 100644
81806 --- a/net/ipv4/netfilter/ip_tables.c
81807 +++ b/net/ipv4/netfilter/ip_tables.c
81808 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81809 private = &tmp;
81810 }
81811 #endif
81812 + memset(&info, 0, sizeof(info));
81813 info.valid_hooks = t->valid_hooks;
81814 memcpy(info.hook_entry, private->hook_entry,
81815 sizeof(info.hook_entry));
81816 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81817 index d9521f6..3c3eb25 100644
81818 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81819 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81820 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81821
81822 *len = 0;
81823
81824 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81825 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81826 if (*octets == NULL) {
81827 if (net_ratelimit())
81828 printk("OOM in bsalg (%d)\n", __LINE__);
81829 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81830 index ab996f9..3da5f96 100644
81831 --- a/net/ipv4/raw.c
81832 +++ b/net/ipv4/raw.c
81833 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81834 /* Charge it to the socket. */
81835
81836 if (sock_queue_rcv_skb(sk, skb) < 0) {
81837 - atomic_inc(&sk->sk_drops);
81838 + atomic_inc_unchecked(&sk->sk_drops);
81839 kfree_skb(skb);
81840 return NET_RX_DROP;
81841 }
81842 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81843 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81844 {
81845 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81846 - atomic_inc(&sk->sk_drops);
81847 + atomic_inc_unchecked(&sk->sk_drops);
81848 kfree_skb(skb);
81849 return NET_RX_DROP;
81850 }
81851 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81852
81853 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81854 {
81855 + struct icmp_filter filter;
81856 +
81857 + if (optlen < 0)
81858 + return -EINVAL;
81859 if (optlen > sizeof(struct icmp_filter))
81860 optlen = sizeof(struct icmp_filter);
81861 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81862 + if (copy_from_user(&filter, optval, optlen))
81863 return -EFAULT;
81864 + raw_sk(sk)->filter = filter;
81865 +
81866 return 0;
81867 }
81868
81869 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81870 {
81871 int len, ret = -EFAULT;
81872 + struct icmp_filter filter;
81873
81874 if (get_user(len, optlen))
81875 goto out;
81876 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81877 if (len > sizeof(struct icmp_filter))
81878 len = sizeof(struct icmp_filter);
81879 ret = -EFAULT;
81880 - if (put_user(len, optlen) ||
81881 - copy_to_user(optval, &raw_sk(sk)->filter, len))
81882 + filter = raw_sk(sk)->filter;
81883 + if (put_user(len, optlen) || len > sizeof filter ||
81884 + copy_to_user(optval, &filter, len))
81885 goto out;
81886 ret = 0;
81887 out: return ret;
81888 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81889 sk_wmem_alloc_get(sp),
81890 sk_rmem_alloc_get(sp),
81891 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81892 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81893 + atomic_read(&sp->sk_refcnt),
81894 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81895 + NULL,
81896 +#else
81897 + sp,
81898 +#endif
81899 + atomic_read_unchecked(&sp->sk_drops));
81900 }
81901
81902 static int raw_seq_show(struct seq_file *seq, void *v)
81903 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81904 index 58f141b..b759702 100644
81905 --- a/net/ipv4/route.c
81906 +++ b/net/ipv4/route.c
81907 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81908
81909 static inline int rt_genid(struct net *net)
81910 {
81911 - return atomic_read(&net->ipv4.rt_genid);
81912 + return atomic_read_unchecked(&net->ipv4.rt_genid);
81913 }
81914
81915 #ifdef CONFIG_PROC_FS
81916 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81917 unsigned char shuffle;
81918
81919 get_random_bytes(&shuffle, sizeof(shuffle));
81920 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81921 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81922 }
81923
81924 /*
81925 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81926
81927 static __net_init int rt_secret_timer_init(struct net *net)
81928 {
81929 - atomic_set(&net->ipv4.rt_genid,
81930 + atomic_set_unchecked(&net->ipv4.rt_genid,
81931 (int) ((num_physpages ^ (num_physpages>>8)) ^
81932 (jiffies ^ (jiffies >> 7))));
81933
81934 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81935 index f095659..adc892a 100644
81936 --- a/net/ipv4/tcp.c
81937 +++ b/net/ipv4/tcp.c
81938 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81939 int val;
81940 int err = 0;
81941
81942 + pax_track_stack();
81943 +
81944 /* This is a string value all the others are int's */
81945 if (optname == TCP_CONGESTION) {
81946 char name[TCP_CA_NAME_MAX];
81947 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81948 struct tcp_sock *tp = tcp_sk(sk);
81949 int val, len;
81950
81951 + pax_track_stack();
81952 +
81953 if (get_user(len, optlen))
81954 return -EFAULT;
81955
81956 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81957 index 6fc7961..33bad4a 100644
81958 --- a/net/ipv4/tcp_ipv4.c
81959 +++ b/net/ipv4/tcp_ipv4.c
81960 @@ -85,6 +85,9 @@
81961 int sysctl_tcp_tw_reuse __read_mostly;
81962 int sysctl_tcp_low_latency __read_mostly;
81963
81964 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81965 +extern int grsec_enable_blackhole;
81966 +#endif
81967
81968 #ifdef CONFIG_TCP_MD5SIG
81969 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81970 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81971 return 0;
81972
81973 reset:
81974 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81975 + if (!grsec_enable_blackhole)
81976 +#endif
81977 tcp_v4_send_reset(rsk, skb);
81978 discard:
81979 kfree_skb(skb);
81980 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81981 TCP_SKB_CB(skb)->sacked = 0;
81982
81983 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81984 - if (!sk)
81985 + if (!sk) {
81986 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81987 + ret = 1;
81988 +#endif
81989 goto no_tcp_socket;
81990 + }
81991
81992 process:
81993 - if (sk->sk_state == TCP_TIME_WAIT)
81994 + if (sk->sk_state == TCP_TIME_WAIT) {
81995 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81996 + ret = 2;
81997 +#endif
81998 goto do_time_wait;
81999 + }
82000
82001 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82002 goto discard_and_relse;
82003 @@ -1651,6 +1665,10 @@ no_tcp_socket:
82004 bad_packet:
82005 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82006 } else {
82007 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82008 + if (!grsec_enable_blackhole || (ret == 1 &&
82009 + (skb->dev->flags & IFF_LOOPBACK)))
82010 +#endif
82011 tcp_v4_send_reset(NULL, skb);
82012 }
82013
82014 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82015 0, /* non standard timer */
82016 0, /* open_requests have no inode */
82017 atomic_read(&sk->sk_refcnt),
82018 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82019 + NULL,
82020 +#else
82021 req,
82022 +#endif
82023 len);
82024 }
82025
82026 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82027 sock_i_uid(sk),
82028 icsk->icsk_probes_out,
82029 sock_i_ino(sk),
82030 - atomic_read(&sk->sk_refcnt), sk,
82031 + atomic_read(&sk->sk_refcnt),
82032 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82033 + NULL,
82034 +#else
82035 + sk,
82036 +#endif
82037 jiffies_to_clock_t(icsk->icsk_rto),
82038 jiffies_to_clock_t(icsk->icsk_ack.ato),
82039 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82040 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82041 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82042 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82043 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82044 - atomic_read(&tw->tw_refcnt), tw, len);
82045 + atomic_read(&tw->tw_refcnt),
82046 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82047 + NULL,
82048 +#else
82049 + tw,
82050 +#endif
82051 + len);
82052 }
82053
82054 #define TMPSZ 150
82055 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82056 index 4c03598..e09a8e8 100644
82057 --- a/net/ipv4/tcp_minisocks.c
82058 +++ b/net/ipv4/tcp_minisocks.c
82059 @@ -26,6 +26,10 @@
82060 #include <net/inet_common.h>
82061 #include <net/xfrm.h>
82062
82063 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82064 +extern int grsec_enable_blackhole;
82065 +#endif
82066 +
82067 #ifdef CONFIG_SYSCTL
82068 #define SYNC_INIT 0 /* let the user enable it */
82069 #else
82070 @@ -672,6 +676,10 @@ listen_overflow:
82071
82072 embryonic_reset:
82073 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82074 +
82075 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82076 + if (!grsec_enable_blackhole)
82077 +#endif
82078 if (!(flg & TCP_FLAG_RST))
82079 req->rsk_ops->send_reset(sk, skb);
82080
82081 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82082 index af83bdf..ec91cb2 100644
82083 --- a/net/ipv4/tcp_output.c
82084 +++ b/net/ipv4/tcp_output.c
82085 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82086 __u8 *md5_hash_location;
82087 int mss;
82088
82089 + pax_track_stack();
82090 +
82091 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82092 if (skb == NULL)
82093 return NULL;
82094 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82095 index 59f5b5e..193860f 100644
82096 --- a/net/ipv4/tcp_probe.c
82097 +++ b/net/ipv4/tcp_probe.c
82098 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82099 if (cnt + width >= len)
82100 break;
82101
82102 - if (copy_to_user(buf + cnt, tbuf, width))
82103 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82104 return -EFAULT;
82105 cnt += width;
82106 }
82107 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82108 index 57d5501..a9ed13a 100644
82109 --- a/net/ipv4/tcp_timer.c
82110 +++ b/net/ipv4/tcp_timer.c
82111 @@ -21,6 +21,10 @@
82112 #include <linux/module.h>
82113 #include <net/tcp.h>
82114
82115 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82116 +extern int grsec_lastack_retries;
82117 +#endif
82118 +
82119 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82120 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82121 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82122 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82123 }
82124 }
82125
82126 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82127 + if ((sk->sk_state == TCP_LAST_ACK) &&
82128 + (grsec_lastack_retries > 0) &&
82129 + (grsec_lastack_retries < retry_until))
82130 + retry_until = grsec_lastack_retries;
82131 +#endif
82132 +
82133 if (retransmits_timed_out(sk, retry_until)) {
82134 /* Has it gone just too far? */
82135 tcp_write_err(sk);
82136 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82137 index 8e28770..72105c8 100644
82138 --- a/net/ipv4/udp.c
82139 +++ b/net/ipv4/udp.c
82140 @@ -86,6 +86,7 @@
82141 #include <linux/types.h>
82142 #include <linux/fcntl.h>
82143 #include <linux/module.h>
82144 +#include <linux/security.h>
82145 #include <linux/socket.h>
82146 #include <linux/sockios.h>
82147 #include <linux/igmp.h>
82148 @@ -106,6 +107,10 @@
82149 #include <net/xfrm.h>
82150 #include "udp_impl.h"
82151
82152 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82153 +extern int grsec_enable_blackhole;
82154 +#endif
82155 +
82156 struct udp_table udp_table;
82157 EXPORT_SYMBOL(udp_table);
82158
82159 @@ -371,6 +376,9 @@ found:
82160 return s;
82161 }
82162
82163 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82164 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82165 +
82166 /*
82167 * This routine is called by the ICMP module when it gets some
82168 * sort of error condition. If err < 0 then the socket should
82169 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82170 dport = usin->sin_port;
82171 if (dport == 0)
82172 return -EINVAL;
82173 +
82174 + err = gr_search_udp_sendmsg(sk, usin);
82175 + if (err)
82176 + return err;
82177 } else {
82178 if (sk->sk_state != TCP_ESTABLISHED)
82179 return -EDESTADDRREQ;
82180 +
82181 + err = gr_search_udp_sendmsg(sk, NULL);
82182 + if (err)
82183 + return err;
82184 +
82185 daddr = inet->daddr;
82186 dport = inet->dport;
82187 /* Open fast path for connected socket.
82188 @@ -945,6 +962,10 @@ try_again:
82189 if (!skb)
82190 goto out;
82191
82192 + err = gr_search_udp_recvmsg(sk, skb);
82193 + if (err)
82194 + goto out_free;
82195 +
82196 ulen = skb->len - sizeof(struct udphdr);
82197 copied = len;
82198 if (copied > ulen)
82199 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82200 if (rc == -ENOMEM) {
82201 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82202 is_udplite);
82203 - atomic_inc(&sk->sk_drops);
82204 + atomic_inc_unchecked(&sk->sk_drops);
82205 }
82206 goto drop;
82207 }
82208 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82209 goto csum_error;
82210
82211 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82212 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82213 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82214 +#endif
82215 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82216
82217 /*
82218 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82219 sk_wmem_alloc_get(sp),
82220 sk_rmem_alloc_get(sp),
82221 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82222 - atomic_read(&sp->sk_refcnt), sp,
82223 - atomic_read(&sp->sk_drops), len);
82224 + atomic_read(&sp->sk_refcnt),
82225 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82226 + NULL,
82227 +#else
82228 + sp,
82229 +#endif
82230 + atomic_read_unchecked(&sp->sk_drops), len);
82231 }
82232
82233 int udp4_seq_show(struct seq_file *seq, void *v)
82234 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82235 index 8ac3d09..fc58c5f 100644
82236 --- a/net/ipv6/addrconf.c
82237 +++ b/net/ipv6/addrconf.c
82238 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82239 p.iph.ihl = 5;
82240 p.iph.protocol = IPPROTO_IPV6;
82241 p.iph.ttl = 64;
82242 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82243 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82244
82245 if (ops->ndo_do_ioctl) {
82246 mm_segment_t oldfs = get_fs();
82247 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82248 index cc4797d..7cfdfcc 100644
82249 --- a/net/ipv6/inet6_connection_sock.c
82250 +++ b/net/ipv6/inet6_connection_sock.c
82251 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82252 #ifdef CONFIG_XFRM
82253 {
82254 struct rt6_info *rt = (struct rt6_info *)dst;
82255 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82256 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82257 }
82258 #endif
82259 }
82260 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82261 #ifdef CONFIG_XFRM
82262 if (dst) {
82263 struct rt6_info *rt = (struct rt6_info *)dst;
82264 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82265 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82266 sk->sk_dst_cache = NULL;
82267 dst_release(dst);
82268 dst = NULL;
82269 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82270 index 093e9b2..f72cddb 100644
82271 --- a/net/ipv6/inet6_hashtables.c
82272 +++ b/net/ipv6/inet6_hashtables.c
82273 @@ -119,7 +119,7 @@ out:
82274 }
82275 EXPORT_SYMBOL(__inet6_lookup_established);
82276
82277 -static int inline compute_score(struct sock *sk, struct net *net,
82278 +static inline int compute_score(struct sock *sk, struct net *net,
82279 const unsigned short hnum,
82280 const struct in6_addr *daddr,
82281 const int dif)
82282 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82283 index 4f7aaf6..f7acf45 100644
82284 --- a/net/ipv6/ipv6_sockglue.c
82285 +++ b/net/ipv6/ipv6_sockglue.c
82286 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82287 int val, valbool;
82288 int retv = -ENOPROTOOPT;
82289
82290 + pax_track_stack();
82291 +
82292 if (optval == NULL)
82293 val=0;
82294 else {
82295 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82296 int len;
82297 int val;
82298
82299 + pax_track_stack();
82300 +
82301 if (ip6_mroute_opt(optname))
82302 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82303
82304 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82305 if (sk->sk_type != SOCK_STREAM)
82306 return -ENOPROTOOPT;
82307
82308 - msg.msg_control = optval;
82309 + msg.msg_control = (void __force_kernel *)optval;
82310 msg.msg_controllen = len;
82311 msg.msg_flags = 0;
82312
82313 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82314 index 1cf3f0c..1d4376f 100644
82315 --- a/net/ipv6/netfilter/ip6_queue.c
82316 +++ b/net/ipv6/netfilter/ip6_queue.c
82317 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82318
82319 if (v->data_len < sizeof(*user_iph))
82320 return 0;
82321 + if (v->data_len > 65535)
82322 + return -EMSGSIZE;
82323 +
82324 diff = v->data_len - e->skb->len;
82325 if (diff < 0) {
82326 if (pskb_trim(e->skb, v->data_len))
82327 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82328 static inline void
82329 __ipq_rcv_skb(struct sk_buff *skb)
82330 {
82331 - int status, type, pid, flags, nlmsglen, skblen;
82332 + int status, type, pid, flags;
82333 + unsigned int nlmsglen, skblen;
82334 struct nlmsghdr *nlh;
82335
82336 skblen = skb->len;
82337 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82338 index 78b5a36..7f37433 100644
82339 --- a/net/ipv6/netfilter/ip6_tables.c
82340 +++ b/net/ipv6/netfilter/ip6_tables.c
82341 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82342 private = &tmp;
82343 }
82344 #endif
82345 + memset(&info, 0, sizeof(info));
82346 info.valid_hooks = t->valid_hooks;
82347 memcpy(info.hook_entry, private->hook_entry,
82348 sizeof(info.hook_entry));
82349 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82350 index 4f24570..b813b34 100644
82351 --- a/net/ipv6/raw.c
82352 +++ b/net/ipv6/raw.c
82353 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82354 {
82355 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82356 skb_checksum_complete(skb)) {
82357 - atomic_inc(&sk->sk_drops);
82358 + atomic_inc_unchecked(&sk->sk_drops);
82359 kfree_skb(skb);
82360 return NET_RX_DROP;
82361 }
82362
82363 /* Charge it to the socket. */
82364 if (sock_queue_rcv_skb(sk,skb)<0) {
82365 - atomic_inc(&sk->sk_drops);
82366 + atomic_inc_unchecked(&sk->sk_drops);
82367 kfree_skb(skb);
82368 return NET_RX_DROP;
82369 }
82370 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82371 struct raw6_sock *rp = raw6_sk(sk);
82372
82373 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82374 - atomic_inc(&sk->sk_drops);
82375 + atomic_inc_unchecked(&sk->sk_drops);
82376 kfree_skb(skb);
82377 return NET_RX_DROP;
82378 }
82379 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82380
82381 if (inet->hdrincl) {
82382 if (skb_checksum_complete(skb)) {
82383 - atomic_inc(&sk->sk_drops);
82384 + atomic_inc_unchecked(&sk->sk_drops);
82385 kfree_skb(skb);
82386 return NET_RX_DROP;
82387 }
82388 @@ -518,7 +518,7 @@ csum_copy_err:
82389 as some normal condition.
82390 */
82391 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82392 - atomic_inc(&sk->sk_drops);
82393 + atomic_inc_unchecked(&sk->sk_drops);
82394 goto out;
82395 }
82396
82397 @@ -600,7 +600,7 @@ out:
82398 return err;
82399 }
82400
82401 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82402 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82403 struct flowi *fl, struct rt6_info *rt,
82404 unsigned int flags)
82405 {
82406 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82407 u16 proto;
82408 int err;
82409
82410 + pax_track_stack();
82411 +
82412 /* Rough check on arithmetic overflow,
82413 better check is made in ip6_append_data().
82414 */
82415 @@ -916,12 +918,17 @@ do_confirm:
82416 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82417 char __user *optval, int optlen)
82418 {
82419 + struct icmp6_filter filter;
82420 +
82421 switch (optname) {
82422 case ICMPV6_FILTER:
82423 + if (optlen < 0)
82424 + return -EINVAL;
82425 if (optlen > sizeof(struct icmp6_filter))
82426 optlen = sizeof(struct icmp6_filter);
82427 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82428 + if (copy_from_user(&filter, optval, optlen))
82429 return -EFAULT;
82430 + raw6_sk(sk)->filter = filter;
82431 return 0;
82432 default:
82433 return -ENOPROTOOPT;
82434 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82435 char __user *optval, int __user *optlen)
82436 {
82437 int len;
82438 + struct icmp6_filter filter;
82439
82440 switch (optname) {
82441 case ICMPV6_FILTER:
82442 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82443 len = sizeof(struct icmp6_filter);
82444 if (put_user(len, optlen))
82445 return -EFAULT;
82446 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82447 + filter = raw6_sk(sk)->filter;
82448 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
82449 return -EFAULT;
82450 return 0;
82451 default:
82452 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82453 0, 0L, 0,
82454 sock_i_uid(sp), 0,
82455 sock_i_ino(sp),
82456 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82457 + atomic_read(&sp->sk_refcnt),
82458 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82459 + NULL,
82460 +#else
82461 + sp,
82462 +#endif
82463 + atomic_read_unchecked(&sp->sk_drops));
82464 }
82465
82466 static int raw6_seq_show(struct seq_file *seq, void *v)
82467 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82468 index faae6df..d4430c1 100644
82469 --- a/net/ipv6/tcp_ipv6.c
82470 +++ b/net/ipv6/tcp_ipv6.c
82471 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82472 }
82473 #endif
82474
82475 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82476 +extern int grsec_enable_blackhole;
82477 +#endif
82478 +
82479 static void tcp_v6_hash(struct sock *sk)
82480 {
82481 if (sk->sk_state != TCP_CLOSE) {
82482 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82483 return 0;
82484
82485 reset:
82486 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82487 + if (!grsec_enable_blackhole)
82488 +#endif
82489 tcp_v6_send_reset(sk, skb);
82490 discard:
82491 if (opt_skb)
82492 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82493 TCP_SKB_CB(skb)->sacked = 0;
82494
82495 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82496 - if (!sk)
82497 + if (!sk) {
82498 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82499 + ret = 1;
82500 +#endif
82501 goto no_tcp_socket;
82502 + }
82503
82504 process:
82505 - if (sk->sk_state == TCP_TIME_WAIT)
82506 + if (sk->sk_state == TCP_TIME_WAIT) {
82507 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82508 + ret = 2;
82509 +#endif
82510 goto do_time_wait;
82511 + }
82512
82513 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82514 goto discard_and_relse;
82515 @@ -1701,6 +1716,10 @@ no_tcp_socket:
82516 bad_packet:
82517 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82518 } else {
82519 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82520 + if (!grsec_enable_blackhole || (ret == 1 &&
82521 + (skb->dev->flags & IFF_LOOPBACK)))
82522 +#endif
82523 tcp_v6_send_reset(NULL, skb);
82524 }
82525
82526 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82527 uid,
82528 0, /* non standard timer */
82529 0, /* open_requests have no inode */
82530 - 0, req);
82531 + 0,
82532 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82533 + NULL
82534 +#else
82535 + req
82536 +#endif
82537 + );
82538 }
82539
82540 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82541 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82542 sock_i_uid(sp),
82543 icsk->icsk_probes_out,
82544 sock_i_ino(sp),
82545 - atomic_read(&sp->sk_refcnt), sp,
82546 + atomic_read(&sp->sk_refcnt),
82547 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82548 + NULL,
82549 +#else
82550 + sp,
82551 +#endif
82552 jiffies_to_clock_t(icsk->icsk_rto),
82553 jiffies_to_clock_t(icsk->icsk_ack.ato),
82554 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82555 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82556 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82557 tw->tw_substate, 0, 0,
82558 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82559 - atomic_read(&tw->tw_refcnt), tw);
82560 + atomic_read(&tw->tw_refcnt),
82561 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82562 + NULL
82563 +#else
82564 + tw
82565 +#endif
82566 + );
82567 }
82568
82569 static int tcp6_seq_show(struct seq_file *seq, void *v)
82570 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82571 index 9cc6289..052c521 100644
82572 --- a/net/ipv6/udp.c
82573 +++ b/net/ipv6/udp.c
82574 @@ -49,6 +49,10 @@
82575 #include <linux/seq_file.h>
82576 #include "udp_impl.h"
82577
82578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82579 +extern int grsec_enable_blackhole;
82580 +#endif
82581 +
82582 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82583 {
82584 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82585 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82586 if (rc == -ENOMEM) {
82587 UDP6_INC_STATS_BH(sock_net(sk),
82588 UDP_MIB_RCVBUFERRORS, is_udplite);
82589 - atomic_inc(&sk->sk_drops);
82590 + atomic_inc_unchecked(&sk->sk_drops);
82591 }
82592 goto drop;
82593 }
82594 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82595 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82596 proto == IPPROTO_UDPLITE);
82597
82598 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82599 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82600 +#endif
82601 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82602
82603 kfree_skb(skb);
82604 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82605 0, 0L, 0,
82606 sock_i_uid(sp), 0,
82607 sock_i_ino(sp),
82608 - atomic_read(&sp->sk_refcnt), sp,
82609 - atomic_read(&sp->sk_drops));
82610 + atomic_read(&sp->sk_refcnt),
82611 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82612 + NULL,
82613 +#else
82614 + sp,
82615 +#endif
82616 + atomic_read_unchecked(&sp->sk_drops));
82617 }
82618
82619 int udp6_seq_show(struct seq_file *seq, void *v)
82620 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82621 index 811984d..11f59b7 100644
82622 --- a/net/irda/ircomm/ircomm_tty.c
82623 +++ b/net/irda/ircomm/ircomm_tty.c
82624 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82625 add_wait_queue(&self->open_wait, &wait);
82626
82627 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82628 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82629 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82630
82631 /* As far as I can see, we protect open_count - Jean II */
82632 spin_lock_irqsave(&self->spinlock, flags);
82633 if (!tty_hung_up_p(filp)) {
82634 extra_count = 1;
82635 - self->open_count--;
82636 + local_dec(&self->open_count);
82637 }
82638 spin_unlock_irqrestore(&self->spinlock, flags);
82639 - self->blocked_open++;
82640 + local_inc(&self->blocked_open);
82641
82642 while (1) {
82643 if (tty->termios->c_cflag & CBAUD) {
82644 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82645 }
82646
82647 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82648 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82649 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82650
82651 schedule();
82652 }
82653 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82654 if (extra_count) {
82655 /* ++ is not atomic, so this should be protected - Jean II */
82656 spin_lock_irqsave(&self->spinlock, flags);
82657 - self->open_count++;
82658 + local_inc(&self->open_count);
82659 spin_unlock_irqrestore(&self->spinlock, flags);
82660 }
82661 - self->blocked_open--;
82662 + local_dec(&self->blocked_open);
82663
82664 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82665 - __FILE__,__LINE__, tty->driver->name, self->open_count);
82666 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82667
82668 if (!retval)
82669 self->flags |= ASYNC_NORMAL_ACTIVE;
82670 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82671 }
82672 /* ++ is not atomic, so this should be protected - Jean II */
82673 spin_lock_irqsave(&self->spinlock, flags);
82674 - self->open_count++;
82675 + local_inc(&self->open_count);
82676
82677 tty->driver_data = self;
82678 self->tty = tty;
82679 spin_unlock_irqrestore(&self->spinlock, flags);
82680
82681 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82682 - self->line, self->open_count);
82683 + self->line, local_read(&self->open_count));
82684
82685 /* Not really used by us, but lets do it anyway */
82686 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82687 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82688 return;
82689 }
82690
82691 - if ((tty->count == 1) && (self->open_count != 1)) {
82692 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82693 /*
82694 * Uh, oh. tty->count is 1, which means that the tty
82695 * structure will be freed. state->count should always
82696 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82697 */
82698 IRDA_DEBUG(0, "%s(), bad serial port count; "
82699 "tty->count is 1, state->count is %d\n", __func__ ,
82700 - self->open_count);
82701 - self->open_count = 1;
82702 + local_read(&self->open_count));
82703 + local_set(&self->open_count, 1);
82704 }
82705
82706 - if (--self->open_count < 0) {
82707 + if (local_dec_return(&self->open_count) < 0) {
82708 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82709 - __func__, self->line, self->open_count);
82710 - self->open_count = 0;
82711 + __func__, self->line, local_read(&self->open_count));
82712 + local_set(&self->open_count, 0);
82713 }
82714 - if (self->open_count) {
82715 + if (local_read(&self->open_count)) {
82716 spin_unlock_irqrestore(&self->spinlock, flags);
82717
82718 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82719 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82720 tty->closing = 0;
82721 self->tty = NULL;
82722
82723 - if (self->blocked_open) {
82724 + if (local_read(&self->blocked_open)) {
82725 if (self->close_delay)
82726 schedule_timeout_interruptible(self->close_delay);
82727 wake_up_interruptible(&self->open_wait);
82728 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82729 spin_lock_irqsave(&self->spinlock, flags);
82730 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82731 self->tty = NULL;
82732 - self->open_count = 0;
82733 + local_set(&self->open_count, 0);
82734 spin_unlock_irqrestore(&self->spinlock, flags);
82735
82736 wake_up_interruptible(&self->open_wait);
82737 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82738 seq_putc(m, '\n');
82739
82740 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82741 - seq_printf(m, "Open count: %d\n", self->open_count);
82742 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82743 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82744 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82745
82746 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82747 index bada1b9..f325943 100644
82748 --- a/net/iucv/af_iucv.c
82749 +++ b/net/iucv/af_iucv.c
82750 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82751
82752 write_lock_bh(&iucv_sk_list.lock);
82753
82754 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82755 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82756 while (__iucv_get_sock_by_name(name)) {
82757 sprintf(name, "%08x",
82758 - atomic_inc_return(&iucv_sk_list.autobind_name));
82759 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82760 }
82761
82762 write_unlock_bh(&iucv_sk_list.lock);
82763 diff --git a/net/key/af_key.c b/net/key/af_key.c
82764 index 4e98193..439b449 100644
82765 --- a/net/key/af_key.c
82766 +++ b/net/key/af_key.c
82767 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82768 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82769 struct xfrm_kmaddress k;
82770
82771 + pax_track_stack();
82772 +
82773 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82774 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82775 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82776 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82777 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82778 else
82779 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82780 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82781 + NULL,
82782 +#else
82783 s,
82784 +#endif
82785 atomic_read(&s->sk_refcnt),
82786 sk_rmem_alloc_get(s),
82787 sk_wmem_alloc_get(s),
82788 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82789 index bda96d1..c038b72 100644
82790 --- a/net/lapb/lapb_iface.c
82791 +++ b/net/lapb/lapb_iface.c
82792 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82793 goto out;
82794
82795 lapb->dev = dev;
82796 - lapb->callbacks = *callbacks;
82797 + lapb->callbacks = callbacks;
82798
82799 __lapb_insert_cb(lapb);
82800
82801 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82802
82803 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82804 {
82805 - if (lapb->callbacks.connect_confirmation)
82806 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
82807 + if (lapb->callbacks->connect_confirmation)
82808 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
82809 }
82810
82811 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82812 {
82813 - if (lapb->callbacks.connect_indication)
82814 - lapb->callbacks.connect_indication(lapb->dev, reason);
82815 + if (lapb->callbacks->connect_indication)
82816 + lapb->callbacks->connect_indication(lapb->dev, reason);
82817 }
82818
82819 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82820 {
82821 - if (lapb->callbacks.disconnect_confirmation)
82822 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82823 + if (lapb->callbacks->disconnect_confirmation)
82824 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82825 }
82826
82827 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82828 {
82829 - if (lapb->callbacks.disconnect_indication)
82830 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
82831 + if (lapb->callbacks->disconnect_indication)
82832 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
82833 }
82834
82835 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82836 {
82837 - if (lapb->callbacks.data_indication)
82838 - return lapb->callbacks.data_indication(lapb->dev, skb);
82839 + if (lapb->callbacks->data_indication)
82840 + return lapb->callbacks->data_indication(lapb->dev, skb);
82841
82842 kfree_skb(skb);
82843 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82844 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82845 {
82846 int used = 0;
82847
82848 - if (lapb->callbacks.data_transmit) {
82849 - lapb->callbacks.data_transmit(lapb->dev, skb);
82850 + if (lapb->callbacks->data_transmit) {
82851 + lapb->callbacks->data_transmit(lapb->dev, skb);
82852 used = 1;
82853 }
82854
82855 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82856 index fe2d3f8..e57f683 100644
82857 --- a/net/mac80211/cfg.c
82858 +++ b/net/mac80211/cfg.c
82859 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82860 return err;
82861 }
82862
82863 -struct cfg80211_ops mac80211_config_ops = {
82864 +const struct cfg80211_ops mac80211_config_ops = {
82865 .add_virtual_intf = ieee80211_add_iface,
82866 .del_virtual_intf = ieee80211_del_iface,
82867 .change_virtual_intf = ieee80211_change_iface,
82868 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82869 index 7d7879f..2d51f62 100644
82870 --- a/net/mac80211/cfg.h
82871 +++ b/net/mac80211/cfg.h
82872 @@ -4,6 +4,6 @@
82873 #ifndef __CFG_H
82874 #define __CFG_H
82875
82876 -extern struct cfg80211_ops mac80211_config_ops;
82877 +extern const struct cfg80211_ops mac80211_config_ops;
82878
82879 #endif /* __CFG_H */
82880 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82881 index 99c7525..9cb4937 100644
82882 --- a/net/mac80211/debugfs_key.c
82883 +++ b/net/mac80211/debugfs_key.c
82884 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82885 size_t count, loff_t *ppos)
82886 {
82887 struct ieee80211_key *key = file->private_data;
82888 - int i, res, bufsize = 2 * key->conf.keylen + 2;
82889 + int i, bufsize = 2 * key->conf.keylen + 2;
82890 char *buf = kmalloc(bufsize, GFP_KERNEL);
82891 char *p = buf;
82892 + ssize_t res;
82893 +
82894 + if (buf == NULL)
82895 + return -ENOMEM;
82896
82897 for (i = 0; i < key->conf.keylen; i++)
82898 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82899 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82900 index 33a2e89..08650c8 100644
82901 --- a/net/mac80211/debugfs_sta.c
82902 +++ b/net/mac80211/debugfs_sta.c
82903 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82904 int i;
82905 struct sta_info *sta = file->private_data;
82906
82907 + pax_track_stack();
82908 +
82909 spin_lock_bh(&sta->lock);
82910 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82911 sta->ampdu_mlme.dialog_token_allocator + 1);
82912 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82913 index ca62bfe..6657a03 100644
82914 --- a/net/mac80211/ieee80211_i.h
82915 +++ b/net/mac80211/ieee80211_i.h
82916 @@ -25,6 +25,7 @@
82917 #include <linux/etherdevice.h>
82918 #include <net/cfg80211.h>
82919 #include <net/mac80211.h>
82920 +#include <asm/local.h>
82921 #include "key.h"
82922 #include "sta_info.h"
82923
82924 @@ -635,7 +636,7 @@ struct ieee80211_local {
82925 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82926 spinlock_t queue_stop_reason_lock;
82927
82928 - int open_count;
82929 + local_t open_count;
82930 int monitors, cooked_mntrs;
82931 /* number of interfaces with corresponding FIF_ flags */
82932 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82933 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82934 index 079c500..eb3c6d4 100644
82935 --- a/net/mac80211/iface.c
82936 +++ b/net/mac80211/iface.c
82937 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82938 break;
82939 }
82940
82941 - if (local->open_count == 0) {
82942 + if (local_read(&local->open_count) == 0) {
82943 res = drv_start(local);
82944 if (res)
82945 goto err_del_bss;
82946 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82947 * Validate the MAC address for this device.
82948 */
82949 if (!is_valid_ether_addr(dev->dev_addr)) {
82950 - if (!local->open_count)
82951 + if (!local_read(&local->open_count))
82952 drv_stop(local);
82953 return -EADDRNOTAVAIL;
82954 }
82955 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82956
82957 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82958
82959 - local->open_count++;
82960 + local_inc(&local->open_count);
82961 if (hw_reconf_flags) {
82962 ieee80211_hw_config(local, hw_reconf_flags);
82963 /*
82964 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82965 err_del_interface:
82966 drv_remove_interface(local, &conf);
82967 err_stop:
82968 - if (!local->open_count)
82969 + if (!local_read(&local->open_count))
82970 drv_stop(local);
82971 err_del_bss:
82972 sdata->bss = NULL;
82973 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82974 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82975 }
82976
82977 - local->open_count--;
82978 + local_dec(&local->open_count);
82979
82980 switch (sdata->vif.type) {
82981 case NL80211_IFTYPE_AP_VLAN:
82982 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82983
82984 ieee80211_recalc_ps(local, -1);
82985
82986 - if (local->open_count == 0) {
82987 + if (local_read(&local->open_count) == 0) {
82988 ieee80211_clear_tx_pending(local);
82989 ieee80211_stop_device(local);
82990
82991 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82992 index 2dfe176..74e4388 100644
82993 --- a/net/mac80211/main.c
82994 +++ b/net/mac80211/main.c
82995 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82996 local->hw.conf.power_level = power;
82997 }
82998
82999 - if (changed && local->open_count) {
83000 + if (changed && local_read(&local->open_count)) {
83001 ret = drv_config(local, changed);
83002 /*
83003 * Goal:
83004 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83005 index e67eea7..fcc227e 100644
83006 --- a/net/mac80211/mlme.c
83007 +++ b/net/mac80211/mlme.c
83008 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83009 bool have_higher_than_11mbit = false, newsta = false;
83010 u16 ap_ht_cap_flags;
83011
83012 + pax_track_stack();
83013 +
83014 /*
83015 * AssocResp and ReassocResp have identical structure, so process both
83016 * of them in this function.
83017 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83018 index e535f1c..4d733d1 100644
83019 --- a/net/mac80211/pm.c
83020 +++ b/net/mac80211/pm.c
83021 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83022 }
83023
83024 /* stop hardware - this must stop RX */
83025 - if (local->open_count)
83026 + if (local_read(&local->open_count))
83027 ieee80211_stop_device(local);
83028
83029 local->suspended = true;
83030 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83031 index b33efc4..0a2efb6 100644
83032 --- a/net/mac80211/rate.c
83033 +++ b/net/mac80211/rate.c
83034 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83035 struct rate_control_ref *ref, *old;
83036
83037 ASSERT_RTNL();
83038 - if (local->open_count)
83039 + if (local_read(&local->open_count))
83040 return -EBUSY;
83041
83042 ref = rate_control_alloc(name, local);
83043 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83044 index b1d7904..57e4da7 100644
83045 --- a/net/mac80211/tx.c
83046 +++ b/net/mac80211/tx.c
83047 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83048 return cpu_to_le16(dur);
83049 }
83050
83051 -static int inline is_ieee80211_device(struct ieee80211_local *local,
83052 +static inline int is_ieee80211_device(struct ieee80211_local *local,
83053 struct net_device *dev)
83054 {
83055 return local == wdev_priv(dev->ieee80211_ptr);
83056 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83057 index 31b1085..48fb26d 100644
83058 --- a/net/mac80211/util.c
83059 +++ b/net/mac80211/util.c
83060 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83061 local->resuming = true;
83062
83063 /* restart hardware */
83064 - if (local->open_count) {
83065 + if (local_read(&local->open_count)) {
83066 /*
83067 * Upon resume hardware can sometimes be goofy due to
83068 * various platform / driver / bus issues, so restarting
83069 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83070 index 634d14a..b35a608 100644
83071 --- a/net/netfilter/Kconfig
83072 +++ b/net/netfilter/Kconfig
83073 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83074
83075 To compile it as a module, choose M here. If unsure, say N.
83076
83077 +config NETFILTER_XT_MATCH_GRADM
83078 + tristate '"gradm" match support'
83079 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83080 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83081 + ---help---
83082 + The gradm match allows to match on grsecurity RBAC being enabled.
83083 + It is useful when iptables rules are applied early on bootup to
83084 + prevent connections to the machine (except from a trusted host)
83085 + while the RBAC system is disabled.
83086 +
83087 config NETFILTER_XT_MATCH_HASHLIMIT
83088 tristate '"hashlimit" match support'
83089 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83090 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83091 index 49f62ee..a17b2c6 100644
83092 --- a/net/netfilter/Makefile
83093 +++ b/net/netfilter/Makefile
83094 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83095 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83096 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83097 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83098 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83099 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83100 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83101 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83102 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83103 index 3c7e427..724043c 100644
83104 --- a/net/netfilter/ipvs/ip_vs_app.c
83105 +++ b/net/netfilter/ipvs/ip_vs_app.c
83106 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83107 .open = ip_vs_app_open,
83108 .read = seq_read,
83109 .llseek = seq_lseek,
83110 - .release = seq_release,
83111 + .release = seq_release_net,
83112 };
83113 #endif
83114
83115 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83116 index 95682e5..457dbac 100644
83117 --- a/net/netfilter/ipvs/ip_vs_conn.c
83118 +++ b/net/netfilter/ipvs/ip_vs_conn.c
83119 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83120 /* if the connection is not template and is created
83121 * by sync, preserve the activity flag.
83122 */
83123 - cp->flags |= atomic_read(&dest->conn_flags) &
83124 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83125 (~IP_VS_CONN_F_INACTIVE);
83126 else
83127 - cp->flags |= atomic_read(&dest->conn_flags);
83128 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83129 cp->dest = dest;
83130
83131 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83132 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83133 atomic_set(&cp->refcnt, 1);
83134
83135 atomic_set(&cp->n_control, 0);
83136 - atomic_set(&cp->in_pkts, 0);
83137 + atomic_set_unchecked(&cp->in_pkts, 0);
83138
83139 atomic_inc(&ip_vs_conn_count);
83140 if (flags & IP_VS_CONN_F_NO_CPORT)
83141 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83142 .open = ip_vs_conn_open,
83143 .read = seq_read,
83144 .llseek = seq_lseek,
83145 - .release = seq_release,
83146 + .release = seq_release_net,
83147 };
83148
83149 static const char *ip_vs_origin_name(unsigned flags)
83150 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83151 .open = ip_vs_conn_sync_open,
83152 .read = seq_read,
83153 .llseek = seq_lseek,
83154 - .release = seq_release,
83155 + .release = seq_release_net,
83156 };
83157
83158 #endif
83159 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83160
83161 /* Don't drop the entry if its number of incoming packets is not
83162 located in [0, 8] */
83163 - i = atomic_read(&cp->in_pkts);
83164 + i = atomic_read_unchecked(&cp->in_pkts);
83165 if (i > 8 || i < 0) return 0;
83166
83167 if (!todrop_rate[i]) return 0;
83168 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83169 index b95699f..5fee919 100644
83170 --- a/net/netfilter/ipvs/ip_vs_core.c
83171 +++ b/net/netfilter/ipvs/ip_vs_core.c
83172 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83173 ret = cp->packet_xmit(skb, cp, pp);
83174 /* do not touch skb anymore */
83175
83176 - atomic_inc(&cp->in_pkts);
83177 + atomic_inc_unchecked(&cp->in_pkts);
83178 ip_vs_conn_put(cp);
83179 return ret;
83180 }
83181 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83182 * Sync connection if it is about to close to
83183 * encorage the standby servers to update the connections timeout
83184 */
83185 - pkts = atomic_add_return(1, &cp->in_pkts);
83186 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83187 if (af == AF_INET &&
83188 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83189 (((cp->protocol != IPPROTO_TCP ||
83190 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83191 index 02b2610..2d89424 100644
83192 --- a/net/netfilter/ipvs/ip_vs_ctl.c
83193 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
83194 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83195 ip_vs_rs_hash(dest);
83196 write_unlock_bh(&__ip_vs_rs_lock);
83197 }
83198 - atomic_set(&dest->conn_flags, conn_flags);
83199 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
83200
83201 /* bind the service */
83202 if (!dest->svc) {
83203 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83204 " %-7s %-6d %-10d %-10d\n",
83205 &dest->addr.in6,
83206 ntohs(dest->port),
83207 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83208 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83209 atomic_read(&dest->weight),
83210 atomic_read(&dest->activeconns),
83211 atomic_read(&dest->inactconns));
83212 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83213 "%-7s %-6d %-10d %-10d\n",
83214 ntohl(dest->addr.ip),
83215 ntohs(dest->port),
83216 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83217 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83218 atomic_read(&dest->weight),
83219 atomic_read(&dest->activeconns),
83220 atomic_read(&dest->inactconns));
83221 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83222 .open = ip_vs_info_open,
83223 .read = seq_read,
83224 .llseek = seq_lseek,
83225 - .release = seq_release_private,
83226 + .release = seq_release_net,
83227 };
83228
83229 #endif
83230 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83231 .open = ip_vs_stats_seq_open,
83232 .read = seq_read,
83233 .llseek = seq_lseek,
83234 - .release = single_release,
83235 + .release = single_release_net,
83236 };
83237
83238 #endif
83239 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83240
83241 entry.addr = dest->addr.ip;
83242 entry.port = dest->port;
83243 - entry.conn_flags = atomic_read(&dest->conn_flags);
83244 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83245 entry.weight = atomic_read(&dest->weight);
83246 entry.u_threshold = dest->u_threshold;
83247 entry.l_threshold = dest->l_threshold;
83248 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83249 unsigned char arg[128];
83250 int ret = 0;
83251
83252 + pax_track_stack();
83253 +
83254 if (!capable(CAP_NET_ADMIN))
83255 return -EPERM;
83256
83257 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83258 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83259
83260 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83261 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83262 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83263 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83264 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83265 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83266 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83267 index e177f0d..55e8581 100644
83268 --- a/net/netfilter/ipvs/ip_vs_sync.c
83269 +++ b/net/netfilter/ipvs/ip_vs_sync.c
83270 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83271
83272 if (opt)
83273 memcpy(&cp->in_seq, opt, sizeof(*opt));
83274 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83275 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83276 cp->state = state;
83277 cp->old_state = cp->state;
83278 /*
83279 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83280 index 30b3189..e2e4b55 100644
83281 --- a/net/netfilter/ipvs/ip_vs_xmit.c
83282 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
83283 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83284 else
83285 rc = NF_ACCEPT;
83286 /* do not touch skb anymore */
83287 - atomic_inc(&cp->in_pkts);
83288 + atomic_inc_unchecked(&cp->in_pkts);
83289 goto out;
83290 }
83291
83292 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83293 else
83294 rc = NF_ACCEPT;
83295 /* do not touch skb anymore */
83296 - atomic_inc(&cp->in_pkts);
83297 + atomic_inc_unchecked(&cp->in_pkts);
83298 goto out;
83299 }
83300
83301 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83302 index d521718..d0fd7a1 100644
83303 --- a/net/netfilter/nf_conntrack_netlink.c
83304 +++ b/net/netfilter/nf_conntrack_netlink.c
83305 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83306 static int
83307 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83308 struct nf_conntrack_tuple *tuple,
83309 - enum ctattr_tuple type, u_int8_t l3num)
83310 + enum ctattr_type type, u_int8_t l3num)
83311 {
83312 struct nlattr *tb[CTA_TUPLE_MAX+1];
83313 int err;
83314 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83315 index f900dc3..5e45346 100644
83316 --- a/net/netfilter/nfnetlink_log.c
83317 +++ b/net/netfilter/nfnetlink_log.c
83318 @@ -68,7 +68,7 @@ struct nfulnl_instance {
83319 };
83320
83321 static DEFINE_RWLOCK(instances_lock);
83322 -static atomic_t global_seq;
83323 +static atomic_unchecked_t global_seq;
83324
83325 #define INSTANCE_BUCKETS 16
83326 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83327 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83328 /* global sequence number */
83329 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83330 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83331 - htonl(atomic_inc_return(&global_seq)));
83332 + htonl(atomic_inc_return_unchecked(&global_seq)));
83333
83334 if (data_len) {
83335 struct nlattr *nla;
83336 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83337 new file mode 100644
83338 index 0000000..b1bac76
83339 --- /dev/null
83340 +++ b/net/netfilter/xt_gradm.c
83341 @@ -0,0 +1,51 @@
83342 +/*
83343 + * gradm match for netfilter
83344 + * Copyright © Zbigniew Krzystolik, 2010
83345 + *
83346 + * This program is free software; you can redistribute it and/or modify
83347 + * it under the terms of the GNU General Public License; either version
83348 + * 2 or 3 as published by the Free Software Foundation.
83349 + */
83350 +#include <linux/module.h>
83351 +#include <linux/moduleparam.h>
83352 +#include <linux/skbuff.h>
83353 +#include <linux/netfilter/x_tables.h>
83354 +#include <linux/grsecurity.h>
83355 +#include <linux/netfilter/xt_gradm.h>
83356 +
83357 +static bool
83358 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
83359 +{
83360 + const struct xt_gradm_mtinfo *info = par->matchinfo;
83361 + bool retval = false;
83362 + if (gr_acl_is_enabled())
83363 + retval = true;
83364 + return retval ^ info->invflags;
83365 +}
83366 +
83367 +static struct xt_match gradm_mt_reg __read_mostly = {
83368 + .name = "gradm",
83369 + .revision = 0,
83370 + .family = NFPROTO_UNSPEC,
83371 + .match = gradm_mt,
83372 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
83373 + .me = THIS_MODULE,
83374 +};
83375 +
83376 +static int __init gradm_mt_init(void)
83377 +{
83378 + return xt_register_match(&gradm_mt_reg);
83379 +}
83380 +
83381 +static void __exit gradm_mt_exit(void)
83382 +{
83383 + xt_unregister_match(&gradm_mt_reg);
83384 +}
83385 +
83386 +module_init(gradm_mt_init);
83387 +module_exit(gradm_mt_exit);
83388 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
83389 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
83390 +MODULE_LICENSE("GPL");
83391 +MODULE_ALIAS("ipt_gradm");
83392 +MODULE_ALIAS("ip6t_gradm");
83393 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
83394 index 5a7dcdf..24a3578 100644
83395 --- a/net/netlink/af_netlink.c
83396 +++ b/net/netlink/af_netlink.c
83397 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
83398 sk->sk_error_report(sk);
83399 }
83400 }
83401 - atomic_inc(&sk->sk_drops);
83402 + atomic_inc_unchecked(&sk->sk_drops);
83403 }
83404
83405 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
83406 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
83407 struct netlink_sock *nlk = nlk_sk(s);
83408
83409 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
83410 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83411 + NULL,
83412 +#else
83413 s,
83414 +#endif
83415 s->sk_protocol,
83416 nlk->pid,
83417 nlk->groups ? (u32)nlk->groups[0] : 0,
83418 sk_rmem_alloc_get(s),
83419 sk_wmem_alloc_get(s),
83420 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83421 + NULL,
83422 +#else
83423 nlk->cb,
83424 +#endif
83425 atomic_read(&s->sk_refcnt),
83426 - atomic_read(&s->sk_drops)
83427 + atomic_read_unchecked(&s->sk_drops)
83428 );
83429
83430 }
83431 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
83432 index 7a83495..ab0062f 100644
83433 --- a/net/netrom/af_netrom.c
83434 +++ b/net/netrom/af_netrom.c
83435 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83436 struct sock *sk = sock->sk;
83437 struct nr_sock *nr = nr_sk(sk);
83438
83439 + memset(sax, 0, sizeof(*sax));
83440 lock_sock(sk);
83441 if (peer != 0) {
83442 if (sk->sk_state != TCP_ESTABLISHED) {
83443 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83444 *uaddr_len = sizeof(struct full_sockaddr_ax25);
83445 } else {
83446 sax->fsa_ax25.sax25_family = AF_NETROM;
83447 - sax->fsa_ax25.sax25_ndigis = 0;
83448 sax->fsa_ax25.sax25_call = nr->source_addr;
83449 *uaddr_len = sizeof(struct sockaddr_ax25);
83450 }
83451 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
83452 index 35cfa79..4e78ff7 100644
83453 --- a/net/packet/af_packet.c
83454 +++ b/net/packet/af_packet.c
83455 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
83456
83457 seq_printf(seq,
83458 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
83459 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83460 + NULL,
83461 +#else
83462 s,
83463 +#endif
83464 atomic_read(&s->sk_refcnt),
83465 s->sk_type,
83466 ntohs(po->num),
83467 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
83468 index 519ff9d..a422a90 100644
83469 --- a/net/phonet/af_phonet.c
83470 +++ b/net/phonet/af_phonet.c
83471 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
83472 {
83473 struct phonet_protocol *pp;
83474
83475 - if (protocol >= PHONET_NPROTO)
83476 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83477 return NULL;
83478
83479 spin_lock(&proto_tab_lock);
83480 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
83481 {
83482 int err = 0;
83483
83484 - if (protocol >= PHONET_NPROTO)
83485 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83486 return -EINVAL;
83487
83488 err = proto_register(pp->prot, 1);
83489 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
83490 index ef5c75c..2b6c2fa 100644
83491 --- a/net/phonet/datagram.c
83492 +++ b/net/phonet/datagram.c
83493 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
83494 if (err < 0) {
83495 kfree_skb(skb);
83496 if (err == -ENOMEM)
83497 - atomic_inc(&sk->sk_drops);
83498 + atomic_inc_unchecked(&sk->sk_drops);
83499 }
83500 return err ? NET_RX_DROP : NET_RX_SUCCESS;
83501 }
83502 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
83503 index 9cdd35e..16cd850 100644
83504 --- a/net/phonet/pep.c
83505 +++ b/net/phonet/pep.c
83506 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83507
83508 case PNS_PEP_CTRL_REQ:
83509 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
83510 - atomic_inc(&sk->sk_drops);
83511 + atomic_inc_unchecked(&sk->sk_drops);
83512 break;
83513 }
83514 __skb_pull(skb, 4);
83515 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83516 if (!err)
83517 return 0;
83518 if (err == -ENOMEM)
83519 - atomic_inc(&sk->sk_drops);
83520 + atomic_inc_unchecked(&sk->sk_drops);
83521 break;
83522 }
83523
83524 if (pn->rx_credits == 0) {
83525 - atomic_inc(&sk->sk_drops);
83526 + atomic_inc_unchecked(&sk->sk_drops);
83527 err = -ENOBUFS;
83528 break;
83529 }
83530 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
83531 index aa5b5a9..c09b4f8 100644
83532 --- a/net/phonet/socket.c
83533 +++ b/net/phonet/socket.c
83534 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
83535 sk->sk_state,
83536 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
83537 sock_i_uid(sk), sock_i_ino(sk),
83538 - atomic_read(&sk->sk_refcnt), sk,
83539 - atomic_read(&sk->sk_drops), &len);
83540 + atomic_read(&sk->sk_refcnt),
83541 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83542 + NULL,
83543 +#else
83544 + sk,
83545 +#endif
83546 + atomic_read_unchecked(&sk->sk_drops), &len);
83547 }
83548 seq_printf(seq, "%*s\n", 127 - len, "");
83549 return 0;
83550 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
83551 index ec753b3..821187c 100644
83552 --- a/net/rds/Kconfig
83553 +++ b/net/rds/Kconfig
83554 @@ -1,7 +1,7 @@
83555
83556 config RDS
83557 tristate "The RDS Protocol (EXPERIMENTAL)"
83558 - depends on INET && EXPERIMENTAL
83559 + depends on INET && EXPERIMENTAL && BROKEN
83560 ---help---
83561 The RDS (Reliable Datagram Sockets) protocol provides reliable,
83562 sequenced delivery of datagrams over Infiniband, iWARP,
83563 diff --git a/net/rds/cong.c b/net/rds/cong.c
83564 index dd2711d..1c7ed12 100644
83565 --- a/net/rds/cong.c
83566 +++ b/net/rds/cong.c
83567 @@ -77,7 +77,7 @@
83568 * finds that the saved generation number is smaller than the global generation
83569 * number, it wakes up the process.
83570 */
83571 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
83572 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
83573
83574 /*
83575 * Congestion monitoring
83576 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
83577 rdsdebug("waking map %p for %pI4\n",
83578 map, &map->m_addr);
83579 rds_stats_inc(s_cong_update_received);
83580 - atomic_inc(&rds_cong_generation);
83581 + atomic_inc_unchecked(&rds_cong_generation);
83582 if (waitqueue_active(&map->m_waitq))
83583 wake_up(&map->m_waitq);
83584 if (waitqueue_active(&rds_poll_waitq))
83585 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
83586
83587 int rds_cong_updated_since(unsigned long *recent)
83588 {
83589 - unsigned long gen = atomic_read(&rds_cong_generation);
83590 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
83591
83592 if (likely(*recent == gen))
83593 return 0;
83594 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
83595 index de4a1b1..94ec861 100644
83596 --- a/net/rds/iw_rdma.c
83597 +++ b/net/rds/iw_rdma.c
83598 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
83599 struct rdma_cm_id *pcm_id;
83600 int rc;
83601
83602 + pax_track_stack();
83603 +
83604 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
83605 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
83606
83607 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
83608 index b5198ae..8b9fb90 100644
83609 --- a/net/rds/tcp.c
83610 +++ b/net/rds/tcp.c
83611 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
83612 int val = 1;
83613
83614 set_fs(KERNEL_DS);
83615 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
83616 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
83617 sizeof(val));
83618 set_fs(oldfs);
83619 }
83620 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
83621 index ab545e0..4079b3b 100644
83622 --- a/net/rds/tcp_send.c
83623 +++ b/net/rds/tcp_send.c
83624 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
83625
83626 oldfs = get_fs();
83627 set_fs(KERNEL_DS);
83628 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
83629 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
83630 sizeof(val));
83631 set_fs(oldfs);
83632 }
83633 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
83634 index a86afce..8657bce 100644
83635 --- a/net/rxrpc/af_rxrpc.c
83636 +++ b/net/rxrpc/af_rxrpc.c
83637 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
83638 __be32 rxrpc_epoch;
83639
83640 /* current debugging ID */
83641 -atomic_t rxrpc_debug_id;
83642 +atomic_unchecked_t rxrpc_debug_id;
83643
83644 /* count of skbs currently in use */
83645 atomic_t rxrpc_n_skbs;
83646 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
83647 index b4a2209..539106c 100644
83648 --- a/net/rxrpc/ar-ack.c
83649 +++ b/net/rxrpc/ar-ack.c
83650 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83651
83652 _enter("{%d,%d,%d,%d},",
83653 call->acks_hard, call->acks_unacked,
83654 - atomic_read(&call->sequence),
83655 + atomic_read_unchecked(&call->sequence),
83656 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
83657
83658 stop = 0;
83659 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83660
83661 /* each Tx packet has a new serial number */
83662 sp->hdr.serial =
83663 - htonl(atomic_inc_return(&call->conn->serial));
83664 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
83665
83666 hdr = (struct rxrpc_header *) txb->head;
83667 hdr->serial = sp->hdr.serial;
83668 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
83669 */
83670 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
83671 {
83672 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
83673 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
83674 }
83675
83676 /*
83677 @@ -627,7 +627,7 @@ process_further:
83678
83679 latest = ntohl(sp->hdr.serial);
83680 hard = ntohl(ack.firstPacket);
83681 - tx = atomic_read(&call->sequence);
83682 + tx = atomic_read_unchecked(&call->sequence);
83683
83684 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83685 latest,
83686 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
83687 u32 abort_code = RX_PROTOCOL_ERROR;
83688 u8 *acks = NULL;
83689
83690 + pax_track_stack();
83691 +
83692 //printk("\n--------------------\n");
83693 _enter("{%d,%s,%lx} [%lu]",
83694 call->debug_id, rxrpc_call_states[call->state], call->events,
83695 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
83696 goto maybe_reschedule;
83697
83698 send_ACK_with_skew:
83699 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
83700 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
83701 ntohl(ack.serial));
83702 send_ACK:
83703 mtu = call->conn->trans->peer->if_mtu;
83704 @@ -1171,7 +1173,7 @@ send_ACK:
83705 ackinfo.rxMTU = htonl(5692);
83706 ackinfo.jumbo_max = htonl(4);
83707
83708 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83709 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83710 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83711 ntohl(hdr.serial),
83712 ntohs(ack.maxSkew),
83713 @@ -1189,7 +1191,7 @@ send_ACK:
83714 send_message:
83715 _debug("send message");
83716
83717 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83718 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83719 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
83720 send_message_2:
83721
83722 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
83723 index bc0019f..e1b4b24 100644
83724 --- a/net/rxrpc/ar-call.c
83725 +++ b/net/rxrpc/ar-call.c
83726 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
83727 spin_lock_init(&call->lock);
83728 rwlock_init(&call->state_lock);
83729 atomic_set(&call->usage, 1);
83730 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
83731 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83732 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
83733
83734 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
83735 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
83736 index 9f1ce84..ff8d061 100644
83737 --- a/net/rxrpc/ar-connection.c
83738 +++ b/net/rxrpc/ar-connection.c
83739 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
83740 rwlock_init(&conn->lock);
83741 spin_lock_init(&conn->state_lock);
83742 atomic_set(&conn->usage, 1);
83743 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
83744 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83745 conn->avail_calls = RXRPC_MAXCALLS;
83746 conn->size_align = 4;
83747 conn->header_size = sizeof(struct rxrpc_header);
83748 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
83749 index 0505cdc..f0748ce 100644
83750 --- a/net/rxrpc/ar-connevent.c
83751 +++ b/net/rxrpc/ar-connevent.c
83752 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
83753
83754 len = iov[0].iov_len + iov[1].iov_len;
83755
83756 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83757 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83758 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
83759
83760 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83761 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
83762 index f98c802..9e8488e 100644
83763 --- a/net/rxrpc/ar-input.c
83764 +++ b/net/rxrpc/ar-input.c
83765 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
83766 /* track the latest serial number on this connection for ACK packet
83767 * information */
83768 serial = ntohl(sp->hdr.serial);
83769 - hi_serial = atomic_read(&call->conn->hi_serial);
83770 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
83771 while (serial > hi_serial)
83772 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
83773 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
83774 serial);
83775
83776 /* request ACK generation for any ACK or DATA packet that requests
83777 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
83778 index 7043b29..06edcdf 100644
83779 --- a/net/rxrpc/ar-internal.h
83780 +++ b/net/rxrpc/ar-internal.h
83781 @@ -272,8 +272,8 @@ struct rxrpc_connection {
83782 int error; /* error code for local abort */
83783 int debug_id; /* debug ID for printks */
83784 unsigned call_counter; /* call ID counter */
83785 - atomic_t serial; /* packet serial number counter */
83786 - atomic_t hi_serial; /* highest serial number received */
83787 + atomic_unchecked_t serial; /* packet serial number counter */
83788 + atomic_unchecked_t hi_serial; /* highest serial number received */
83789 u8 avail_calls; /* number of calls available */
83790 u8 size_align; /* data size alignment (for security) */
83791 u8 header_size; /* rxrpc + security header size */
83792 @@ -346,7 +346,7 @@ struct rxrpc_call {
83793 spinlock_t lock;
83794 rwlock_t state_lock; /* lock for state transition */
83795 atomic_t usage;
83796 - atomic_t sequence; /* Tx data packet sequence counter */
83797 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
83798 u32 abort_code; /* local/remote abort code */
83799 enum { /* current state of call */
83800 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
83801 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
83802 */
83803 extern atomic_t rxrpc_n_skbs;
83804 extern __be32 rxrpc_epoch;
83805 -extern atomic_t rxrpc_debug_id;
83806 +extern atomic_unchecked_t rxrpc_debug_id;
83807 extern struct workqueue_struct *rxrpc_workqueue;
83808
83809 /*
83810 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
83811 index 74697b2..10f9b77 100644
83812 --- a/net/rxrpc/ar-key.c
83813 +++ b/net/rxrpc/ar-key.c
83814 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
83815 return ret;
83816
83817 plen -= sizeof(*token);
83818 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83819 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83820 if (!token)
83821 return -ENOMEM;
83822
83823 - token->kad = kmalloc(plen, GFP_KERNEL);
83824 + token->kad = kzalloc(plen, GFP_KERNEL);
83825 if (!token->kad) {
83826 kfree(token);
83827 return -ENOMEM;
83828 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
83829 goto error;
83830
83831 ret = -ENOMEM;
83832 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83833 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83834 if (!token)
83835 goto error;
83836 - token->kad = kmalloc(plen, GFP_KERNEL);
83837 + token->kad = kzalloc(plen, GFP_KERNEL);
83838 if (!token->kad)
83839 goto error_free;
83840
83841 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
83842 index 807535f..5b7f19e 100644
83843 --- a/net/rxrpc/ar-local.c
83844 +++ b/net/rxrpc/ar-local.c
83845 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
83846 spin_lock_init(&local->lock);
83847 rwlock_init(&local->services_lock);
83848 atomic_set(&local->usage, 1);
83849 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
83850 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83851 memcpy(&local->srx, srx, sizeof(*srx));
83852 }
83853
83854 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
83855 index cc9102c..7d3888e 100644
83856 --- a/net/rxrpc/ar-output.c
83857 +++ b/net/rxrpc/ar-output.c
83858 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
83859 sp->hdr.cid = call->cid;
83860 sp->hdr.callNumber = call->call_id;
83861 sp->hdr.seq =
83862 - htonl(atomic_inc_return(&call->sequence));
83863 + htonl(atomic_inc_return_unchecked(&call->sequence));
83864 sp->hdr.serial =
83865 - htonl(atomic_inc_return(&conn->serial));
83866 + htonl(atomic_inc_return_unchecked(&conn->serial));
83867 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
83868 sp->hdr.userStatus = 0;
83869 sp->hdr.securityIndex = conn->security_ix;
83870 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
83871 index edc026c..4bd4e2d 100644
83872 --- a/net/rxrpc/ar-peer.c
83873 +++ b/net/rxrpc/ar-peer.c
83874 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
83875 INIT_LIST_HEAD(&peer->error_targets);
83876 spin_lock_init(&peer->lock);
83877 atomic_set(&peer->usage, 1);
83878 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
83879 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83880 memcpy(&peer->srx, srx, sizeof(*srx));
83881
83882 rxrpc_assess_MTU_size(peer);
83883 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
83884 index 38047f7..9f48511 100644
83885 --- a/net/rxrpc/ar-proc.c
83886 +++ b/net/rxrpc/ar-proc.c
83887 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
83888 atomic_read(&conn->usage),
83889 rxrpc_conn_states[conn->state],
83890 key_serial(conn->key),
83891 - atomic_read(&conn->serial),
83892 - atomic_read(&conn->hi_serial));
83893 + atomic_read_unchecked(&conn->serial),
83894 + atomic_read_unchecked(&conn->hi_serial));
83895
83896 return 0;
83897 }
83898 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
83899 index 0936e1a..437c640 100644
83900 --- a/net/rxrpc/ar-transport.c
83901 +++ b/net/rxrpc/ar-transport.c
83902 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
83903 spin_lock_init(&trans->client_lock);
83904 rwlock_init(&trans->conn_lock);
83905 atomic_set(&trans->usage, 1);
83906 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
83907 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83908
83909 if (peer->srx.transport.family == AF_INET) {
83910 switch (peer->srx.transport_type) {
83911 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
83912 index 713ac59..306f6ae 100644
83913 --- a/net/rxrpc/rxkad.c
83914 +++ b/net/rxrpc/rxkad.c
83915 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
83916 u16 check;
83917 int nsg;
83918
83919 + pax_track_stack();
83920 +
83921 sp = rxrpc_skb(skb);
83922
83923 _enter("");
83924 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
83925 u16 check;
83926 int nsg;
83927
83928 + pax_track_stack();
83929 +
83930 _enter("");
83931
83932 sp = rxrpc_skb(skb);
83933 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
83934
83935 len = iov[0].iov_len + iov[1].iov_len;
83936
83937 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83938 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83939 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
83940
83941 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83942 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
83943
83944 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
83945
83946 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
83947 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83948 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
83949
83950 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
83951 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
83952 index 914c419..7a16d2c 100644
83953 --- a/net/sctp/auth.c
83954 +++ b/net/sctp/auth.c
83955 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
83956 struct sctp_auth_bytes *key;
83957
83958 /* Verify that we are not going to overflow INT_MAX */
83959 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
83960 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
83961 return NULL;
83962
83963 /* Allocate the shared key */
83964 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
83965 index d093cbf..9fc36fc 100644
83966 --- a/net/sctp/proc.c
83967 +++ b/net/sctp/proc.c
83968 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
83969 sctp_for_each_hentry(epb, node, &head->chain) {
83970 ep = sctp_ep(epb);
83971 sk = epb->sk;
83972 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
83973 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
83974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83975 + NULL, NULL,
83976 +#else
83977 + ep, sk,
83978 +#endif
83979 sctp_sk(sk)->type, sk->sk_state, hash,
83980 epb->bind_addr.port,
83981 sock_i_uid(sk), sock_i_ino(sk));
83982 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
83983 seq_printf(seq,
83984 "%8p %8p %-3d %-3d %-2d %-4d "
83985 "%4d %8d %8d %7d %5lu %-5d %5d ",
83986 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
83987 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83988 + NULL, NULL,
83989 +#else
83990 + assoc, sk,
83991 +#endif
83992 + sctp_sk(sk)->type, sk->sk_state,
83993 assoc->state, hash,
83994 assoc->assoc_id,
83995 assoc->sndbuf_used,
83996 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
83997 index 3a95fcb..c40fc1d 100644
83998 --- a/net/sctp/socket.c
83999 +++ b/net/sctp/socket.c
84000 @@ -5802,7 +5802,6 @@ pp_found:
84001 */
84002 int reuse = sk->sk_reuse;
84003 struct sock *sk2;
84004 - struct hlist_node *node;
84005
84006 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
84007 if (pp->fastreuse && sk->sk_reuse &&
84008 diff --git a/net/socket.c b/net/socket.c
84009 index d449812..4ac08d3c 100644
84010 --- a/net/socket.c
84011 +++ b/net/socket.c
84012 @@ -87,6 +87,7 @@
84013 #include <linux/wireless.h>
84014 #include <linux/nsproxy.h>
84015 #include <linux/magic.h>
84016 +#include <linux/in.h>
84017
84018 #include <asm/uaccess.h>
84019 #include <asm/unistd.h>
84020 @@ -97,6 +98,21 @@
84021 #include <net/sock.h>
84022 #include <linux/netfilter.h>
84023
84024 +extern void gr_attach_curr_ip(const struct sock *sk);
84025 +extern int gr_handle_sock_all(const int family, const int type,
84026 + const int protocol);
84027 +extern int gr_handle_sock_server(const struct sockaddr *sck);
84028 +extern int gr_handle_sock_server_other(const struct sock *sck);
84029 +extern int gr_handle_sock_client(const struct sockaddr *sck);
84030 +extern int gr_search_connect(struct socket * sock,
84031 + struct sockaddr_in * addr);
84032 +extern int gr_search_bind(struct socket * sock,
84033 + struct sockaddr_in * addr);
84034 +extern int gr_search_listen(struct socket * sock);
84035 +extern int gr_search_accept(struct socket * sock);
84036 +extern int gr_search_socket(const int domain, const int type,
84037 + const int protocol);
84038 +
84039 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
84040 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
84041 unsigned long nr_segs, loff_t pos);
84042 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
84043 mnt);
84044 }
84045
84046 -static struct vfsmount *sock_mnt __read_mostly;
84047 +struct vfsmount *sock_mnt __read_mostly;
84048
84049 static struct file_system_type sock_fs_type = {
84050 .name = "sockfs",
84051 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
84052 return -EAFNOSUPPORT;
84053 if (type < 0 || type >= SOCK_MAX)
84054 return -EINVAL;
84055 + if (protocol < 0)
84056 + return -EINVAL;
84057
84058 /* Compatibility.
84059
84060 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
84061 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
84062 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
84063
84064 + if(!gr_search_socket(family, type, protocol)) {
84065 + retval = -EACCES;
84066 + goto out;
84067 + }
84068 +
84069 + if (gr_handle_sock_all(family, type, protocol)) {
84070 + retval = -EACCES;
84071 + goto out;
84072 + }
84073 +
84074 retval = sock_create(family, type, protocol, &sock);
84075 if (retval < 0)
84076 goto out;
84077 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84078 if (sock) {
84079 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
84080 if (err >= 0) {
84081 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
84082 + err = -EACCES;
84083 + goto error;
84084 + }
84085 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
84086 + if (err)
84087 + goto error;
84088 +
84089 err = security_socket_bind(sock,
84090 (struct sockaddr *)&address,
84091 addrlen);
84092 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84093 (struct sockaddr *)
84094 &address, addrlen);
84095 }
84096 +error:
84097 fput_light(sock->file, fput_needed);
84098 }
84099 return err;
84100 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
84101 if ((unsigned)backlog > somaxconn)
84102 backlog = somaxconn;
84103
84104 + if (gr_handle_sock_server_other(sock->sk)) {
84105 + err = -EPERM;
84106 + goto error;
84107 + }
84108 +
84109 + err = gr_search_listen(sock);
84110 + if (err)
84111 + goto error;
84112 +
84113 err = security_socket_listen(sock, backlog);
84114 if (!err)
84115 err = sock->ops->listen(sock, backlog);
84116
84117 +error:
84118 fput_light(sock->file, fput_needed);
84119 }
84120 return err;
84121 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84122 newsock->type = sock->type;
84123 newsock->ops = sock->ops;
84124
84125 + if (gr_handle_sock_server_other(sock->sk)) {
84126 + err = -EPERM;
84127 + sock_release(newsock);
84128 + goto out_put;
84129 + }
84130 +
84131 + err = gr_search_accept(sock);
84132 + if (err) {
84133 + sock_release(newsock);
84134 + goto out_put;
84135 + }
84136 +
84137 /*
84138 * We don't need try_module_get here, as the listening socket (sock)
84139 * has the protocol module (sock->ops->owner) held.
84140 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84141 fd_install(newfd, newfile);
84142 err = newfd;
84143
84144 + gr_attach_curr_ip(newsock->sk);
84145 +
84146 out_put:
84147 fput_light(sock->file, fput_needed);
84148 out:
84149 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84150 int, addrlen)
84151 {
84152 struct socket *sock;
84153 + struct sockaddr *sck;
84154 struct sockaddr_storage address;
84155 int err, fput_needed;
84156
84157 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84158 if (err < 0)
84159 goto out_put;
84160
84161 + sck = (struct sockaddr *)&address;
84162 +
84163 + if (gr_handle_sock_client(sck)) {
84164 + err = -EACCES;
84165 + goto out_put;
84166 + }
84167 +
84168 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
84169 + if (err)
84170 + goto out_put;
84171 +
84172 err =
84173 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
84174 if (err)
84175 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
84176 int err, ctl_len, iov_size, total_len;
84177 int fput_needed;
84178
84179 + pax_track_stack();
84180 +
84181 err = -EFAULT;
84182 if (MSG_CMSG_COMPAT & flags) {
84183 if (get_compat_msghdr(&msg_sys, msg_compat))
84184 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
84185 * kernel msghdr to use the kernel address space)
84186 */
84187
84188 - uaddr = (__force void __user *)msg_sys.msg_name;
84189 + uaddr = (void __force_user *)msg_sys.msg_name;
84190 uaddr_len = COMPAT_NAMELEN(msg);
84191 if (MSG_CMSG_COMPAT & flags) {
84192 err = verify_compat_iovec(&msg_sys, iov,
84193 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
84194 index ac94477..8afe5c3 100644
84195 --- a/net/sunrpc/sched.c
84196 +++ b/net/sunrpc/sched.c
84197 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
84198 #ifdef RPC_DEBUG
84199 static void rpc_task_set_debuginfo(struct rpc_task *task)
84200 {
84201 - static atomic_t rpc_pid;
84202 + static atomic_unchecked_t rpc_pid;
84203
84204 task->tk_magic = RPC_TASK_MAGIC_ID;
84205 - task->tk_pid = atomic_inc_return(&rpc_pid);
84206 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
84207 }
84208 #else
84209 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
84210 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
84211 index 35fb68b..236a8bf 100644
84212 --- a/net/sunrpc/xprtrdma/svc_rdma.c
84213 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
84214 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
84215 static unsigned int min_max_inline = 4096;
84216 static unsigned int max_max_inline = 65536;
84217
84218 -atomic_t rdma_stat_recv;
84219 -atomic_t rdma_stat_read;
84220 -atomic_t rdma_stat_write;
84221 -atomic_t rdma_stat_sq_starve;
84222 -atomic_t rdma_stat_rq_starve;
84223 -atomic_t rdma_stat_rq_poll;
84224 -atomic_t rdma_stat_rq_prod;
84225 -atomic_t rdma_stat_sq_poll;
84226 -atomic_t rdma_stat_sq_prod;
84227 +atomic_unchecked_t rdma_stat_recv;
84228 +atomic_unchecked_t rdma_stat_read;
84229 +atomic_unchecked_t rdma_stat_write;
84230 +atomic_unchecked_t rdma_stat_sq_starve;
84231 +atomic_unchecked_t rdma_stat_rq_starve;
84232 +atomic_unchecked_t rdma_stat_rq_poll;
84233 +atomic_unchecked_t rdma_stat_rq_prod;
84234 +atomic_unchecked_t rdma_stat_sq_poll;
84235 +atomic_unchecked_t rdma_stat_sq_prod;
84236
84237 /* Temporary NFS request map and context caches */
84238 struct kmem_cache *svc_rdma_map_cachep;
84239 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
84240 len -= *ppos;
84241 if (len > *lenp)
84242 len = *lenp;
84243 - if (len && copy_to_user(buffer, str_buf, len))
84244 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
84245 return -EFAULT;
84246 *lenp = len;
84247 *ppos += len;
84248 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
84249 {
84250 .procname = "rdma_stat_read",
84251 .data = &rdma_stat_read,
84252 - .maxlen = sizeof(atomic_t),
84253 + .maxlen = sizeof(atomic_unchecked_t),
84254 .mode = 0644,
84255 .proc_handler = &read_reset_stat,
84256 },
84257 {
84258 .procname = "rdma_stat_recv",
84259 .data = &rdma_stat_recv,
84260 - .maxlen = sizeof(atomic_t),
84261 + .maxlen = sizeof(atomic_unchecked_t),
84262 .mode = 0644,
84263 .proc_handler = &read_reset_stat,
84264 },
84265 {
84266 .procname = "rdma_stat_write",
84267 .data = &rdma_stat_write,
84268 - .maxlen = sizeof(atomic_t),
84269 + .maxlen = sizeof(atomic_unchecked_t),
84270 .mode = 0644,
84271 .proc_handler = &read_reset_stat,
84272 },
84273 {
84274 .procname = "rdma_stat_sq_starve",
84275 .data = &rdma_stat_sq_starve,
84276 - .maxlen = sizeof(atomic_t),
84277 + .maxlen = sizeof(atomic_unchecked_t),
84278 .mode = 0644,
84279 .proc_handler = &read_reset_stat,
84280 },
84281 {
84282 .procname = "rdma_stat_rq_starve",
84283 .data = &rdma_stat_rq_starve,
84284 - .maxlen = sizeof(atomic_t),
84285 + .maxlen = sizeof(atomic_unchecked_t),
84286 .mode = 0644,
84287 .proc_handler = &read_reset_stat,
84288 },
84289 {
84290 .procname = "rdma_stat_rq_poll",
84291 .data = &rdma_stat_rq_poll,
84292 - .maxlen = sizeof(atomic_t),
84293 + .maxlen = sizeof(atomic_unchecked_t),
84294 .mode = 0644,
84295 .proc_handler = &read_reset_stat,
84296 },
84297 {
84298 .procname = "rdma_stat_rq_prod",
84299 .data = &rdma_stat_rq_prod,
84300 - .maxlen = sizeof(atomic_t),
84301 + .maxlen = sizeof(atomic_unchecked_t),
84302 .mode = 0644,
84303 .proc_handler = &read_reset_stat,
84304 },
84305 {
84306 .procname = "rdma_stat_sq_poll",
84307 .data = &rdma_stat_sq_poll,
84308 - .maxlen = sizeof(atomic_t),
84309 + .maxlen = sizeof(atomic_unchecked_t),
84310 .mode = 0644,
84311 .proc_handler = &read_reset_stat,
84312 },
84313 {
84314 .procname = "rdma_stat_sq_prod",
84315 .data = &rdma_stat_sq_prod,
84316 - .maxlen = sizeof(atomic_t),
84317 + .maxlen = sizeof(atomic_unchecked_t),
84318 .mode = 0644,
84319 .proc_handler = &read_reset_stat,
84320 },
84321 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84322 index 9e88438..8ed5cf0 100644
84323 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84324 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84325 @@ -495,7 +495,7 @@ next_sge:
84326 svc_rdma_put_context(ctxt, 0);
84327 goto out;
84328 }
84329 - atomic_inc(&rdma_stat_read);
84330 + atomic_inc_unchecked(&rdma_stat_read);
84331
84332 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
84333 chl_map->ch[ch_no].count -= read_wr.num_sge;
84334 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84335 dto_q);
84336 list_del_init(&ctxt->dto_q);
84337 } else {
84338 - atomic_inc(&rdma_stat_rq_starve);
84339 + atomic_inc_unchecked(&rdma_stat_rq_starve);
84340 clear_bit(XPT_DATA, &xprt->xpt_flags);
84341 ctxt = NULL;
84342 }
84343 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84344 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
84345 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
84346 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
84347 - atomic_inc(&rdma_stat_recv);
84348 + atomic_inc_unchecked(&rdma_stat_recv);
84349
84350 /* Build up the XDR from the receive buffers. */
84351 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
84352 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84353 index f11be72..7aad4e8 100644
84354 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84355 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84356 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
84357 write_wr.wr.rdma.remote_addr = to;
84358
84359 /* Post It */
84360 - atomic_inc(&rdma_stat_write);
84361 + atomic_inc_unchecked(&rdma_stat_write);
84362 if (svc_rdma_send(xprt, &write_wr))
84363 goto err;
84364 return 0;
84365 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84366 index 3fa5751..030ba89 100644
84367 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
84368 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84369 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84370 return;
84371
84372 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
84373 - atomic_inc(&rdma_stat_rq_poll);
84374 + atomic_inc_unchecked(&rdma_stat_rq_poll);
84375
84376 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
84377 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
84378 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84379 }
84380
84381 if (ctxt)
84382 - atomic_inc(&rdma_stat_rq_prod);
84383 + atomic_inc_unchecked(&rdma_stat_rq_prod);
84384
84385 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
84386 /*
84387 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84388 return;
84389
84390 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
84391 - atomic_inc(&rdma_stat_sq_poll);
84392 + atomic_inc_unchecked(&rdma_stat_sq_poll);
84393 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
84394 if (wc.status != IB_WC_SUCCESS)
84395 /* Close the transport */
84396 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84397 }
84398
84399 if (ctxt)
84400 - atomic_inc(&rdma_stat_sq_prod);
84401 + atomic_inc_unchecked(&rdma_stat_sq_prod);
84402 }
84403
84404 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
84405 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
84406 spin_lock_bh(&xprt->sc_lock);
84407 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
84408 spin_unlock_bh(&xprt->sc_lock);
84409 - atomic_inc(&rdma_stat_sq_starve);
84410 + atomic_inc_unchecked(&rdma_stat_sq_starve);
84411
84412 /* See if we can opportunistically reap SQ WR to make room */
84413 sq_cq_reap(xprt);
84414 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
84415 index 0b15d72..7934fbb 100644
84416 --- a/net/sysctl_net.c
84417 +++ b/net/sysctl_net.c
84418 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
84419 struct ctl_table *table)
84420 {
84421 /* Allow network administrator to have same access as root. */
84422 - if (capable(CAP_NET_ADMIN)) {
84423 + if (capable_nolog(CAP_NET_ADMIN)) {
84424 int mode = (table->mode >> 6) & 7;
84425 return (mode << 6) | (mode << 3) | mode;
84426 }
84427 diff --git a/net/tipc/link.c b/net/tipc/link.c
84428 index dd4c18b..f40d38d 100644
84429 --- a/net/tipc/link.c
84430 +++ b/net/tipc/link.c
84431 @@ -1418,7 +1418,7 @@ again:
84432
84433 if (!sect_rest) {
84434 sect_rest = msg_sect[++curr_sect].iov_len;
84435 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
84436 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
84437 }
84438
84439 if (sect_rest < fragm_rest)
84440 @@ -1437,7 +1437,7 @@ error:
84441 }
84442 } else
84443 skb_copy_to_linear_data_offset(buf, fragm_crs,
84444 - sect_crs, sz);
84445 + (const void __force_kernel *)sect_crs, sz);
84446 sect_crs += sz;
84447 sect_rest -= sz;
84448 fragm_crs += sz;
84449 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
84450 index 0747d8a..e8bf3f3 100644
84451 --- a/net/tipc/subscr.c
84452 +++ b/net/tipc/subscr.c
84453 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
84454 {
84455 struct iovec msg_sect;
84456
84457 - msg_sect.iov_base = (void *)&sub->evt;
84458 + msg_sect.iov_base = (void __force_user *)&sub->evt;
84459 msg_sect.iov_len = sizeof(struct tipc_event);
84460
84461 sub->evt.event = htohl(event, sub->swap);
84462 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
84463 index db8d51a..608692d 100644
84464 --- a/net/unix/af_unix.c
84465 +++ b/net/unix/af_unix.c
84466 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
84467 err = -ECONNREFUSED;
84468 if (!S_ISSOCK(inode->i_mode))
84469 goto put_fail;
84470 +
84471 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
84472 + err = -EACCES;
84473 + goto put_fail;
84474 + }
84475 +
84476 u = unix_find_socket_byinode(net, inode);
84477 if (!u)
84478 goto put_fail;
84479 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
84480 if (u) {
84481 struct dentry *dentry;
84482 dentry = unix_sk(u)->dentry;
84483 +
84484 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
84485 + err = -EPERM;
84486 + sock_put(u);
84487 + goto fail;
84488 + }
84489 +
84490 if (dentry)
84491 touch_atime(unix_sk(u)->mnt, dentry);
84492 } else
84493 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
84494 err = security_path_mknod(&nd.path, dentry, mode, 0);
84495 if (err)
84496 goto out_mknod_drop_write;
84497 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
84498 + err = -EACCES;
84499 + goto out_mknod_drop_write;
84500 + }
84501 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
84502 out_mknod_drop_write:
84503 mnt_drop_write(nd.path.mnt);
84504 if (err)
84505 goto out_mknod_dput;
84506 +
84507 + gr_handle_create(dentry, nd.path.mnt);
84508 +
84509 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
84510 dput(nd.path.dentry);
84511 nd.path.dentry = dentry;
84512 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
84513 unix_state_lock(s);
84514
84515 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
84516 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84517 + NULL,
84518 +#else
84519 s,
84520 +#endif
84521 atomic_read(&s->sk_refcnt),
84522 0,
84523 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
84524 diff --git a/net/wireless/core.h b/net/wireless/core.h
84525 index 376798f..109a61f 100644
84526 --- a/net/wireless/core.h
84527 +++ b/net/wireless/core.h
84528 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
84529 struct mutex mtx;
84530
84531 /* rfkill support */
84532 - struct rfkill_ops rfkill_ops;
84533 + rfkill_ops_no_const rfkill_ops;
84534 struct rfkill *rfkill;
84535 struct work_struct rfkill_sync;
84536
84537 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
84538 index a2e4c60..0979cbe 100644
84539 --- a/net/wireless/wext.c
84540 +++ b/net/wireless/wext.c
84541 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84542 */
84543
84544 /* Support for very large requests */
84545 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
84546 - (user_length > descr->max_tokens)) {
84547 + if (user_length > descr->max_tokens) {
84548 /* Allow userspace to GET more than max so
84549 * we can support any size GET requests.
84550 * There is still a limit : -ENOMEM.
84551 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84552 }
84553 }
84554
84555 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
84556 - /*
84557 - * If this is a GET, but not NOMAX, it means that the extra
84558 - * data is not bounded by userspace, but by max_tokens. Thus
84559 - * set the length to max_tokens. This matches the extra data
84560 - * allocation.
84561 - * The driver should fill it with the number of tokens it
84562 - * provided, and it may check iwp->length rather than having
84563 - * knowledge of max_tokens. If the driver doesn't change the
84564 - * iwp->length, this ioctl just copies back max_token tokens
84565 - * filled with zeroes. Hopefully the driver isn't claiming
84566 - * them to be valid data.
84567 - */
84568 - iwp->length = descr->max_tokens;
84569 - }
84570 -
84571 err = handler(dev, info, (union iwreq_data *) iwp, extra);
84572
84573 iwp->length += essid_compat;
84574 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
84575 index cb81ca3..e15d49a 100644
84576 --- a/net/xfrm/xfrm_policy.c
84577 +++ b/net/xfrm/xfrm_policy.c
84578 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
84579 hlist_add_head(&policy->bydst, chain);
84580 xfrm_pol_hold(policy);
84581 net->xfrm.policy_count[dir]++;
84582 - atomic_inc(&flow_cache_genid);
84583 + atomic_inc_unchecked(&flow_cache_genid);
84584 if (delpol)
84585 __xfrm_policy_unlink(delpol, dir);
84586 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
84587 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
84588 write_unlock_bh(&xfrm_policy_lock);
84589
84590 if (ret && delete) {
84591 - atomic_inc(&flow_cache_genid);
84592 + atomic_inc_unchecked(&flow_cache_genid);
84593 xfrm_policy_kill(ret);
84594 }
84595 return ret;
84596 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
84597 write_unlock_bh(&xfrm_policy_lock);
84598
84599 if (ret && delete) {
84600 - atomic_inc(&flow_cache_genid);
84601 + atomic_inc_unchecked(&flow_cache_genid);
84602 xfrm_policy_kill(ret);
84603 }
84604 return ret;
84605 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
84606 }
84607
84608 }
84609 - atomic_inc(&flow_cache_genid);
84610 + atomic_inc_unchecked(&flow_cache_genid);
84611 out:
84612 write_unlock_bh(&xfrm_policy_lock);
84613 return err;
84614 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
84615 write_unlock_bh(&xfrm_policy_lock);
84616 if (pol) {
84617 if (dir < XFRM_POLICY_MAX)
84618 - atomic_inc(&flow_cache_genid);
84619 + atomic_inc_unchecked(&flow_cache_genid);
84620 xfrm_policy_kill(pol);
84621 return 0;
84622 }
84623 @@ -1477,7 +1477,7 @@ free_dst:
84624 goto out;
84625 }
84626
84627 -static int inline
84628 +static inline int
84629 xfrm_dst_alloc_copy(void **target, void *src, int size)
84630 {
84631 if (!*target) {
84632 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
84633 return 0;
84634 }
84635
84636 -static int inline
84637 +static inline int
84638 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84639 {
84640 #ifdef CONFIG_XFRM_SUB_POLICY
84641 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84642 #endif
84643 }
84644
84645 -static int inline
84646 +static inline int
84647 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
84648 {
84649 #ifdef CONFIG_XFRM_SUB_POLICY
84650 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
84651 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
84652
84653 restart:
84654 - genid = atomic_read(&flow_cache_genid);
84655 + genid = atomic_read_unchecked(&flow_cache_genid);
84656 policy = NULL;
84657 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
84658 pols[pi] = NULL;
84659 @@ -1680,7 +1680,7 @@ restart:
84660 goto error;
84661 }
84662 if (nx == -EAGAIN ||
84663 - genid != atomic_read(&flow_cache_genid)) {
84664 + genid != atomic_read_unchecked(&flow_cache_genid)) {
84665 xfrm_pols_put(pols, npols);
84666 goto restart;
84667 }
84668 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
84669 index b95a2d6..85c4d78 100644
84670 --- a/net/xfrm/xfrm_user.c
84671 +++ b/net/xfrm/xfrm_user.c
84672 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
84673 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
84674 int i;
84675
84676 + pax_track_stack();
84677 +
84678 if (xp->xfrm_nr == 0)
84679 return 0;
84680
84681 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
84682 int err;
84683 int n = 0;
84684
84685 + pax_track_stack();
84686 +
84687 if (attrs[XFRMA_MIGRATE] == NULL)
84688 return -EINVAL;
84689
84690 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
84691 index 45b7d56..19e828c 100644
84692 --- a/samples/kobject/kset-example.c
84693 +++ b/samples/kobject/kset-example.c
84694 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
84695 }
84696
84697 /* Our custom sysfs_ops that we will associate with our ktype later on */
84698 -static struct sysfs_ops foo_sysfs_ops = {
84699 +static const struct sysfs_ops foo_sysfs_ops = {
84700 .show = foo_attr_show,
84701 .store = foo_attr_store,
84702 };
84703 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
84704 index 341b589..405aed3 100644
84705 --- a/scripts/Makefile.build
84706 +++ b/scripts/Makefile.build
84707 @@ -59,7 +59,7 @@ endif
84708 endif
84709
84710 # Do not include host rules unless needed
84711 -ifneq ($(hostprogs-y)$(hostprogs-m),)
84712 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
84713 include scripts/Makefile.host
84714 endif
84715
84716 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
84717 index 6f89fbb..53adc9c 100644
84718 --- a/scripts/Makefile.clean
84719 +++ b/scripts/Makefile.clean
84720 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
84721 __clean-files := $(extra-y) $(always) \
84722 $(targets) $(clean-files) \
84723 $(host-progs) \
84724 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
84725 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
84726 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
84727
84728 # as clean-files is given relative to the current directory, this adds
84729 # a $(obj) prefix, except for absolute paths
84730 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
84731 index 1ac414f..a1c1451 100644
84732 --- a/scripts/Makefile.host
84733 +++ b/scripts/Makefile.host
84734 @@ -31,6 +31,7 @@
84735 # Note: Shared libraries consisting of C++ files are not supported
84736
84737 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
84738 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
84739
84740 # C code
84741 # Executables compiled from a single .c file
84742 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
84743 # Shared libaries (only .c supported)
84744 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
84745 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
84746 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
84747 # Remove .so files from "xxx-objs"
84748 host-cobjs := $(filter-out %.so,$(host-cobjs))
84749
84750 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
84751 index 6bf21f8..c0546b3 100644
84752 --- a/scripts/basic/fixdep.c
84753 +++ b/scripts/basic/fixdep.c
84754 @@ -162,7 +162,7 @@ static void grow_config(int len)
84755 /*
84756 * Lookup a value in the configuration string.
84757 */
84758 -static int is_defined_config(const char * name, int len)
84759 +static int is_defined_config(const char * name, unsigned int len)
84760 {
84761 const char * pconfig;
84762 const char * plast = str_config + len_config - len;
84763 @@ -199,7 +199,7 @@ static void clear_config(void)
84764 /*
84765 * Record the use of a CONFIG_* word.
84766 */
84767 -static void use_config(char *m, int slen)
84768 +static void use_config(char *m, unsigned int slen)
84769 {
84770 char s[PATH_MAX];
84771 char *p;
84772 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
84773
84774 static void parse_config_file(char *map, size_t len)
84775 {
84776 - int *end = (int *) (map + len);
84777 + unsigned int *end = (unsigned int *) (map + len);
84778 /* start at +1, so that p can never be < map */
84779 - int *m = (int *) map + 1;
84780 + unsigned int *m = (unsigned int *) map + 1;
84781 char *p, *q;
84782
84783 for (; m < end; m++) {
84784 @@ -371,7 +371,7 @@ static void print_deps(void)
84785 static void traps(void)
84786 {
84787 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
84788 - int *p = (int *)test;
84789 + unsigned int *p = (unsigned int *)test;
84790
84791 if (*p != INT_CONF) {
84792 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
84793 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
84794 new file mode 100644
84795 index 0000000..8729101
84796 --- /dev/null
84797 +++ b/scripts/gcc-plugin.sh
84798 @@ -0,0 +1,2 @@
84799 +#!/bin/sh
84800 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
84801 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
84802 index 62a9025..65b82ad 100644
84803 --- a/scripts/mod/file2alias.c
84804 +++ b/scripts/mod/file2alias.c
84805 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
84806 unsigned long size, unsigned long id_size,
84807 void *symval)
84808 {
84809 - int i;
84810 + unsigned int i;
84811
84812 if (size % id_size || size < id_size) {
84813 if (cross_build != 0)
84814 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
84815 /* USB is special because the bcdDevice can be matched against a numeric range */
84816 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
84817 static void do_usb_entry(struct usb_device_id *id,
84818 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
84819 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
84820 unsigned char range_lo, unsigned char range_hi,
84821 struct module *mod)
84822 {
84823 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
84824 {
84825 unsigned int devlo, devhi;
84826 unsigned char chi, clo;
84827 - int ndigits;
84828 + unsigned int ndigits;
84829
84830 id->match_flags = TO_NATIVE(id->match_flags);
84831 id->idVendor = TO_NATIVE(id->idVendor);
84832 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
84833 for (i = 0; i < count; i++) {
84834 const char *id = (char *)devs[i].id;
84835 char acpi_id[sizeof(devs[0].id)];
84836 - int j;
84837 + unsigned int j;
84838
84839 buf_printf(&mod->dev_table_buf,
84840 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84841 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84842
84843 for (j = 0; j < PNP_MAX_DEVICES; j++) {
84844 const char *id = (char *)card->devs[j].id;
84845 - int i2, j2;
84846 + unsigned int i2, j2;
84847 int dup = 0;
84848
84849 if (!id[0])
84850 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84851 /* add an individual alias for every device entry */
84852 if (!dup) {
84853 char acpi_id[sizeof(card->devs[0].id)];
84854 - int k;
84855 + unsigned int k;
84856
84857 buf_printf(&mod->dev_table_buf,
84858 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84859 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
84860 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
84861 char *alias)
84862 {
84863 - int i, j;
84864 + unsigned int i, j;
84865
84866 sprintf(alias, "dmi*");
84867
84868 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
84869 index 03efeab..0888989 100644
84870 --- a/scripts/mod/modpost.c
84871 +++ b/scripts/mod/modpost.c
84872 @@ -835,6 +835,7 @@ enum mismatch {
84873 INIT_TO_EXIT,
84874 EXIT_TO_INIT,
84875 EXPORT_TO_INIT_EXIT,
84876 + DATA_TO_TEXT
84877 };
84878
84879 struct sectioncheck {
84880 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
84881 .fromsec = { "__ksymtab*", NULL },
84882 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
84883 .mismatch = EXPORT_TO_INIT_EXIT
84884 +},
84885 +/* Do not reference code from writable data */
84886 +{
84887 + .fromsec = { DATA_SECTIONS, NULL },
84888 + .tosec = { TEXT_SECTIONS, NULL },
84889 + .mismatch = DATA_TO_TEXT
84890 }
84891 };
84892
84893 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
84894 continue;
84895 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
84896 continue;
84897 - if (sym->st_value == addr)
84898 - return sym;
84899 /* Find a symbol nearby - addr are maybe negative */
84900 d = sym->st_value - addr;
84901 + if (d == 0)
84902 + return sym;
84903 if (d < 0)
84904 d = addr - sym->st_value;
84905 if (d < distance) {
84906 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
84907 "Fix this by removing the %sannotation of %s "
84908 "or drop the export.\n",
84909 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
84910 + case DATA_TO_TEXT:
84911 +/*
84912 + fprintf(stderr,
84913 + "The variable %s references\n"
84914 + "the %s %s%s%s\n",
84915 + fromsym, to, sec2annotation(tosec), tosym, to_p);
84916 +*/
84917 + break;
84918 case NO_MISMATCH:
84919 /* To get warnings on missing members */
84920 break;
84921 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
84922 static void check_sec_ref(struct module *mod, const char *modname,
84923 struct elf_info *elf)
84924 {
84925 - int i;
84926 + unsigned int i;
84927 Elf_Shdr *sechdrs = elf->sechdrs;
84928
84929 /* Walk through all sections */
84930 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
84931 va_end(ap);
84932 }
84933
84934 -void buf_write(struct buffer *buf, const char *s, int len)
84935 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
84936 {
84937 if (buf->size - buf->pos < len) {
84938 buf->size += len + SZ;
84939 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
84940 if (fstat(fileno(file), &st) < 0)
84941 goto close_write;
84942
84943 - if (st.st_size != b->pos)
84944 + if (st.st_size != (off_t)b->pos)
84945 goto close_write;
84946
84947 tmp = NOFAIL(malloc(b->pos));
84948 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
84949 index 09f58e3..4b66092 100644
84950 --- a/scripts/mod/modpost.h
84951 +++ b/scripts/mod/modpost.h
84952 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
84953
84954 struct buffer {
84955 char *p;
84956 - int pos;
84957 - int size;
84958 + unsigned int pos;
84959 + unsigned int size;
84960 };
84961
84962 void __attribute__((format(printf, 2, 3)))
84963 buf_printf(struct buffer *buf, const char *fmt, ...);
84964
84965 void
84966 -buf_write(struct buffer *buf, const char *s, int len);
84967 +buf_write(struct buffer *buf, const char *s, unsigned int len);
84968
84969 struct module {
84970 struct module *next;
84971 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
84972 index ecf9c7d..d52b38e 100644
84973 --- a/scripts/mod/sumversion.c
84974 +++ b/scripts/mod/sumversion.c
84975 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
84976 goto out;
84977 }
84978
84979 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
84980 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
84981 warn("writing sum in %s failed: %s\n",
84982 filename, strerror(errno));
84983 goto out;
84984 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
84985 index 47bdd2f..d4d4e93 100755
84986 --- a/scripts/package/mkspec
84987 +++ b/scripts/package/mkspec
84988 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
84989 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
84990 echo "%endif"
84991
84992 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
84993 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
84994 echo "%ifarch ia64"
84995 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
84996 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
84997 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
84998 index 5c11312..72742b5 100644
84999 --- a/scripts/pnmtologo.c
85000 +++ b/scripts/pnmtologo.c
85001 @@ -237,14 +237,14 @@ static void write_header(void)
85002 fprintf(out, " * Linux logo %s\n", logoname);
85003 fputs(" */\n\n", out);
85004 fputs("#include <linux/linux_logo.h>\n\n", out);
85005 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
85006 + fprintf(out, "static unsigned char %s_data[] = {\n",
85007 logoname);
85008 }
85009
85010 static void write_footer(void)
85011 {
85012 fputs("\n};\n\n", out);
85013 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
85014 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
85015 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
85016 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
85017 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
85018 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
85019 fputs("\n};\n\n", out);
85020
85021 /* write logo clut */
85022 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
85023 + fprintf(out, "static unsigned char %s_clut[] = {\n",
85024 logoname);
85025 write_hex_cnt = 0;
85026 for (i = 0; i < logo_clutsize; i++) {
85027 diff --git a/scripts/tags.sh b/scripts/tags.sh
85028 index d52f7a0..269eb1b 100755
85029 --- a/scripts/tags.sh
85030 +++ b/scripts/tags.sh
85031 @@ -93,6 +93,11 @@ docscope()
85032 cscope -b -f cscope.out
85033 }
85034
85035 +dogtags()
85036 +{
85037 + all_sources | gtags -f -
85038 +}
85039 +
85040 exuberant()
85041 {
85042 all_sources | xargs $1 -a \
85043 @@ -164,6 +169,10 @@ case "$1" in
85044 docscope
85045 ;;
85046
85047 + "gtags")
85048 + dogtags
85049 + ;;
85050 +
85051 "tags")
85052 rm -f tags
85053 xtags ctags
85054 diff --git a/security/Kconfig b/security/Kconfig
85055 index fb363cd..886ace4 100644
85056 --- a/security/Kconfig
85057 +++ b/security/Kconfig
85058 @@ -4,6 +4,626 @@
85059
85060 menu "Security options"
85061
85062 +source grsecurity/Kconfig
85063 +
85064 +menu "PaX"
85065 +
85066 + config ARCH_TRACK_EXEC_LIMIT
85067 + bool
85068 +
85069 + config PAX_KERNEXEC_PLUGIN
85070 + bool
85071 +
85072 + config PAX_PER_CPU_PGD
85073 + bool
85074 +
85075 + config TASK_SIZE_MAX_SHIFT
85076 + int
85077 + depends on X86_64
85078 + default 47 if !PAX_PER_CPU_PGD
85079 + default 42 if PAX_PER_CPU_PGD
85080 +
85081 + config PAX_ENABLE_PAE
85082 + bool
85083 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
85084 +
85085 +config PAX
85086 + bool "Enable various PaX features"
85087 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
85088 + help
85089 + This allows you to enable various PaX features. PaX adds
85090 + intrusion prevention mechanisms to the kernel that reduce
85091 + the risks posed by exploitable memory corruption bugs.
85092 +
85093 +menu "PaX Control"
85094 + depends on PAX
85095 +
85096 +config PAX_SOFTMODE
85097 + bool 'Support soft mode'
85098 + help
85099 + Enabling this option will allow you to run PaX in soft mode, that
85100 + is, PaX features will not be enforced by default, only on executables
85101 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
85102 + support as they are the only way to mark executables for soft mode use.
85103 +
85104 + Soft mode can be activated by using the "pax_softmode=1" kernel command
85105 + line option on boot. Furthermore you can control various PaX features
85106 + at runtime via the entries in /proc/sys/kernel/pax.
85107 +
85108 +config PAX_EI_PAX
85109 + bool 'Use legacy ELF header marking'
85110 + help
85111 + Enabling this option will allow you to control PaX features on
85112 + a per executable basis via the 'chpax' utility available at
85113 + http://pax.grsecurity.net/. The control flags will be read from
85114 + an otherwise reserved part of the ELF header. This marking has
85115 + numerous drawbacks (no support for soft-mode, toolchain does not
85116 + know about the non-standard use of the ELF header) therefore it
85117 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
85118 + support.
85119 +
85120 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85121 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
85122 + option otherwise they will not get any protection.
85123 +
85124 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
85125 + support as well, they will override the legacy EI_PAX marks.
85126 +
85127 +config PAX_PT_PAX_FLAGS
85128 + bool 'Use ELF program header marking'
85129 + help
85130 + Enabling this option will allow you to control PaX features on
85131 + a per executable basis via the 'paxctl' utility available at
85132 + http://pax.grsecurity.net/. The control flags will be read from
85133 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
85134 + has the benefits of supporting both soft mode and being fully
85135 + integrated into the toolchain (the binutils patch is available
85136 + from http://pax.grsecurity.net).
85137 +
85138 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85139 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85140 + support otherwise they will not get any protection.
85141 +
85142 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85143 + must make sure that the marks are the same if a binary has both marks.
85144 +
85145 + Note that if you enable the legacy EI_PAX marking support as well,
85146 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
85147 +
85148 +config PAX_XATTR_PAX_FLAGS
85149 + bool 'Use filesystem extended attributes marking'
85150 + depends on EXPERT
85151 + select CIFS_XATTR if CIFS
85152 + select EXT2_FS_XATTR if EXT2_FS
85153 + select EXT3_FS_XATTR if EXT3_FS
85154 + select EXT4_FS_XATTR if EXT4_FS
85155 + select JFFS2_FS_XATTR if JFFS2_FS
85156 + select REISERFS_FS_XATTR if REISERFS_FS
85157 + select UBIFS_FS_XATTR if UBIFS_FS
85158 + help
85159 + Enabling this option will allow you to control PaX features on
85160 + a per executable basis via the 'setfattr' utility. The control
85161 + flags will be read from the user.pax.flags extended attribute of
85162 + the file. This marking has the benefit of supporting binary-only
85163 + applications that self-check themselves (e.g., skype) and would
85164 + not tolerate chpax/paxctl changes. The main drawback is that
85165 + extended attributes are not supported by some filesystems (e.g.,
85166 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
85167 + filesystems will lose the extended attributes and these PaX markings.
85168 +
85169 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85170 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85171 + support otherwise they will not get any protection.
85172 +
85173 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85174 + must make sure that the marks are the same if a binary has both marks.
85175 +
85176 + Note that if you enable the legacy EI_PAX marking support as well,
85177 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
85178 +
85179 +choice
85180 + prompt 'MAC system integration'
85181 + default PAX_HAVE_ACL_FLAGS
85182 + help
85183 + Mandatory Access Control systems have the option of controlling
85184 + PaX flags on a per executable basis, choose the method supported
85185 + by your particular system.
85186 +
85187 + - "none": if your MAC system does not interact with PaX,
85188 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
85189 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
85190 +
85191 + NOTE: this option is for developers/integrators only.
85192 +
85193 + config PAX_NO_ACL_FLAGS
85194 + bool 'none'
85195 +
85196 + config PAX_HAVE_ACL_FLAGS
85197 + bool 'direct'
85198 +
85199 + config PAX_HOOK_ACL_FLAGS
85200 + bool 'hook'
85201 +endchoice
85202 +
85203 +endmenu
85204 +
85205 +menu "Non-executable pages"
85206 + depends on PAX
85207 +
85208 +config PAX_NOEXEC
85209 + bool "Enforce non-executable pages"
85210 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
85211 + help
85212 + By design some architectures do not allow for protecting memory
85213 + pages against execution or even if they do, Linux does not make
85214 + use of this feature. In practice this means that if a page is
85215 + readable (such as the stack or heap) it is also executable.
85216 +
85217 + There is a well known exploit technique that makes use of this
85218 + fact and a common programming mistake where an attacker can
85219 + introduce code of his choice somewhere in the attacked program's
85220 + memory (typically the stack or the heap) and then execute it.
85221 +
85222 + If the attacked program was running with different (typically
85223 + higher) privileges than that of the attacker, then he can elevate
85224 + his own privilege level (e.g. get a root shell, write to files for
85225 + which he does not have write access to, etc).
85226 +
85227 + Enabling this option will let you choose from various features
85228 + that prevent the injection and execution of 'foreign' code in
85229 + a program.
85230 +
85231 + This will also break programs that rely on the old behaviour and
85232 + expect that dynamically allocated memory via the malloc() family
85233 + of functions is executable (which it is not). Notable examples
85234 + are the XFree86 4.x server, the java runtime and wine.
85235 +
85236 +config PAX_PAGEEXEC
85237 + bool "Paging based non-executable pages"
85238 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
85239 + select S390_SWITCH_AMODE if S390
85240 + select S390_EXEC_PROTECT if S390
85241 + select ARCH_TRACK_EXEC_LIMIT if X86_32
85242 + help
85243 + This implementation is based on the paging feature of the CPU.
85244 + On i386 without hardware non-executable bit support there is a
85245 + variable but usually low performance impact, however on Intel's
85246 + P4 core based CPUs it is very high so you should not enable this
85247 + for kernels meant to be used on such CPUs.
85248 +
85249 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
85250 + with hardware non-executable bit support there is no performance
85251 + impact, on ppc the impact is negligible.
85252 +
85253 + Note that several architectures require various emulations due to
85254 + badly designed userland ABIs, this will cause a performance impact
85255 + but will disappear as soon as userland is fixed. For example, ppc
85256 + userland MUST have been built with secure-plt by a recent toolchain.
85257 +
85258 +config PAX_SEGMEXEC
85259 + bool "Segmentation based non-executable pages"
85260 + depends on PAX_NOEXEC && X86_32
85261 + help
85262 + This implementation is based on the segmentation feature of the
85263 + CPU and has a very small performance impact, however applications
85264 + will be limited to a 1.5 GB address space instead of the normal
85265 + 3 GB.
85266 +
85267 +config PAX_EMUTRAMP
85268 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
85269 + default y if PARISC
85270 + help
85271 + There are some programs and libraries that for one reason or
85272 + another attempt to execute special small code snippets from
85273 + non-executable memory pages. Most notable examples are the
85274 + signal handler return code generated by the kernel itself and
85275 + the GCC trampolines.
85276 +
85277 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
85278 + such programs will no longer work under your kernel.
85279 +
85280 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
85281 + utilities to enable trampoline emulation for the affected programs
85282 + yet still have the protection provided by the non-executable pages.
85283 +
85284 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
85285 + your system will not even boot.
85286 +
85287 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
85288 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
85289 + for the affected files.
85290 +
85291 + NOTE: enabling this feature *may* open up a loophole in the
85292 + protection provided by non-executable pages that an attacker
85293 + could abuse. Therefore the best solution is to not have any
85294 + files on your system that would require this option. This can
85295 + be achieved by not using libc5 (which relies on the kernel
85296 + signal handler return code) and not using or rewriting programs
85297 + that make use of the nested function implementation of GCC.
85298 + Skilled users can just fix GCC itself so that it implements
85299 + nested function calls in a way that does not interfere with PaX.
85300 +
85301 +config PAX_EMUSIGRT
85302 + bool "Automatically emulate sigreturn trampolines"
85303 + depends on PAX_EMUTRAMP && PARISC
85304 + default y
85305 + help
85306 + Enabling this option will have the kernel automatically detect
85307 + and emulate signal return trampolines executing on the stack
85308 + that would otherwise lead to task termination.
85309 +
85310 + This solution is intended as a temporary one for users with
85311 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
85312 + Modula-3 runtime, etc) or executables linked to such, basically
85313 + everything that does not specify its own SA_RESTORER function in
85314 + normal executable memory like glibc 2.1+ does.
85315 +
85316 + On parisc you MUST enable this option, otherwise your system will
85317 + not even boot.
85318 +
85319 + NOTE: this feature cannot be disabled on a per executable basis
85320 + and since it *does* open up a loophole in the protection provided
85321 + by non-executable pages, the best solution is to not have any
85322 + files on your system that would require this option.
85323 +
85324 +config PAX_MPROTECT
85325 + bool "Restrict mprotect()"
85326 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
85327 + help
85328 + Enabling this option will prevent programs from
85329 + - changing the executable status of memory pages that were
85330 + not originally created as executable,
85331 + - making read-only executable pages writable again,
85332 + - creating executable pages from anonymous memory,
85333 + - making read-only-after-relocations (RELRO) data pages writable again.
85334 +
85335 + You should say Y here to complete the protection provided by
85336 + the enforcement of non-executable pages.
85337 +
85338 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85339 + this feature on a per file basis.
85340 +
85341 +config PAX_MPROTECT_COMPAT
85342 + bool "Use legacy/compat protection demoting (read help)"
85343 + depends on PAX_MPROTECT
85344 + default n
85345 + help
85346 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
85347 + by sending the proper error code to the application. For some broken
85348 + userland, this can cause problems with Python or other applications. The
85349 + current implementation however allows for applications like clamav to
85350 + detect if JIT compilation/execution is allowed and to fall back gracefully
85351 + to an interpreter-based mode if it does not. While we encourage everyone
85352 + to use the current implementation as-is and push upstream to fix broken
85353 + userland (note that the RWX logging option can assist with this), in some
85354 + environments this may not be possible. Having to disable MPROTECT
85355 + completely on certain binaries reduces the security benefit of PaX,
85356 + so this option is provided for those environments to revert to the old
85357 + behavior.
85358 +
85359 +config PAX_ELFRELOCS
85360 + bool "Allow ELF text relocations (read help)"
85361 + depends on PAX_MPROTECT
85362 + default n
85363 + help
85364 + Non-executable pages and mprotect() restrictions are effective
85365 + in preventing the introduction of new executable code into an
85366 + attacked task's address space. There remain only two venues
85367 + for this kind of attack: if the attacker can execute already
85368 + existing code in the attacked task then he can either have it
85369 + create and mmap() a file containing his code or have it mmap()
85370 + an already existing ELF library that does not have position
85371 + independent code in it and use mprotect() on it to make it
85372 + writable and copy his code there. While protecting against
85373 + the former approach is beyond PaX, the latter can be prevented
85374 + by having only PIC ELF libraries on one's system (which do not
85375 + need to relocate their code). If you are sure this is your case,
85376 + as is the case with all modern Linux distributions, then leave
85377 + this option disabled. You should say 'n' here.
85378 +
85379 +config PAX_ETEXECRELOCS
85380 + bool "Allow ELF ET_EXEC text relocations"
85381 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
85382 + select PAX_ELFRELOCS
85383 + default y
85384 + help
85385 + On some architectures there are incorrectly created applications
85386 + that require text relocations and would not work without enabling
85387 + this option. If you are an alpha, ia64 or parisc user, you should
85388 + enable this option and disable it once you have made sure that
85389 + none of your applications need it.
85390 +
85391 +config PAX_EMUPLT
85392 + bool "Automatically emulate ELF PLT"
85393 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
85394 + default y
85395 + help
85396 + Enabling this option will have the kernel automatically detect
85397 + and emulate the Procedure Linkage Table entries in ELF files.
85398 + On some architectures such entries are in writable memory, and
85399 + become non-executable leading to task termination. Therefore
85400 + it is mandatory that you enable this option on alpha, parisc,
85401 + sparc and sparc64, otherwise your system would not even boot.
85402 +
85403 + NOTE: this feature *does* open up a loophole in the protection
85404 + provided by the non-executable pages, therefore the proper
85405 + solution is to modify the toolchain to produce a PLT that does
85406 + not need to be writable.
85407 +
85408 +config PAX_DLRESOLVE
85409 + bool 'Emulate old glibc resolver stub'
85410 + depends on PAX_EMUPLT && SPARC
85411 + default n
85412 + help
85413 + This option is needed if userland has an old glibc (before 2.4)
85414 + that puts a 'save' instruction into the runtime generated resolver
85415 + stub that needs special emulation.
85416 +
85417 +config PAX_KERNEXEC
85418 + bool "Enforce non-executable kernel pages"
85419 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
85420 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
85421 + select PAX_KERNEXEC_PLUGIN if X86_64
85422 + help
85423 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
85424 + that is, enabling this option will make it harder to inject
85425 + and execute 'foreign' code in kernel memory itself.
85426 +
85427 + Note that on x86_64 kernels there is a known regression when
85428 + this feature and KVM/VMX are both enabled in the host kernel.
85429 +
85430 +choice
85431 + prompt "Return Address Instrumentation Method"
85432 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
85433 + depends on PAX_KERNEXEC_PLUGIN
85434 + help
85435 + Select the method used to instrument function pointer dereferences.
85436 + Note that binary modules cannot be instrumented by this approach.
85437 +
85438 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
85439 + bool "bts"
85440 + help
85441 + This method is compatible with binary only modules but has
85442 + a higher runtime overhead.
85443 +
85444 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
85445 + bool "or"
85446 + depends on !PARAVIRT
85447 + help
85448 + This method is incompatible with binary only modules but has
85449 + a lower runtime overhead.
85450 +endchoice
85451 +
85452 +config PAX_KERNEXEC_PLUGIN_METHOD
85453 + string
85454 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
85455 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
85456 + default ""
85457 +
85458 +config PAX_KERNEXEC_MODULE_TEXT
85459 + int "Minimum amount of memory reserved for module code"
85460 + default "4"
85461 + depends on PAX_KERNEXEC && X86_32 && MODULES
85462 + help
85463 + Due to implementation details the kernel must reserve a fixed
85464 + amount of memory for module code at compile time that cannot be
85465 + changed at runtime. Here you can specify the minimum amount
85466 + in MB that will be reserved. Due to the same implementation
85467 + details this size will always be rounded up to the next 2/4 MB
85468 + boundary (depends on PAE) so the actually available memory for
85469 + module code will usually be more than this minimum.
85470 +
85471 + The default 4 MB should be enough for most users but if you have
85472 + an excessive number of modules (e.g., most distribution configs
85473 + compile many drivers as modules) or use huge modules such as
85474 + nvidia's kernel driver, you will need to adjust this amount.
85475 + A good rule of thumb is to look at your currently loaded kernel
85476 + modules and add up their sizes.
85477 +
85478 +endmenu
85479 +
85480 +menu "Address Space Layout Randomization"
85481 + depends on PAX
85482 +
85483 +config PAX_ASLR
85484 + bool "Address Space Layout Randomization"
85485 + help
85486 + Many if not most exploit techniques rely on the knowledge of
85487 + certain addresses in the attacked program. The following options
85488 + will allow the kernel to apply a certain amount of randomization
85489 + to specific parts of the program thereby forcing an attacker to
85490 + guess them in most cases. Any failed guess will most likely crash
85491 + the attacked program which allows the kernel to detect such attempts
85492 + and react on them. PaX itself provides no reaction mechanisms,
85493 + instead it is strongly encouraged that you make use of Nergal's
85494 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
85495 + (http://www.grsecurity.net/) built-in crash detection features or
85496 + develop one yourself.
85497 +
85498 + By saying Y here you can choose to randomize the following areas:
85499 + - top of the task's kernel stack
85500 + - top of the task's userland stack
85501 + - base address for mmap() requests that do not specify one
85502 + (this includes all libraries)
85503 + - base address of the main executable
85504 +
85505 + It is strongly recommended to say Y here as address space layout
85506 + randomization has negligible impact on performance yet it provides
85507 + a very effective protection.
85508 +
85509 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85510 + this feature on a per file basis.
85511 +
85512 +config PAX_RANDKSTACK
85513 + bool "Randomize kernel stack base"
85514 + depends on X86_TSC && X86
85515 + help
85516 + By saying Y here the kernel will randomize every task's kernel
85517 + stack on every system call. This will not only force an attacker
85518 + to guess it but also prevent him from making use of possible
85519 + leaked information about it.
85520 +
85521 + Since the kernel stack is a rather scarce resource, randomization
85522 + may cause unexpected stack overflows, therefore you should very
85523 + carefully test your system. Note that once enabled in the kernel
85524 + configuration, this feature cannot be disabled on a per file basis.
85525 +
85526 +config PAX_RANDUSTACK
85527 + bool "Randomize user stack base"
85528 + depends on PAX_ASLR
85529 + help
85530 + By saying Y here the kernel will randomize every task's userland
85531 + stack. The randomization is done in two steps where the second
85532 + one may apply a big amount of shift to the top of the stack and
85533 + cause problems for programs that want to use lots of memory (more
85534 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
85535 + For this reason the second step can be controlled by 'chpax' or
85536 + 'paxctl' on a per file basis.
85537 +
85538 +config PAX_RANDMMAP
85539 + bool "Randomize mmap() base"
85540 + depends on PAX_ASLR
85541 + help
85542 + By saying Y here the kernel will use a randomized base address for
85543 + mmap() requests that do not specify one themselves. As a result
85544 + all dynamically loaded libraries will appear at random addresses
85545 + and therefore be harder to exploit by a technique where an attacker
85546 + attempts to execute library code for his purposes (e.g. spawn a
85547 + shell from an exploited program that is running at an elevated
85548 + privilege level).
85549 +
85550 + Furthermore, if a program is relinked as a dynamic ELF file, its
85551 + base address will be randomized as well, completing the full
85552 + randomization of the address space layout. Attacking such programs
85553 + becomes a guess game. You can find an example of doing this at
85554 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
85555 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
85556 +
85557 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
85558 + feature on a per file basis.
85559 +
85560 +endmenu
85561 +
85562 +menu "Miscellaneous hardening features"
85563 +
85564 +config PAX_MEMORY_SANITIZE
85565 + bool "Sanitize all freed memory"
85566 + depends on !HIBERNATION
85567 + help
85568 + By saying Y here the kernel will erase memory pages as soon as they
85569 + are freed. This in turn reduces the lifetime of data stored in the
85570 + pages, making it less likely that sensitive information such as
85571 + passwords, cryptographic secrets, etc stay in memory for too long.
85572 +
85573 + This is especially useful for programs whose runtime is short, long
85574 + lived processes and the kernel itself benefit from this as long as
85575 + they operate on whole memory pages and ensure timely freeing of pages
85576 + that may hold sensitive information.
85577 +
85578 + The tradeoff is performance impact, on a single CPU system kernel
85579 + compilation sees a 3% slowdown, other systems and workloads may vary
85580 + and you are advised to test this feature on your expected workload
85581 + before deploying it.
85582 +
85583 + Note that this feature does not protect data stored in live pages,
85584 + e.g., process memory swapped to disk may stay there for a long time.
85585 +
85586 +config PAX_MEMORY_STACKLEAK
85587 + bool "Sanitize kernel stack"
85588 + depends on X86
85589 + help
85590 + By saying Y here the kernel will erase the kernel stack before it
85591 + returns from a system call. This in turn reduces the information
85592 + that a kernel stack leak bug can reveal.
85593 +
85594 + Note that such a bug can still leak information that was put on
85595 + the stack by the current system call (the one eventually triggering
85596 + the bug) but traces of earlier system calls on the kernel stack
85597 + cannot leak anymore.
85598 +
85599 + The tradeoff is performance impact, on a single CPU system kernel
85600 + compilation sees a 1% slowdown, other systems and workloads may vary
85601 + and you are advised to test this feature on your expected workload
85602 + before deploying it.
85603 +
85604 + Note: full support for this feature requires gcc with plugin support
85605 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
85606 + versions means that functions with large enough stack frames may
85607 + leave uninitialized memory behind that may be exposed to a later
85608 + syscall leaking the stack.
85609 +
85610 +config PAX_MEMORY_UDEREF
85611 + bool "Prevent invalid userland pointer dereference"
85612 + depends on X86 && !UML_X86 && !XEN
85613 + select PAX_PER_CPU_PGD if X86_64
85614 + help
85615 + By saying Y here the kernel will be prevented from dereferencing
85616 + userland pointers in contexts where the kernel expects only kernel
85617 + pointers. This is both a useful runtime debugging feature and a
85618 + security measure that prevents exploiting a class of kernel bugs.
85619 +
85620 + The tradeoff is that some virtualization solutions may experience
85621 + a huge slowdown and therefore you should not enable this feature
85622 + for kernels meant to run in such environments. Whether a given VM
85623 + solution is affected or not is best determined by simply trying it
85624 + out, the performance impact will be obvious right on boot as this
85625 + mechanism engages from very early on. A good rule of thumb is that
85626 + VMs running on CPUs without hardware virtualization support (i.e.,
85627 + the majority of IA-32 CPUs) will likely experience the slowdown.
85628 +
85629 +config PAX_REFCOUNT
85630 + bool "Prevent various kernel object reference counter overflows"
85631 + depends on GRKERNSEC && (X86 || SPARC64)
85632 + help
85633 + By saying Y here the kernel will detect and prevent overflowing
85634 + various (but not all) kinds of object reference counters. Such
85635 + overflows can normally occur due to bugs only and are often, if
85636 + not always, exploitable.
85637 +
85638 + The tradeoff is that data structures protected by an overflowed
85639 + refcount will never be freed and therefore will leak memory. Note
85640 + that this leak also happens even without this protection but in
85641 + that case the overflow can eventually trigger the freeing of the
85642 + data structure while it is still being used elsewhere, resulting
85643 + in the exploitable situation that this feature prevents.
85644 +
85645 + Since this has a negligible performance impact, you should enable
85646 + this feature.
85647 +
85648 +config PAX_USERCOPY
85649 + bool "Harden heap object copies between kernel and userland"
85650 + depends on X86 || PPC || SPARC || ARM
85651 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
85652 + help
85653 + By saying Y here the kernel will enforce the size of heap objects
85654 + when they are copied in either direction between the kernel and
85655 + userland, even if only a part of the heap object is copied.
85656 +
85657 + Specifically, this checking prevents information leaking from the
85658 + kernel heap during kernel to userland copies (if the kernel heap
85659 + object is otherwise fully initialized) and prevents kernel heap
85660 + overflows during userland to kernel copies.
85661 +
85662 + Note that the current implementation provides the strictest bounds
85663 + checks for the SLUB allocator.
85664 +
85665 + Enabling this option also enables per-slab cache protection against
85666 + data in a given cache being copied into/out of via userland
85667 + accessors. Though the whitelist of regions will be reduced over
85668 + time, it notably protects important data structures like task structs.
85669 +
85670 +
85671 + If frame pointers are enabled on x86, this option will also
85672 + restrict copies into and out of the kernel stack to local variables
85673 + within a single frame.
85674 +
85675 + Since this has a negligible performance impact, you should enable
85676 + this feature.
85677 +
85678 +endmenu
85679 +
85680 +endmenu
85681 +
85682 config KEYS
85683 bool "Enable access key retention support"
85684 help
85685 @@ -146,7 +766,7 @@ config INTEL_TXT
85686 config LSM_MMAP_MIN_ADDR
85687 int "Low address space for LSM to protect from user allocation"
85688 depends on SECURITY && SECURITY_SELINUX
85689 - default 65536
85690 + default 32768
85691 help
85692 This is the portion of low virtual memory which should be protected
85693 from userspace allocation. Keeping a user from writing to low pages
85694 diff --git a/security/capability.c b/security/capability.c
85695 index fce07a7..5f12858 100644
85696 --- a/security/capability.c
85697 +++ b/security/capability.c
85698 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
85699 }
85700 #endif /* CONFIG_AUDIT */
85701
85702 -struct security_operations default_security_ops = {
85703 +struct security_operations default_security_ops __read_only = {
85704 .name = "default",
85705 };
85706
85707 diff --git a/security/commoncap.c b/security/commoncap.c
85708 index fe30751..aaba312 100644
85709 --- a/security/commoncap.c
85710 +++ b/security/commoncap.c
85711 @@ -27,6 +27,8 @@
85712 #include <linux/sched.h>
85713 #include <linux/prctl.h>
85714 #include <linux/securebits.h>
85715 +#include <linux/syslog.h>
85716 +#include <net/sock.h>
85717
85718 /*
85719 * If a non-root user executes a setuid-root binary in
85720 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
85721 }
85722 }
85723
85724 +#ifdef CONFIG_NET
85725 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
85726 +#endif
85727 +
85728 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
85729 {
85730 +#ifdef CONFIG_NET
85731 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
85732 +#else
85733 NETLINK_CB(skb).eff_cap = current_cap();
85734 +#endif
85735 +
85736 return 0;
85737 }
85738
85739 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
85740 {
85741 const struct cred *cred = current_cred();
85742
85743 + if (gr_acl_enable_at_secure())
85744 + return 1;
85745 +
85746 if (cred->uid != 0) {
85747 if (bprm->cap_effective)
85748 return 1;
85749 @@ -956,13 +970,18 @@ error:
85750 /**
85751 * cap_syslog - Determine whether syslog function is permitted
85752 * @type: Function requested
85753 + * @from_file: Whether this request came from an open file (i.e. /proc)
85754 *
85755 * Determine whether the current process is permitted to use a particular
85756 * syslog function, returning 0 if permission is granted, -ve if not.
85757 */
85758 -int cap_syslog(int type)
85759 +int cap_syslog(int type, bool from_file)
85760 {
85761 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
85762 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
85763 + if (type != SYSLOG_ACTION_OPEN && from_file)
85764 + return 0;
85765 + if ((type != SYSLOG_ACTION_READ_ALL &&
85766 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
85767 return -EPERM;
85768 return 0;
85769 }
85770 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
85771 index 165eb53..b1db4eb 100644
85772 --- a/security/integrity/ima/ima.h
85773 +++ b/security/integrity/ima/ima.h
85774 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85775 extern spinlock_t ima_queue_lock;
85776
85777 struct ima_h_table {
85778 - atomic_long_t len; /* number of stored measurements in the list */
85779 - atomic_long_t violations;
85780 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
85781 + atomic_long_unchecked_t violations;
85782 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
85783 };
85784 extern struct ima_h_table ima_htable;
85785 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
85786 index 852bf85..35d6df3 100644
85787 --- a/security/integrity/ima/ima_api.c
85788 +++ b/security/integrity/ima/ima_api.c
85789 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85790 int result;
85791
85792 /* can overflow, only indicator */
85793 - atomic_long_inc(&ima_htable.violations);
85794 + atomic_long_inc_unchecked(&ima_htable.violations);
85795
85796 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
85797 if (!entry) {
85798 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
85799 index 0c72c9c..433e29b 100644
85800 --- a/security/integrity/ima/ima_fs.c
85801 +++ b/security/integrity/ima/ima_fs.c
85802 @@ -27,12 +27,12 @@
85803 static int valid_policy = 1;
85804 #define TMPBUFLEN 12
85805 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
85806 - loff_t *ppos, atomic_long_t *val)
85807 + loff_t *ppos, atomic_long_unchecked_t *val)
85808 {
85809 char tmpbuf[TMPBUFLEN];
85810 ssize_t len;
85811
85812 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
85813 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
85814 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
85815 }
85816
85817 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
85818 index e19316d..339f7ae 100644
85819 --- a/security/integrity/ima/ima_queue.c
85820 +++ b/security/integrity/ima/ima_queue.c
85821 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
85822 INIT_LIST_HEAD(&qe->later);
85823 list_add_tail_rcu(&qe->later, &ima_measurements);
85824
85825 - atomic_long_inc(&ima_htable.len);
85826 + atomic_long_inc_unchecked(&ima_htable.len);
85827 key = ima_hash_key(entry->digest);
85828 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
85829 return 0;
85830 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
85831 index e031952..c9a535d 100644
85832 --- a/security/keys/keyring.c
85833 +++ b/security/keys/keyring.c
85834 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
85835 ret = -EFAULT;
85836
85837 for (loop = 0; loop < klist->nkeys; loop++) {
85838 + key_serial_t serial;
85839 key = klist->keys[loop];
85840 + serial = key->serial;
85841
85842 tmp = sizeof(key_serial_t);
85843 if (tmp > buflen)
85844 tmp = buflen;
85845
85846 - if (copy_to_user(buffer,
85847 - &key->serial,
85848 - tmp) != 0)
85849 + if (copy_to_user(buffer, &serial, tmp))
85850 goto error;
85851
85852 buflen -= tmp;
85853 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
85854 index 931cfda..e71808a 100644
85855 --- a/security/keys/process_keys.c
85856 +++ b/security/keys/process_keys.c
85857 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
85858 ret = install_process_keyring_to_cred(new);
85859 if (ret < 0) {
85860 abort_creds(new);
85861 - return ret != -EEXIST ?: 0;
85862 + return ret != -EEXIST ? ret : 0;
85863 }
85864
85865 return commit_creds(new);
85866 diff --git a/security/min_addr.c b/security/min_addr.c
85867 index d9f9425..c28cef4 100644
85868 --- a/security/min_addr.c
85869 +++ b/security/min_addr.c
85870 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
85871 */
85872 static void update_mmap_min_addr(void)
85873 {
85874 +#ifndef SPARC
85875 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
85876 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
85877 mmap_min_addr = dac_mmap_min_addr;
85878 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
85879 #else
85880 mmap_min_addr = dac_mmap_min_addr;
85881 #endif
85882 +#endif
85883 }
85884
85885 /*
85886 diff --git a/security/root_plug.c b/security/root_plug.c
85887 index 2f7ffa6..0455400 100644
85888 --- a/security/root_plug.c
85889 +++ b/security/root_plug.c
85890 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
85891 return 0;
85892 }
85893
85894 -static struct security_operations rootplug_security_ops = {
85895 +static struct security_operations rootplug_security_ops __read_only = {
85896 .bprm_check_security = rootplug_bprm_check_security,
85897 };
85898
85899 diff --git a/security/security.c b/security/security.c
85900 index c4c6732..7abf13b 100644
85901 --- a/security/security.c
85902 +++ b/security/security.c
85903 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
85904 extern struct security_operations default_security_ops;
85905 extern void security_fixup_ops(struct security_operations *ops);
85906
85907 -struct security_operations *security_ops; /* Initialized to NULL */
85908 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
85909
85910 static inline int verify(struct security_operations *ops)
85911 {
85912 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
85913 * If there is already a security module registered with the kernel,
85914 * an error will be returned. Otherwise %0 is returned on success.
85915 */
85916 -int register_security(struct security_operations *ops)
85917 +int __init register_security(struct security_operations *ops)
85918 {
85919 if (verify(ops)) {
85920 printk(KERN_DEBUG "%s could not verify "
85921 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
85922 return security_ops->quota_on(dentry);
85923 }
85924
85925 -int security_syslog(int type)
85926 +int security_syslog(int type, bool from_file)
85927 {
85928 - return security_ops->syslog(type);
85929 + return security_ops->syslog(type, from_file);
85930 }
85931
85932 int security_settime(struct timespec *ts, struct timezone *tz)
85933 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
85934 index a106754..ca3a589 100644
85935 --- a/security/selinux/hooks.c
85936 +++ b/security/selinux/hooks.c
85937 @@ -76,6 +76,7 @@
85938 #include <linux/selinux.h>
85939 #include <linux/mutex.h>
85940 #include <linux/posix-timers.h>
85941 +#include <linux/syslog.h>
85942
85943 #include "avc.h"
85944 #include "objsec.h"
85945 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
85946 * Minimal support for a secondary security module,
85947 * just to allow the use of the capability module.
85948 */
85949 -static struct security_operations *secondary_ops;
85950 +static struct security_operations *secondary_ops __read_only;
85951
85952 /* Lists of inode and superblock security structures initialized
85953 before the policy was loaded. */
85954 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
85955 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
85956 }
85957
85958 -static int selinux_syslog(int type)
85959 +static int selinux_syslog(int type, bool from_file)
85960 {
85961 int rc;
85962
85963 - rc = cap_syslog(type);
85964 + rc = cap_syslog(type, from_file);
85965 if (rc)
85966 return rc;
85967
85968 switch (type) {
85969 - case 3: /* Read last kernel messages */
85970 - case 10: /* Return size of the log buffer */
85971 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
85972 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
85973 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
85974 break;
85975 - case 6: /* Disable logging to console */
85976 - case 7: /* Enable logging to console */
85977 - case 8: /* Set level of messages printed to console */
85978 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
85979 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
85980 + /* Set level of messages printed to console */
85981 + case SYSLOG_ACTION_CONSOLE_LEVEL:
85982 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
85983 break;
85984 - case 0: /* Close log */
85985 - case 1: /* Open log */
85986 - case 2: /* Read from log */
85987 - case 4: /* Read/clear last kernel messages */
85988 - case 5: /* Clear ring buffer */
85989 + case SYSLOG_ACTION_CLOSE: /* Close log */
85990 + case SYSLOG_ACTION_OPEN: /* Open log */
85991 + case SYSLOG_ACTION_READ: /* Read from log */
85992 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
85993 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
85994 default:
85995 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
85996 break;
85997 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
85998
85999 #endif
86000
86001 -static struct security_operations selinux_ops = {
86002 +static struct security_operations selinux_ops __read_only = {
86003 .name = "selinux",
86004
86005 .ptrace_access_check = selinux_ptrace_access_check,
86006 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
86007 avc_disable();
86008
86009 /* Reset security_ops to the secondary module, dummy or capability. */
86010 + pax_open_kernel();
86011 security_ops = secondary_ops;
86012 + pax_close_kernel();
86013
86014 /* Unregister netfilter hooks. */
86015 selinux_nf_ip_exit();
86016 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
86017 index 13128f9..c23c736 100644
86018 --- a/security/selinux/include/xfrm.h
86019 +++ b/security/selinux/include/xfrm.h
86020 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
86021
86022 static inline void selinux_xfrm_notify_policyload(void)
86023 {
86024 - atomic_inc(&flow_cache_genid);
86025 + atomic_inc_unchecked(&flow_cache_genid);
86026 }
86027 #else
86028 static inline int selinux_xfrm_enabled(void)
86029 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
86030 index ff17820..d68084c 100644
86031 --- a/security/selinux/ss/services.c
86032 +++ b/security/selinux/ss/services.c
86033 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
86034 int rc = 0;
86035 struct policy_file file = { data, len }, *fp = &file;
86036
86037 + pax_track_stack();
86038 +
86039 if (!ss_initialized) {
86040 avtab_cache_init();
86041 if (policydb_read(&policydb, fp)) {
86042 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
86043 index c33b6bb..b51f19e 100644
86044 --- a/security/smack/smack_lsm.c
86045 +++ b/security/smack/smack_lsm.c
86046 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
86047 *
86048 * Returns 0 on success, error code otherwise.
86049 */
86050 -static int smack_syslog(int type)
86051 +static int smack_syslog(int type, bool from_file)
86052 {
86053 int rc;
86054 char *sp = current_security();
86055
86056 - rc = cap_syslog(type);
86057 + rc = cap_syslog(type, from_file);
86058 if (rc != 0)
86059 return rc;
86060
86061 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
86062 return 0;
86063 }
86064
86065 -struct security_operations smack_ops = {
86066 +struct security_operations smack_ops __read_only = {
86067 .name = "smack",
86068
86069 .ptrace_access_check = smack_ptrace_access_check,
86070 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
86071 index 9548a09..9a5f384 100644
86072 --- a/security/tomoyo/tomoyo.c
86073 +++ b/security/tomoyo/tomoyo.c
86074 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
86075 * tomoyo_security_ops is a "struct security_operations" which is used for
86076 * registering TOMOYO.
86077 */
86078 -static struct security_operations tomoyo_security_ops = {
86079 +static struct security_operations tomoyo_security_ops __read_only = {
86080 .name = "tomoyo",
86081 .cred_alloc_blank = tomoyo_cred_alloc_blank,
86082 .cred_prepare = tomoyo_cred_prepare,
86083 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
86084 index 84bb07d..c2ab6b6 100644
86085 --- a/sound/aoa/codecs/onyx.c
86086 +++ b/sound/aoa/codecs/onyx.c
86087 @@ -53,7 +53,7 @@ struct onyx {
86088 spdif_locked:1,
86089 analog_locked:1,
86090 original_mute:2;
86091 - int open_count;
86092 + local_t open_count;
86093 struct codec_info *codec_info;
86094
86095 /* mutex serializes concurrent access to the device
86096 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
86097 struct onyx *onyx = cii->codec_data;
86098
86099 mutex_lock(&onyx->mutex);
86100 - onyx->open_count++;
86101 + local_inc(&onyx->open_count);
86102 mutex_unlock(&onyx->mutex);
86103
86104 return 0;
86105 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
86106 struct onyx *onyx = cii->codec_data;
86107
86108 mutex_lock(&onyx->mutex);
86109 - onyx->open_count--;
86110 - if (!onyx->open_count)
86111 + if (local_dec_and_test(&onyx->open_count))
86112 onyx->spdif_locked = onyx->analog_locked = 0;
86113 mutex_unlock(&onyx->mutex);
86114
86115 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
86116 index ffd2025..df062c9 100644
86117 --- a/sound/aoa/codecs/onyx.h
86118 +++ b/sound/aoa/codecs/onyx.h
86119 @@ -11,6 +11,7 @@
86120 #include <linux/i2c.h>
86121 #include <asm/pmac_low_i2c.h>
86122 #include <asm/prom.h>
86123 +#include <asm/local.h>
86124
86125 /* PCM3052 register definitions */
86126
86127 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
86128 index d9c9635..bc0a5a2 100644
86129 --- a/sound/core/oss/pcm_oss.c
86130 +++ b/sound/core/oss/pcm_oss.c
86131 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
86132 }
86133 } else {
86134 tmp = snd_pcm_oss_write2(substream,
86135 - (const char __force *)buf,
86136 + (const char __force_kernel *)buf,
86137 runtime->oss.period_bytes, 0);
86138 if (tmp <= 0)
86139 goto err;
86140 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
86141 xfer += tmp;
86142 runtime->oss.buffer_used -= tmp;
86143 } else {
86144 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
86145 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
86146 runtime->oss.period_bytes, 0);
86147 if (tmp <= 0)
86148 goto err;
86149 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
86150 index 038232d..7dd9e5c 100644
86151 --- a/sound/core/pcm_compat.c
86152 +++ b/sound/core/pcm_compat.c
86153 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
86154 int err;
86155
86156 fs = snd_enter_user();
86157 - err = snd_pcm_delay(substream, &delay);
86158 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
86159 snd_leave_user(fs);
86160 if (err < 0)
86161 return err;
86162 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
86163 index e6d2d97..4843949 100644
86164 --- a/sound/core/pcm_native.c
86165 +++ b/sound/core/pcm_native.c
86166 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
86167 switch (substream->stream) {
86168 case SNDRV_PCM_STREAM_PLAYBACK:
86169 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
86170 - (void __user *)arg);
86171 + (void __force_user *)arg);
86172 break;
86173 case SNDRV_PCM_STREAM_CAPTURE:
86174 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
86175 - (void __user *)arg);
86176 + (void __force_user *)arg);
86177 break;
86178 default:
86179 result = -EINVAL;
86180 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
86181 index 1f99767..14636533 100644
86182 --- a/sound/core/seq/seq_device.c
86183 +++ b/sound/core/seq/seq_device.c
86184 @@ -63,7 +63,7 @@ struct ops_list {
86185 int argsize; /* argument size */
86186
86187 /* operators */
86188 - struct snd_seq_dev_ops ops;
86189 + struct snd_seq_dev_ops *ops;
86190
86191 /* registred devices */
86192 struct list_head dev_list; /* list of devices */
86193 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
86194
86195 mutex_lock(&ops->reg_mutex);
86196 /* copy driver operators */
86197 - ops->ops = *entry;
86198 + ops->ops = entry;
86199 ops->driver |= DRIVER_LOADED;
86200 ops->argsize = argsize;
86201
86202 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
86203 dev->name, ops->id, ops->argsize, dev->argsize);
86204 return -EINVAL;
86205 }
86206 - if (ops->ops.init_device(dev) >= 0) {
86207 + if (ops->ops->init_device(dev) >= 0) {
86208 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
86209 ops->num_init_devices++;
86210 } else {
86211 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
86212 dev->name, ops->id, ops->argsize, dev->argsize);
86213 return -EINVAL;
86214 }
86215 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
86216 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
86217 dev->status = SNDRV_SEQ_DEVICE_FREE;
86218 dev->driver_data = NULL;
86219 ops->num_init_devices--;
86220 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
86221 index 9284829..ac8e8b2 100644
86222 --- a/sound/drivers/mts64.c
86223 +++ b/sound/drivers/mts64.c
86224 @@ -27,6 +27,7 @@
86225 #include <sound/initval.h>
86226 #include <sound/rawmidi.h>
86227 #include <sound/control.h>
86228 +#include <asm/local.h>
86229
86230 #define CARD_NAME "Miditerminal 4140"
86231 #define DRIVER_NAME "MTS64"
86232 @@ -65,7 +66,7 @@ struct mts64 {
86233 struct pardevice *pardev;
86234 int pardev_claimed;
86235
86236 - int open_count;
86237 + local_t open_count;
86238 int current_midi_output_port;
86239 int current_midi_input_port;
86240 u8 mode[MTS64_NUM_INPUT_PORTS];
86241 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86242 {
86243 struct mts64 *mts = substream->rmidi->private_data;
86244
86245 - if (mts->open_count == 0) {
86246 + if (local_read(&mts->open_count) == 0) {
86247 /* We don't need a spinlock here, because this is just called
86248 if the device has not been opened before.
86249 So there aren't any IRQs from the device */
86250 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86251
86252 msleep(50);
86253 }
86254 - ++(mts->open_count);
86255 + local_inc(&mts->open_count);
86256
86257 return 0;
86258 }
86259 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86260 struct mts64 *mts = substream->rmidi->private_data;
86261 unsigned long flags;
86262
86263 - --(mts->open_count);
86264 - if (mts->open_count == 0) {
86265 + if (local_dec_return(&mts->open_count) == 0) {
86266 /* We need the spinlock_irqsave here because we can still
86267 have IRQs at this point */
86268 spin_lock_irqsave(&mts->lock, flags);
86269 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86270
86271 msleep(500);
86272
86273 - } else if (mts->open_count < 0)
86274 - mts->open_count = 0;
86275 + } else if (local_read(&mts->open_count) < 0)
86276 + local_set(&mts->open_count, 0);
86277
86278 return 0;
86279 }
86280 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
86281 index 01997f2..cbc1195 100644
86282 --- a/sound/drivers/opl4/opl4_lib.c
86283 +++ b/sound/drivers/opl4/opl4_lib.c
86284 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
86285 MODULE_DESCRIPTION("OPL4 driver");
86286 MODULE_LICENSE("GPL");
86287
86288 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
86289 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
86290 {
86291 int timeout = 10;
86292 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
86293 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
86294 index 60158e2..0a0cc1a 100644
86295 --- a/sound/drivers/portman2x4.c
86296 +++ b/sound/drivers/portman2x4.c
86297 @@ -46,6 +46,7 @@
86298 #include <sound/initval.h>
86299 #include <sound/rawmidi.h>
86300 #include <sound/control.h>
86301 +#include <asm/local.h>
86302
86303 #define CARD_NAME "Portman 2x4"
86304 #define DRIVER_NAME "portman"
86305 @@ -83,7 +84,7 @@ struct portman {
86306 struct pardevice *pardev;
86307 int pardev_claimed;
86308
86309 - int open_count;
86310 + local_t open_count;
86311 int mode[PORTMAN_NUM_INPUT_PORTS];
86312 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
86313 };
86314 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
86315 index 02f79d2..8691d43 100644
86316 --- a/sound/isa/cmi8330.c
86317 +++ b/sound/isa/cmi8330.c
86318 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
86319
86320 struct snd_pcm *pcm;
86321 struct snd_cmi8330_stream {
86322 - struct snd_pcm_ops ops;
86323 + snd_pcm_ops_no_const ops;
86324 snd_pcm_open_callback_t open;
86325 void *private_data; /* sb or wss */
86326 } streams[2];
86327 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
86328 index 733b014..56ce96f 100644
86329 --- a/sound/oss/sb_audio.c
86330 +++ b/sound/oss/sb_audio.c
86331 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
86332 buf16 = (signed short *)(localbuf + localoffs);
86333 while (c)
86334 {
86335 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86336 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86337 if (copy_from_user(lbuf8,
86338 userbuf+useroffs + p,
86339 locallen))
86340 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
86341 index 3136c88..28ad950 100644
86342 --- a/sound/oss/swarm_cs4297a.c
86343 +++ b/sound/oss/swarm_cs4297a.c
86344 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
86345 {
86346 struct cs4297a_state *s;
86347 u32 pwr, id;
86348 - mm_segment_t fs;
86349 int rval;
86350 #ifndef CONFIG_BCM_CS4297A_CSWARM
86351 u64 cfg;
86352 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
86353 if (!rval) {
86354 char *sb1250_duart_present;
86355
86356 +#if 0
86357 + mm_segment_t fs;
86358 fs = get_fs();
86359 set_fs(KERNEL_DS);
86360 -#if 0
86361 val = SOUND_MASK_LINE;
86362 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
86363 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
86364 val = initvol[i].vol;
86365 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
86366 }
86367 + set_fs(fs);
86368 // cs4297a_write_ac97(s, 0x18, 0x0808);
86369 #else
86370 // cs4297a_write_ac97(s, 0x5e, 0x180);
86371 cs4297a_write_ac97(s, 0x02, 0x0808);
86372 cs4297a_write_ac97(s, 0x18, 0x0808);
86373 #endif
86374 - set_fs(fs);
86375
86376 list_add(&s->list, &cs4297a_devs);
86377
86378 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
86379 index 78288db..0406809 100644
86380 --- a/sound/pci/ac97/ac97_codec.c
86381 +++ b/sound/pci/ac97/ac97_codec.c
86382 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
86383 }
86384
86385 /* build_ops to do nothing */
86386 -static struct snd_ac97_build_ops null_build_ops;
86387 +static const struct snd_ac97_build_ops null_build_ops;
86388
86389 #ifdef CONFIG_SND_AC97_POWER_SAVE
86390 static void do_update_power(struct work_struct *work)
86391 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
86392 index eeb2e23..82bf625 100644
86393 --- a/sound/pci/ac97/ac97_patch.c
86394 +++ b/sound/pci/ac97/ac97_patch.c
86395 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
86396 return 0;
86397 }
86398
86399 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86400 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86401 .build_spdif = patch_yamaha_ymf743_build_spdif,
86402 .build_3d = patch_yamaha_ymf7x3_3d,
86403 };
86404 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
86405 return 0;
86406 }
86407
86408 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86409 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86410 .build_3d = patch_yamaha_ymf7x3_3d,
86411 .build_post_spdif = patch_yamaha_ymf753_post_spdif
86412 };
86413 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
86414 return 0;
86415 }
86416
86417 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86418 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86419 .build_specific = patch_wolfson_wm9703_specific,
86420 };
86421
86422 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
86423 return 0;
86424 }
86425
86426 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86427 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86428 .build_specific = patch_wolfson_wm9704_specific,
86429 };
86430
86431 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
86432 return 0;
86433 }
86434
86435 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86436 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86437 .build_specific = patch_wolfson_wm9705_specific,
86438 };
86439
86440 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
86441 return 0;
86442 }
86443
86444 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86445 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86446 .build_specific = patch_wolfson_wm9711_specific,
86447 };
86448
86449 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
86450 }
86451 #endif
86452
86453 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86454 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86455 .build_specific = patch_wolfson_wm9713_specific,
86456 .build_3d = patch_wolfson_wm9713_3d,
86457 #ifdef CONFIG_PM
86458 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
86459 return 0;
86460 }
86461
86462 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86463 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86464 .build_3d = patch_sigmatel_stac9700_3d,
86465 .build_specific = patch_sigmatel_stac97xx_specific
86466 };
86467 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
86468 return patch_sigmatel_stac97xx_specific(ac97);
86469 }
86470
86471 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86472 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86473 .build_3d = patch_sigmatel_stac9708_3d,
86474 .build_specific = patch_sigmatel_stac9708_specific
86475 };
86476 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
86477 return 0;
86478 }
86479
86480 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86481 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86482 .build_3d = patch_sigmatel_stac9700_3d,
86483 .build_specific = patch_sigmatel_stac9758_specific
86484 };
86485 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
86486 return 0;
86487 }
86488
86489 -static struct snd_ac97_build_ops patch_cirrus_ops = {
86490 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
86491 .build_spdif = patch_cirrus_build_spdif
86492 };
86493
86494 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
86495 return 0;
86496 }
86497
86498 -static struct snd_ac97_build_ops patch_conexant_ops = {
86499 +static const struct snd_ac97_build_ops patch_conexant_ops = {
86500 .build_spdif = patch_conexant_build_spdif
86501 };
86502
86503 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
86504 }
86505 }
86506
86507 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
86508 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
86509 #ifdef CONFIG_PM
86510 .resume = ad18xx_resume
86511 #endif
86512 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
86513 return 0;
86514 }
86515
86516 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
86517 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
86518 .build_specific = &patch_ad1885_specific,
86519 #ifdef CONFIG_PM
86520 .resume = ad18xx_resume
86521 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
86522 return 0;
86523 }
86524
86525 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
86526 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
86527 .build_specific = &patch_ad1886_specific,
86528 #ifdef CONFIG_PM
86529 .resume = ad18xx_resume
86530 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
86531 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86532 }
86533
86534 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86535 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86536 .build_post_spdif = patch_ad198x_post_spdif,
86537 .build_specific = patch_ad1981a_specific,
86538 #ifdef CONFIG_PM
86539 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
86540 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86541 }
86542
86543 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86544 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86545 .build_post_spdif = patch_ad198x_post_spdif,
86546 .build_specific = patch_ad1981b_specific,
86547 #ifdef CONFIG_PM
86548 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
86549 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
86550 }
86551
86552 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
86553 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
86554 .build_post_spdif = patch_ad198x_post_spdif,
86555 .build_specific = patch_ad1888_specific,
86556 #ifdef CONFIG_PM
86557 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
86558 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
86559 }
86560
86561 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
86562 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
86563 .build_post_spdif = patch_ad198x_post_spdif,
86564 .build_specific = patch_ad1980_specific,
86565 #ifdef CONFIG_PM
86566 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
86567 ARRAY_SIZE(snd_ac97_ad1985_controls));
86568 }
86569
86570 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
86571 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
86572 .build_post_spdif = patch_ad198x_post_spdif,
86573 .build_specific = patch_ad1985_specific,
86574 #ifdef CONFIG_PM
86575 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
86576 ARRAY_SIZE(snd_ac97_ad1985_controls));
86577 }
86578
86579 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
86580 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
86581 .build_post_spdif = patch_ad198x_post_spdif,
86582 .build_specific = patch_ad1986_specific,
86583 #ifdef CONFIG_PM
86584 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
86585 return 0;
86586 }
86587
86588 -static struct snd_ac97_build_ops patch_alc650_ops = {
86589 +static const struct snd_ac97_build_ops patch_alc650_ops = {
86590 .build_specific = patch_alc650_specific,
86591 .update_jacks = alc650_update_jacks
86592 };
86593 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
86594 return 0;
86595 }
86596
86597 -static struct snd_ac97_build_ops patch_alc655_ops = {
86598 +static const struct snd_ac97_build_ops patch_alc655_ops = {
86599 .build_specific = patch_alc655_specific,
86600 .update_jacks = alc655_update_jacks
86601 };
86602 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
86603 return 0;
86604 }
86605
86606 -static struct snd_ac97_build_ops patch_alc850_ops = {
86607 +static const struct snd_ac97_build_ops patch_alc850_ops = {
86608 .build_specific = patch_alc850_specific,
86609 .update_jacks = alc850_update_jacks
86610 };
86611 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
86612 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
86613 }
86614
86615 -static struct snd_ac97_build_ops patch_cm9738_ops = {
86616 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
86617 .build_specific = patch_cm9738_specific,
86618 .update_jacks = cm9738_update_jacks
86619 };
86620 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
86621 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
86622 }
86623
86624 -static struct snd_ac97_build_ops patch_cm9739_ops = {
86625 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
86626 .build_specific = patch_cm9739_specific,
86627 .build_post_spdif = patch_cm9739_post_spdif,
86628 .update_jacks = cm9739_update_jacks
86629 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
86630 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
86631 }
86632
86633 -static struct snd_ac97_build_ops patch_cm9761_ops = {
86634 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
86635 .build_specific = patch_cm9761_specific,
86636 .build_post_spdif = patch_cm9761_post_spdif,
86637 .update_jacks = cm9761_update_jacks
86638 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
86639 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
86640 }
86641
86642 -static struct snd_ac97_build_ops patch_cm9780_ops = {
86643 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
86644 .build_specific = patch_cm9780_specific,
86645 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
86646 };
86647 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
86648 return 0;
86649 }
86650
86651 -static struct snd_ac97_build_ops patch_vt1616_ops = {
86652 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
86653 .build_specific = patch_vt1616_specific
86654 };
86655
86656 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
86657 return 0;
86658 }
86659
86660 -static struct snd_ac97_build_ops patch_it2646_ops = {
86661 +static const struct snd_ac97_build_ops patch_it2646_ops = {
86662 .build_specific = patch_it2646_specific,
86663 .update_jacks = it2646_update_jacks
86664 };
86665 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
86666 return 0;
86667 }
86668
86669 -static struct snd_ac97_build_ops patch_si3036_ops = {
86670 +static const struct snd_ac97_build_ops patch_si3036_ops = {
86671 .build_specific = patch_si3036_specific,
86672 };
86673
86674 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
86675 return 0;
86676 }
86677
86678 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
86679 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
86680 .build_specific = patch_ucb1400_specific,
86681 };
86682
86683 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
86684 index 99552fb..4dcc2c5 100644
86685 --- a/sound/pci/hda/hda_codec.h
86686 +++ b/sound/pci/hda/hda_codec.h
86687 @@ -580,7 +580,7 @@ struct hda_bus_ops {
86688 /* notify power-up/down from codec to controller */
86689 void (*pm_notify)(struct hda_bus *bus);
86690 #endif
86691 -};
86692 +} __no_const;
86693
86694 /* template to pass to the bus constructor */
86695 struct hda_bus_template {
86696 @@ -675,6 +675,7 @@ struct hda_codec_ops {
86697 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
86698 #endif
86699 };
86700 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
86701
86702 /* record for amp information cache */
86703 struct hda_cache_head {
86704 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
86705 struct snd_pcm_substream *substream);
86706 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
86707 struct snd_pcm_substream *substream);
86708 -};
86709 +} __no_const;
86710
86711 /* PCM information for each substream */
86712 struct hda_pcm_stream {
86713 @@ -760,7 +761,7 @@ struct hda_codec {
86714 const char *modelname; /* model name for preset */
86715
86716 /* set by patch */
86717 - struct hda_codec_ops patch_ops;
86718 + hda_codec_ops_no_const patch_ops;
86719
86720 /* PCM to create, set by patch_ops.build_pcms callback */
86721 unsigned int num_pcms;
86722 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
86723 index fb684f0..2b11cea 100644
86724 --- a/sound/pci/hda/patch_atihdmi.c
86725 +++ b/sound/pci/hda/patch_atihdmi.c
86726 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
86727 */
86728 spec->multiout.dig_out_nid = CVT_NID;
86729
86730 - codec->patch_ops = atihdmi_patch_ops;
86731 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
86732
86733 return 0;
86734 }
86735 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
86736 index 7c23016..c5bfdd7 100644
86737 --- a/sound/pci/hda/patch_intelhdmi.c
86738 +++ b/sound/pci/hda/patch_intelhdmi.c
86739 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
86740 cp_ready);
86741
86742 /* TODO */
86743 - if (cp_state)
86744 - ;
86745 - if (cp_ready)
86746 - ;
86747 + if (cp_state) {
86748 + }
86749 + if (cp_ready) {
86750 + }
86751 }
86752
86753
86754 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
86755 spec->multiout.dig_out_nid = cvt_nid;
86756
86757 codec->spec = spec;
86758 - codec->patch_ops = intel_hdmi_patch_ops;
86759 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
86760
86761 snd_hda_eld_proc_new(codec, &spec->sink_eld);
86762
86763 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
86764 index 6afdab0..68ed352 100644
86765 --- a/sound/pci/hda/patch_nvhdmi.c
86766 +++ b/sound/pci/hda/patch_nvhdmi.c
86767 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
86768 spec->multiout.max_channels = 8;
86769 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86770
86771 - codec->patch_ops = nvhdmi_patch_ops_8ch;
86772 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
86773
86774 return 0;
86775 }
86776 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
86777 spec->multiout.max_channels = 2;
86778 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86779
86780 - codec->patch_ops = nvhdmi_patch_ops_2ch;
86781 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
86782
86783 return 0;
86784 }
86785 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
86786 index 2fcd70d..a143eaf 100644
86787 --- a/sound/pci/hda/patch_sigmatel.c
86788 +++ b/sound/pci/hda/patch_sigmatel.c
86789 @@ -5220,7 +5220,7 @@ again:
86790 snd_hda_codec_write_cache(codec, nid, 0,
86791 AC_VERB_SET_CONNECT_SEL, num_dacs);
86792
86793 - codec->patch_ops = stac92xx_patch_ops;
86794 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86795
86796 codec->proc_widget_hook = stac92hd_proc_hook;
86797
86798 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
86799 return -ENOMEM;
86800
86801 codec->spec = spec;
86802 - codec->patch_ops = stac92xx_patch_ops;
86803 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86804 spec->num_pins = STAC92HD71BXX_NUM_PINS;
86805 switch (codec->vendor_id) {
86806 case 0x111d76b6:
86807 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
86808 index d063149..01599a4 100644
86809 --- a/sound/pci/ice1712/ice1712.h
86810 +++ b/sound/pci/ice1712/ice1712.h
86811 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
86812 unsigned int mask_flags; /* total mask bits */
86813 struct snd_akm4xxx_ops {
86814 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
86815 - } ops;
86816 + } __no_const ops;
86817 };
86818
86819 struct snd_ice1712_spdif {
86820 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
86821 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86822 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86823 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86824 - } ops;
86825 + } __no_const ops;
86826 };
86827
86828
86829 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
86830 index 9e7d12e..3e3bc64 100644
86831 --- a/sound/pci/intel8x0m.c
86832 +++ b/sound/pci/intel8x0m.c
86833 @@ -1264,7 +1264,7 @@ static struct shortname_table {
86834 { 0x5455, "ALi M5455" },
86835 { 0x746d, "AMD AMD8111" },
86836 #endif
86837 - { 0 },
86838 + { 0, },
86839 };
86840
86841 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
86842 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
86843 index 5518371..45cf7ac 100644
86844 --- a/sound/pci/ymfpci/ymfpci_main.c
86845 +++ b/sound/pci/ymfpci/ymfpci_main.c
86846 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
86847 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
86848 break;
86849 }
86850 - if (atomic_read(&chip->interrupt_sleep_count)) {
86851 - atomic_set(&chip->interrupt_sleep_count, 0);
86852 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86853 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86854 wake_up(&chip->interrupt_sleep);
86855 }
86856 __end:
86857 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
86858 continue;
86859 init_waitqueue_entry(&wait, current);
86860 add_wait_queue(&chip->interrupt_sleep, &wait);
86861 - atomic_inc(&chip->interrupt_sleep_count);
86862 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
86863 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
86864 remove_wait_queue(&chip->interrupt_sleep, &wait);
86865 }
86866 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
86867 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
86868 spin_unlock(&chip->reg_lock);
86869
86870 - if (atomic_read(&chip->interrupt_sleep_count)) {
86871 - atomic_set(&chip->interrupt_sleep_count, 0);
86872 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86873 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86874 wake_up(&chip->interrupt_sleep);
86875 }
86876 }
86877 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
86878 spin_lock_init(&chip->reg_lock);
86879 spin_lock_init(&chip->voice_lock);
86880 init_waitqueue_head(&chip->interrupt_sleep);
86881 - atomic_set(&chip->interrupt_sleep_count, 0);
86882 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86883 chip->card = card;
86884 chip->pci = pci;
86885 chip->irq = -1;
86886 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
86887 index 0a1b2f6..776bb19 100644
86888 --- a/sound/soc/soc-core.c
86889 +++ b/sound/soc/soc-core.c
86890 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
86891 }
86892
86893 /* ASoC PCM operations */
86894 -static struct snd_pcm_ops soc_pcm_ops = {
86895 +static snd_pcm_ops_no_const soc_pcm_ops = {
86896 .open = soc_pcm_open,
86897 .close = soc_codec_close,
86898 .hw_params = soc_pcm_hw_params,
86899 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
86900 index 79633ea..9732e90 100644
86901 --- a/sound/usb/usbaudio.c
86902 +++ b/sound/usb/usbaudio.c
86903 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
86904 switch (cmd) {
86905 case SNDRV_PCM_TRIGGER_START:
86906 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86907 - subs->ops.prepare = prepare_playback_urb;
86908 + *(void **)&subs->ops.prepare = prepare_playback_urb;
86909 return 0;
86910 case SNDRV_PCM_TRIGGER_STOP:
86911 return deactivate_urbs(subs, 0, 0);
86912 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86913 - subs->ops.prepare = prepare_nodata_playback_urb;
86914 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86915 return 0;
86916 default:
86917 return -EINVAL;
86918 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
86919
86920 switch (cmd) {
86921 case SNDRV_PCM_TRIGGER_START:
86922 - subs->ops.retire = retire_capture_urb;
86923 + *(void **)&subs->ops.retire = retire_capture_urb;
86924 return start_urbs(subs, substream->runtime);
86925 case SNDRV_PCM_TRIGGER_STOP:
86926 return deactivate_urbs(subs, 0, 0);
86927 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86928 - subs->ops.retire = retire_paused_capture_urb;
86929 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
86930 return 0;
86931 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86932 - subs->ops.retire = retire_capture_urb;
86933 + *(void **)&subs->ops.retire = retire_capture_urb;
86934 return 0;
86935 default:
86936 return -EINVAL;
86937 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
86938 /* for playback, submit the URBs now; otherwise, the first hwptr_done
86939 * updates for all URBs would happen at the same time when starting */
86940 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
86941 - subs->ops.prepare = prepare_nodata_playback_urb;
86942 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86943 return start_urbs(subs, runtime);
86944 } else
86945 return 0;
86946 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
86947 subs->direction = stream;
86948 subs->dev = as->chip->dev;
86949 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
86950 - subs->ops = audio_urb_ops[stream];
86951 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
86952 } else {
86953 - subs->ops = audio_urb_ops_high_speed[stream];
86954 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
86955 switch (as->chip->usb_id) {
86956 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
86957 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
86958 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
86959 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86960 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86961 break;
86962 }
86963 }
86964 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
86965 new file mode 100644
86966 index 0000000..29b6b75
86967 --- /dev/null
86968 +++ b/tools/gcc/Makefile
86969 @@ -0,0 +1,21 @@
86970 +#CC := gcc
86971 +#PLUGIN_SOURCE_FILES := pax_plugin.c
86972 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
86973 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
86974 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
86975 +
86976 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99
86977 +
86978 +hostlibs-y := constify_plugin.so
86979 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
86980 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
86981 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
86982 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
86983 +
86984 +always := $(hostlibs-y)
86985 +
86986 +constify_plugin-objs := constify_plugin.o
86987 +stackleak_plugin-objs := stackleak_plugin.o
86988 +kallocstat_plugin-objs := kallocstat_plugin.o
86989 +kernexec_plugin-objs := kernexec_plugin.o
86990 +checker_plugin-objs := checker_plugin.o
86991 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
86992 new file mode 100644
86993 index 0000000..d41b5af
86994 --- /dev/null
86995 +++ b/tools/gcc/checker_plugin.c
86996 @@ -0,0 +1,171 @@
86997 +/*
86998 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86999 + * Licensed under the GPL v2
87000 + *
87001 + * Note: the choice of the license means that the compilation process is
87002 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87003 + * but for the kernel it doesn't matter since it doesn't link against
87004 + * any of the gcc libraries
87005 + *
87006 + * gcc plugin to implement various sparse (source code checker) features
87007 + *
87008 + * TODO:
87009 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
87010 + *
87011 + * BUGS:
87012 + * - none known
87013 + */
87014 +#include "gcc-plugin.h"
87015 +#include "config.h"
87016 +#include "system.h"
87017 +#include "coretypes.h"
87018 +#include "tree.h"
87019 +#include "tree-pass.h"
87020 +#include "flags.h"
87021 +#include "intl.h"
87022 +#include "toplev.h"
87023 +#include "plugin.h"
87024 +//#include "expr.h" where are you...
87025 +#include "diagnostic.h"
87026 +#include "plugin-version.h"
87027 +#include "tm.h"
87028 +#include "function.h"
87029 +#include "basic-block.h"
87030 +#include "gimple.h"
87031 +#include "rtl.h"
87032 +#include "emit-rtl.h"
87033 +#include "tree-flow.h"
87034 +#include "target.h"
87035 +
87036 +extern void c_register_addr_space (const char *str, addr_space_t as);
87037 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
87038 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
87039 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
87040 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
87041 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
87042 +
87043 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87044 +extern rtx emit_move_insn(rtx x, rtx y);
87045 +
87046 +int plugin_is_GPL_compatible;
87047 +
87048 +static struct plugin_info checker_plugin_info = {
87049 + .version = "201111150100",
87050 +};
87051 +
87052 +#define ADDR_SPACE_KERNEL 0
87053 +#define ADDR_SPACE_FORCE_KERNEL 1
87054 +#define ADDR_SPACE_USER 2
87055 +#define ADDR_SPACE_FORCE_USER 3
87056 +#define ADDR_SPACE_IOMEM 0
87057 +#define ADDR_SPACE_FORCE_IOMEM 0
87058 +#define ADDR_SPACE_PERCPU 0
87059 +#define ADDR_SPACE_FORCE_PERCPU 0
87060 +#define ADDR_SPACE_RCU 0
87061 +#define ADDR_SPACE_FORCE_RCU 0
87062 +
87063 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
87064 +{
87065 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
87066 +}
87067 +
87068 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
87069 +{
87070 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
87071 +}
87072 +
87073 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
87074 +{
87075 + return default_addr_space_valid_pointer_mode(mode, as);
87076 +}
87077 +
87078 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
87079 +{
87080 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
87081 +}
87082 +
87083 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
87084 +{
87085 + return default_addr_space_legitimize_address(x, oldx, mode, as);
87086 +}
87087 +
87088 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
87089 +{
87090 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
87091 + return true;
87092 +
87093 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
87094 + return true;
87095 +
87096 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
87097 + return true;
87098 +
87099 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
87100 + return true;
87101 +
87102 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
87103 + return true;
87104 +
87105 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
87106 + return true;
87107 +
87108 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
87109 + return true;
87110 +
87111 + return subset == superset;
87112 +}
87113 +
87114 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
87115 +{
87116 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
87117 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
87118 +
87119 + return op;
87120 +}
87121 +
87122 +static void register_checker_address_spaces(void *event_data, void *data)
87123 +{
87124 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
87125 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
87126 + c_register_addr_space("__user", ADDR_SPACE_USER);
87127 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
87128 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
87129 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
87130 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
87131 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
87132 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
87133 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
87134 +
87135 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
87136 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
87137 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
87138 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
87139 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
87140 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
87141 + targetm.addr_space.convert = checker_addr_space_convert;
87142 +}
87143 +
87144 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87145 +{
87146 + const char * const plugin_name = plugin_info->base_name;
87147 + const int argc = plugin_info->argc;
87148 + const struct plugin_argument * const argv = plugin_info->argv;
87149 + int i;
87150 +
87151 + if (!plugin_default_version_check(version, &gcc_version)) {
87152 + error(G_("incompatible gcc/plugin versions"));
87153 + return 1;
87154 + }
87155 +
87156 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
87157 +
87158 + for (i = 0; i < argc; ++i)
87159 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87160 +
87161 + if (TARGET_64BIT == 0)
87162 + return 0;
87163 +
87164 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
87165 +
87166 + return 0;
87167 +}
87168 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
87169 new file mode 100644
87170 index 0000000..704a564
87171 --- /dev/null
87172 +++ b/tools/gcc/constify_plugin.c
87173 @@ -0,0 +1,303 @@
87174 +/*
87175 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
87176 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
87177 + * Licensed under the GPL v2, or (at your option) v3
87178 + *
87179 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
87180 + *
87181 + * Homepage:
87182 + * http://www.grsecurity.net/~ephox/const_plugin/
87183 + *
87184 + * Usage:
87185 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
87186 + * $ gcc -fplugin=constify_plugin.so test.c -O2
87187 + */
87188 +
87189 +#include "gcc-plugin.h"
87190 +#include "config.h"
87191 +#include "system.h"
87192 +#include "coretypes.h"
87193 +#include "tree.h"
87194 +#include "tree-pass.h"
87195 +#include "flags.h"
87196 +#include "intl.h"
87197 +#include "toplev.h"
87198 +#include "plugin.h"
87199 +#include "diagnostic.h"
87200 +#include "plugin-version.h"
87201 +#include "tm.h"
87202 +#include "function.h"
87203 +#include "basic-block.h"
87204 +#include "gimple.h"
87205 +#include "rtl.h"
87206 +#include "emit-rtl.h"
87207 +#include "tree-flow.h"
87208 +
87209 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
87210 +
87211 +int plugin_is_GPL_compatible;
87212 +
87213 +static struct plugin_info const_plugin_info = {
87214 + .version = "201111150100",
87215 + .help = "no-constify\tturn off constification\n",
87216 +};
87217 +
87218 +static void constify_type(tree type);
87219 +static bool walk_struct(tree node);
87220 +
87221 +static tree deconstify_type(tree old_type)
87222 +{
87223 + tree new_type, field;
87224 +
87225 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
87226 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
87227 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
87228 + DECL_FIELD_CONTEXT(field) = new_type;
87229 + TYPE_READONLY(new_type) = 0;
87230 + C_TYPE_FIELDS_READONLY(new_type) = 0;
87231 + return new_type;
87232 +}
87233 +
87234 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87235 +{
87236 + tree type;
87237 +
87238 + *no_add_attrs = true;
87239 + if (TREE_CODE(*node) == FUNCTION_DECL) {
87240 + error("%qE attribute does not apply to functions", name);
87241 + return NULL_TREE;
87242 + }
87243 +
87244 + if (TREE_CODE(*node) == VAR_DECL) {
87245 + error("%qE attribute does not apply to variables", name);
87246 + return NULL_TREE;
87247 + }
87248 +
87249 + if (TYPE_P(*node)) {
87250 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
87251 + *no_add_attrs = false;
87252 + else
87253 + error("%qE attribute applies to struct and union types only", name);
87254 + return NULL_TREE;
87255 + }
87256 +
87257 + type = TREE_TYPE(*node);
87258 +
87259 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
87260 + error("%qE attribute applies to struct and union types only", name);
87261 + return NULL_TREE;
87262 + }
87263 +
87264 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
87265 + error("%qE attribute is already applied to the type", name);
87266 + return NULL_TREE;
87267 + }
87268 +
87269 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
87270 + error("%qE attribute used on type that is not constified", name);
87271 + return NULL_TREE;
87272 + }
87273 +
87274 + if (TREE_CODE(*node) == TYPE_DECL) {
87275 + TREE_TYPE(*node) = deconstify_type(type);
87276 + TREE_READONLY(*node) = 0;
87277 + return NULL_TREE;
87278 + }
87279 +
87280 + return NULL_TREE;
87281 +}
87282 +
87283 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87284 +{
87285 + *no_add_attrs = true;
87286 + if (!TYPE_P(*node)) {
87287 + error("%qE attribute applies to types only", name);
87288 + return NULL_TREE;
87289 + }
87290 +
87291 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
87292 + error("%qE attribute applies to struct and union types only", name);
87293 + return NULL_TREE;
87294 + }
87295 +
87296 + *no_add_attrs = false;
87297 + constify_type(*node);
87298 + return NULL_TREE;
87299 +}
87300 +
87301 +static struct attribute_spec no_const_attr = {
87302 + .name = "no_const",
87303 + .min_length = 0,
87304 + .max_length = 0,
87305 + .decl_required = false,
87306 + .type_required = false,
87307 + .function_type_required = false,
87308 + .handler = handle_no_const_attribute,
87309 +#if BUILDING_GCC_VERSION >= 4007
87310 + .affects_type_identity = true
87311 +#endif
87312 +};
87313 +
87314 +static struct attribute_spec do_const_attr = {
87315 + .name = "do_const",
87316 + .min_length = 0,
87317 + .max_length = 0,
87318 + .decl_required = false,
87319 + .type_required = false,
87320 + .function_type_required = false,
87321 + .handler = handle_do_const_attribute,
87322 +#if BUILDING_GCC_VERSION >= 4007
87323 + .affects_type_identity = true
87324 +#endif
87325 +};
87326 +
87327 +static void register_attributes(void *event_data, void *data)
87328 +{
87329 + register_attribute(&no_const_attr);
87330 + register_attribute(&do_const_attr);
87331 +}
87332 +
87333 +static void constify_type(tree type)
87334 +{
87335 + TYPE_READONLY(type) = 1;
87336 + C_TYPE_FIELDS_READONLY(type) = 1;
87337 +}
87338 +
87339 +static bool is_fptr(tree field)
87340 +{
87341 + tree ptr = TREE_TYPE(field);
87342 +
87343 + if (TREE_CODE(ptr) != POINTER_TYPE)
87344 + return false;
87345 +
87346 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
87347 +}
87348 +
87349 +static bool walk_struct(tree node)
87350 +{
87351 + tree field;
87352 +
87353 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
87354 + return false;
87355 +
87356 + if (TYPE_FIELDS(node) == NULL_TREE)
87357 + return false;
87358 +
87359 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
87360 + tree type = TREE_TYPE(field);
87361 + enum tree_code code = TREE_CODE(type);
87362 + if (code == RECORD_TYPE || code == UNION_TYPE) {
87363 + if (!(walk_struct(type)))
87364 + return false;
87365 + } else if (!is_fptr(field) && !TREE_READONLY(field))
87366 + return false;
87367 + }
87368 + return true;
87369 +}
87370 +
87371 +static void finish_type(void *event_data, void *data)
87372 +{
87373 + tree type = (tree)event_data;
87374 +
87375 + if (type == NULL_TREE)
87376 + return;
87377 +
87378 + if (TYPE_READONLY(type))
87379 + return;
87380 +
87381 + if (walk_struct(type))
87382 + constify_type(type);
87383 +}
87384 +
87385 +static unsigned int check_local_variables(void);
87386 +
87387 +struct gimple_opt_pass pass_local_variable = {
87388 + {
87389 + .type = GIMPLE_PASS,
87390 + .name = "check_local_variables",
87391 + .gate = NULL,
87392 + .execute = check_local_variables,
87393 + .sub = NULL,
87394 + .next = NULL,
87395 + .static_pass_number = 0,
87396 + .tv_id = TV_NONE,
87397 + .properties_required = 0,
87398 + .properties_provided = 0,
87399 + .properties_destroyed = 0,
87400 + .todo_flags_start = 0,
87401 + .todo_flags_finish = 0
87402 + }
87403 +};
87404 +
87405 +static unsigned int check_local_variables(void)
87406 +{
87407 + tree var;
87408 + referenced_var_iterator rvi;
87409 +
87410 +#if BUILDING_GCC_VERSION == 4005
87411 + FOR_EACH_REFERENCED_VAR(var, rvi) {
87412 +#else
87413 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
87414 +#endif
87415 + tree type = TREE_TYPE(var);
87416 +
87417 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
87418 + continue;
87419 +
87420 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
87421 + continue;
87422 +
87423 + if (!TYPE_READONLY(type))
87424 + continue;
87425 +
87426 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
87427 +// continue;
87428 +
87429 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
87430 +// continue;
87431 +
87432 + if (walk_struct(type)) {
87433 + error("constified variable %qE cannot be local", var);
87434 + return 1;
87435 + }
87436 + }
87437 + return 0;
87438 +}
87439 +
87440 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87441 +{
87442 + const char * const plugin_name = plugin_info->base_name;
87443 + const int argc = plugin_info->argc;
87444 + const struct plugin_argument * const argv = plugin_info->argv;
87445 + int i;
87446 + bool constify = true;
87447 +
87448 + struct register_pass_info local_variable_pass_info = {
87449 + .pass = &pass_local_variable.pass,
87450 + .reference_pass_name = "*referenced_vars",
87451 + .ref_pass_instance_number = 0,
87452 + .pos_op = PASS_POS_INSERT_AFTER
87453 + };
87454 +
87455 + if (!plugin_default_version_check(version, &gcc_version)) {
87456 + error(G_("incompatible gcc/plugin versions"));
87457 + return 1;
87458 + }
87459 +
87460 + for (i = 0; i < argc; ++i) {
87461 + if (!(strcmp(argv[i].key, "no-constify"))) {
87462 + constify = false;
87463 + continue;
87464 + }
87465 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87466 + }
87467 +
87468 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
87469 + if (constify) {
87470 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
87471 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
87472 + }
87473 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87474 +
87475 + return 0;
87476 +}
87477 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
87478 new file mode 100644
87479 index 0000000..a5eabce
87480 --- /dev/null
87481 +++ b/tools/gcc/kallocstat_plugin.c
87482 @@ -0,0 +1,167 @@
87483 +/*
87484 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87485 + * Licensed under the GPL v2
87486 + *
87487 + * Note: the choice of the license means that the compilation process is
87488 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87489 + * but for the kernel it doesn't matter since it doesn't link against
87490 + * any of the gcc libraries
87491 + *
87492 + * gcc plugin to find the distribution of k*alloc sizes
87493 + *
87494 + * TODO:
87495 + *
87496 + * BUGS:
87497 + * - none known
87498 + */
87499 +#include "gcc-plugin.h"
87500 +#include "config.h"
87501 +#include "system.h"
87502 +#include "coretypes.h"
87503 +#include "tree.h"
87504 +#include "tree-pass.h"
87505 +#include "flags.h"
87506 +#include "intl.h"
87507 +#include "toplev.h"
87508 +#include "plugin.h"
87509 +//#include "expr.h" where are you...
87510 +#include "diagnostic.h"
87511 +#include "plugin-version.h"
87512 +#include "tm.h"
87513 +#include "function.h"
87514 +#include "basic-block.h"
87515 +#include "gimple.h"
87516 +#include "rtl.h"
87517 +#include "emit-rtl.h"
87518 +
87519 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87520 +
87521 +int plugin_is_GPL_compatible;
87522 +
87523 +static const char * const kalloc_functions[] = {
87524 + "__kmalloc",
87525 + "kmalloc",
87526 + "kmalloc_large",
87527 + "kmalloc_node",
87528 + "kmalloc_order",
87529 + "kmalloc_order_trace",
87530 + "kmalloc_slab",
87531 + "kzalloc",
87532 + "kzalloc_node",
87533 +};
87534 +
87535 +static struct plugin_info kallocstat_plugin_info = {
87536 + .version = "201111150100",
87537 +};
87538 +
87539 +static unsigned int execute_kallocstat(void);
87540 +
87541 +static struct gimple_opt_pass kallocstat_pass = {
87542 + .pass = {
87543 + .type = GIMPLE_PASS,
87544 + .name = "kallocstat",
87545 + .gate = NULL,
87546 + .execute = execute_kallocstat,
87547 + .sub = NULL,
87548 + .next = NULL,
87549 + .static_pass_number = 0,
87550 + .tv_id = TV_NONE,
87551 + .properties_required = 0,
87552 + .properties_provided = 0,
87553 + .properties_destroyed = 0,
87554 + .todo_flags_start = 0,
87555 + .todo_flags_finish = 0
87556 + }
87557 +};
87558 +
87559 +static bool is_kalloc(const char *fnname)
87560 +{
87561 + size_t i;
87562 +
87563 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
87564 + if (!strcmp(fnname, kalloc_functions[i]))
87565 + return true;
87566 + return false;
87567 +}
87568 +
87569 +static unsigned int execute_kallocstat(void)
87570 +{
87571 + basic_block bb;
87572 +
87573 + // 1. loop through BBs and GIMPLE statements
87574 + FOR_EACH_BB(bb) {
87575 + gimple_stmt_iterator gsi;
87576 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87577 + // gimple match:
87578 + tree fndecl, size;
87579 + gimple call_stmt;
87580 + const char *fnname;
87581 +
87582 + // is it a call
87583 + call_stmt = gsi_stmt(gsi);
87584 + if (!is_gimple_call(call_stmt))
87585 + continue;
87586 + fndecl = gimple_call_fndecl(call_stmt);
87587 + if (fndecl == NULL_TREE)
87588 + continue;
87589 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
87590 + continue;
87591 +
87592 + // is it a call to k*alloc
87593 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
87594 + if (!is_kalloc(fnname))
87595 + continue;
87596 +
87597 + // is the size arg the result of a simple const assignment
87598 + size = gimple_call_arg(call_stmt, 0);
87599 + while (true) {
87600 + gimple def_stmt;
87601 + expanded_location xloc;
87602 + size_t size_val;
87603 +
87604 + if (TREE_CODE(size) != SSA_NAME)
87605 + break;
87606 + def_stmt = SSA_NAME_DEF_STMT(size);
87607 + if (!def_stmt || !is_gimple_assign(def_stmt))
87608 + break;
87609 + if (gimple_num_ops(def_stmt) != 2)
87610 + break;
87611 + size = gimple_assign_rhs1(def_stmt);
87612 + if (!TREE_CONSTANT(size))
87613 + continue;
87614 + xloc = expand_location(gimple_location(def_stmt));
87615 + if (!xloc.file)
87616 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
87617 + size_val = TREE_INT_CST_LOW(size);
87618 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
87619 + break;
87620 + }
87621 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87622 +//debug_tree(gimple_call_fn(call_stmt));
87623 +//print_node(stderr, "pax", fndecl, 4);
87624 + }
87625 + }
87626 +
87627 + return 0;
87628 +}
87629 +
87630 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87631 +{
87632 + const char * const plugin_name = plugin_info->base_name;
87633 + struct register_pass_info kallocstat_pass_info = {
87634 + .pass = &kallocstat_pass.pass,
87635 + .reference_pass_name = "ssa",
87636 + .ref_pass_instance_number = 0,
87637 + .pos_op = PASS_POS_INSERT_AFTER
87638 + };
87639 +
87640 + if (!plugin_default_version_check(version, &gcc_version)) {
87641 + error(G_("incompatible gcc/plugin versions"));
87642 + return 1;
87643 + }
87644 +
87645 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
87646 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
87647 +
87648 + return 0;
87649 +}
87650 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
87651 new file mode 100644
87652 index 0000000..008f159
87653 --- /dev/null
87654 +++ b/tools/gcc/kernexec_plugin.c
87655 @@ -0,0 +1,427 @@
87656 +/*
87657 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87658 + * Licensed under the GPL v2
87659 + *
87660 + * Note: the choice of the license means that the compilation process is
87661 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87662 + * but for the kernel it doesn't matter since it doesn't link against
87663 + * any of the gcc libraries
87664 + *
87665 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
87666 + *
87667 + * TODO:
87668 + *
87669 + * BUGS:
87670 + * - none known
87671 + */
87672 +#include "gcc-plugin.h"
87673 +#include "config.h"
87674 +#include "system.h"
87675 +#include "coretypes.h"
87676 +#include "tree.h"
87677 +#include "tree-pass.h"
87678 +#include "flags.h"
87679 +#include "intl.h"
87680 +#include "toplev.h"
87681 +#include "plugin.h"
87682 +//#include "expr.h" where are you...
87683 +#include "diagnostic.h"
87684 +#include "plugin-version.h"
87685 +#include "tm.h"
87686 +#include "function.h"
87687 +#include "basic-block.h"
87688 +#include "gimple.h"
87689 +#include "rtl.h"
87690 +#include "emit-rtl.h"
87691 +#include "tree-flow.h"
87692 +
87693 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87694 +extern rtx emit_move_insn(rtx x, rtx y);
87695 +
87696 +int plugin_is_GPL_compatible;
87697 +
87698 +static struct plugin_info kernexec_plugin_info = {
87699 + .version = "201111291120",
87700 + .help = "method=[bts|or]\tinstrumentation method\n"
87701 +};
87702 +
87703 +static unsigned int execute_kernexec_reload(void);
87704 +static unsigned int execute_kernexec_fptr(void);
87705 +static unsigned int execute_kernexec_retaddr(void);
87706 +static bool kernexec_cmodel_check(void);
87707 +
87708 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
87709 +static void (*kernexec_instrument_retaddr)(rtx);
87710 +
87711 +static struct gimple_opt_pass kernexec_reload_pass = {
87712 + .pass = {
87713 + .type = GIMPLE_PASS,
87714 + .name = "kernexec_reload",
87715 + .gate = kernexec_cmodel_check,
87716 + .execute = execute_kernexec_reload,
87717 + .sub = NULL,
87718 + .next = NULL,
87719 + .static_pass_number = 0,
87720 + .tv_id = TV_NONE,
87721 + .properties_required = 0,
87722 + .properties_provided = 0,
87723 + .properties_destroyed = 0,
87724 + .todo_flags_start = 0,
87725 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87726 + }
87727 +};
87728 +
87729 +static struct gimple_opt_pass kernexec_fptr_pass = {
87730 + .pass = {
87731 + .type = GIMPLE_PASS,
87732 + .name = "kernexec_fptr",
87733 + .gate = kernexec_cmodel_check,
87734 + .execute = execute_kernexec_fptr,
87735 + .sub = NULL,
87736 + .next = NULL,
87737 + .static_pass_number = 0,
87738 + .tv_id = TV_NONE,
87739 + .properties_required = 0,
87740 + .properties_provided = 0,
87741 + .properties_destroyed = 0,
87742 + .todo_flags_start = 0,
87743 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87744 + }
87745 +};
87746 +
87747 +static struct rtl_opt_pass kernexec_retaddr_pass = {
87748 + .pass = {
87749 + .type = RTL_PASS,
87750 + .name = "kernexec_retaddr",
87751 + .gate = kernexec_cmodel_check,
87752 + .execute = execute_kernexec_retaddr,
87753 + .sub = NULL,
87754 + .next = NULL,
87755 + .static_pass_number = 0,
87756 + .tv_id = TV_NONE,
87757 + .properties_required = 0,
87758 + .properties_provided = 0,
87759 + .properties_destroyed = 0,
87760 + .todo_flags_start = 0,
87761 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
87762 + }
87763 +};
87764 +
87765 +static bool kernexec_cmodel_check(void)
87766 +{
87767 + tree section;
87768 +
87769 + if (ix86_cmodel != CM_KERNEL)
87770 + return false;
87771 +
87772 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
87773 + if (!section || !TREE_VALUE(section))
87774 + return true;
87775 +
87776 + section = TREE_VALUE(TREE_VALUE(section));
87777 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
87778 + return true;
87779 +
87780 + return false;
87781 +}
87782 +
87783 +/*
87784 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
87785 + */
87786 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
87787 +{
87788 + gimple asm_movabs_stmt;
87789 +
87790 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
87791 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
87792 + gimple_asm_set_volatile(asm_movabs_stmt, true);
87793 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
87794 + update_stmt(asm_movabs_stmt);
87795 +}
87796 +
87797 +/*
87798 + * find all asm() stmts that clobber r10 and add a reload of r10
87799 + */
87800 +static unsigned int execute_kernexec_reload(void)
87801 +{
87802 + basic_block bb;
87803 +
87804 + // 1. loop through BBs and GIMPLE statements
87805 + FOR_EACH_BB(bb) {
87806 + gimple_stmt_iterator gsi;
87807 +
87808 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87809 + // gimple match: __asm__ ("" : : : "r10");
87810 + gimple asm_stmt;
87811 + size_t nclobbers;
87812 +
87813 + // is it an asm ...
87814 + asm_stmt = gsi_stmt(gsi);
87815 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
87816 + continue;
87817 +
87818 + // ... clobbering r10
87819 + nclobbers = gimple_asm_nclobbers(asm_stmt);
87820 + while (nclobbers--) {
87821 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
87822 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
87823 + continue;
87824 + kernexec_reload_fptr_mask(&gsi);
87825 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
87826 + break;
87827 + }
87828 + }
87829 + }
87830 +
87831 + return 0;
87832 +}
87833 +
87834 +/*
87835 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
87836 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
87837 + */
87838 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
87839 +{
87840 + gimple assign_intptr, assign_new_fptr, call_stmt;
87841 + tree intptr, old_fptr, new_fptr, kernexec_mask;
87842 +
87843 + call_stmt = gsi_stmt(*gsi);
87844 + old_fptr = gimple_call_fn(call_stmt);
87845 +
87846 + // create temporary unsigned long variable used for bitops and cast fptr to it
87847 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
87848 + add_referenced_var(intptr);
87849 + mark_sym_for_renaming(intptr);
87850 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
87851 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87852 + update_stmt(assign_intptr);
87853 +
87854 + // apply logical or to temporary unsigned long and bitmask
87855 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
87856 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
87857 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
87858 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87859 + update_stmt(assign_intptr);
87860 +
87861 + // cast temporary unsigned long back to a temporary fptr variable
87862 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
87863 + add_referenced_var(new_fptr);
87864 + mark_sym_for_renaming(new_fptr);
87865 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
87866 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
87867 + update_stmt(assign_new_fptr);
87868 +
87869 + // replace call stmt fn with the new fptr
87870 + gimple_call_set_fn(call_stmt, new_fptr);
87871 + update_stmt(call_stmt);
87872 +}
87873 +
87874 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
87875 +{
87876 + gimple asm_or_stmt, call_stmt;
87877 + tree old_fptr, new_fptr, input, output;
87878 + VEC(tree, gc) *inputs = NULL;
87879 + VEC(tree, gc) *outputs = NULL;
87880 +
87881 + call_stmt = gsi_stmt(*gsi);
87882 + old_fptr = gimple_call_fn(call_stmt);
87883 +
87884 + // create temporary fptr variable
87885 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
87886 + add_referenced_var(new_fptr);
87887 + mark_sym_for_renaming(new_fptr);
87888 +
87889 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
87890 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
87891 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
87892 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
87893 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
87894 + VEC_safe_push(tree, gc, inputs, input);
87895 + VEC_safe_push(tree, gc, outputs, output);
87896 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
87897 + gimple_asm_set_volatile(asm_or_stmt, true);
87898 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
87899 + update_stmt(asm_or_stmt);
87900 +
87901 + // replace call stmt fn with the new fptr
87902 + gimple_call_set_fn(call_stmt, new_fptr);
87903 + update_stmt(call_stmt);
87904 +}
87905 +
87906 +/*
87907 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
87908 + */
87909 +static unsigned int execute_kernexec_fptr(void)
87910 +{
87911 + basic_block bb;
87912 +
87913 + // 1. loop through BBs and GIMPLE statements
87914 + FOR_EACH_BB(bb) {
87915 + gimple_stmt_iterator gsi;
87916 +
87917 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87918 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
87919 + tree fn;
87920 + gimple call_stmt;
87921 +
87922 + // is it a call ...
87923 + call_stmt = gsi_stmt(gsi);
87924 + if (!is_gimple_call(call_stmt))
87925 + continue;
87926 + fn = gimple_call_fn(call_stmt);
87927 + if (TREE_CODE(fn) == ADDR_EXPR)
87928 + continue;
87929 + if (TREE_CODE(fn) != SSA_NAME)
87930 + gcc_unreachable();
87931 +
87932 + // ... through a function pointer
87933 + fn = SSA_NAME_VAR(fn);
87934 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
87935 + continue;
87936 + fn = TREE_TYPE(fn);
87937 + if (TREE_CODE(fn) != POINTER_TYPE)
87938 + continue;
87939 + fn = TREE_TYPE(fn);
87940 + if (TREE_CODE(fn) != FUNCTION_TYPE)
87941 + continue;
87942 +
87943 + kernexec_instrument_fptr(&gsi);
87944 +
87945 +//debug_tree(gimple_call_fn(call_stmt));
87946 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87947 + }
87948 + }
87949 +
87950 + return 0;
87951 +}
87952 +
87953 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
87954 +static void kernexec_instrument_retaddr_bts(rtx insn)
87955 +{
87956 + rtx btsq;
87957 + rtvec argvec, constraintvec, labelvec;
87958 + int line;
87959 +
87960 + // create asm volatile("btsq $63,(%%rsp)":::)
87961 + argvec = rtvec_alloc(0);
87962 + constraintvec = rtvec_alloc(0);
87963 + labelvec = rtvec_alloc(0);
87964 + line = expand_location(RTL_LOCATION(insn)).line;
87965 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87966 + MEM_VOLATILE_P(btsq) = 1;
87967 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
87968 + emit_insn_before(btsq, insn);
87969 +}
87970 +
87971 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
87972 +static void kernexec_instrument_retaddr_or(rtx insn)
87973 +{
87974 + rtx orq;
87975 + rtvec argvec, constraintvec, labelvec;
87976 + int line;
87977 +
87978 + // create asm volatile("orq %%r10,(%%rsp)":::)
87979 + argvec = rtvec_alloc(0);
87980 + constraintvec = rtvec_alloc(0);
87981 + labelvec = rtvec_alloc(0);
87982 + line = expand_location(RTL_LOCATION(insn)).line;
87983 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87984 + MEM_VOLATILE_P(orq) = 1;
87985 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
87986 + emit_insn_before(orq, insn);
87987 +}
87988 +
87989 +/*
87990 + * find all asm level function returns and forcibly set the highest bit of the return address
87991 + */
87992 +static unsigned int execute_kernexec_retaddr(void)
87993 +{
87994 + rtx insn;
87995 +
87996 + // 1. find function returns
87997 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
87998 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
87999 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
88000 + rtx body;
88001 +
88002 + // is it a retn
88003 + if (!JUMP_P(insn))
88004 + continue;
88005 + body = PATTERN(insn);
88006 + if (GET_CODE(body) == PARALLEL)
88007 + body = XVECEXP(body, 0, 0);
88008 + if (GET_CODE(body) != RETURN)
88009 + continue;
88010 + kernexec_instrument_retaddr(insn);
88011 + }
88012 +
88013 +// print_simple_rtl(stderr, get_insns());
88014 +// print_rtl(stderr, get_insns());
88015 +
88016 + return 0;
88017 +}
88018 +
88019 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88020 +{
88021 + const char * const plugin_name = plugin_info->base_name;
88022 + const int argc = plugin_info->argc;
88023 + const struct plugin_argument * const argv = plugin_info->argv;
88024 + int i;
88025 + struct register_pass_info kernexec_reload_pass_info = {
88026 + .pass = &kernexec_reload_pass.pass,
88027 + .reference_pass_name = "ssa",
88028 + .ref_pass_instance_number = 0,
88029 + .pos_op = PASS_POS_INSERT_AFTER
88030 + };
88031 + struct register_pass_info kernexec_fptr_pass_info = {
88032 + .pass = &kernexec_fptr_pass.pass,
88033 + .reference_pass_name = "ssa",
88034 + .ref_pass_instance_number = 0,
88035 + .pos_op = PASS_POS_INSERT_AFTER
88036 + };
88037 + struct register_pass_info kernexec_retaddr_pass_info = {
88038 + .pass = &kernexec_retaddr_pass.pass,
88039 + .reference_pass_name = "pro_and_epilogue",
88040 + .ref_pass_instance_number = 0,
88041 + .pos_op = PASS_POS_INSERT_AFTER
88042 + };
88043 +
88044 + if (!plugin_default_version_check(version, &gcc_version)) {
88045 + error(G_("incompatible gcc/plugin versions"));
88046 + return 1;
88047 + }
88048 +
88049 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
88050 +
88051 + if (TARGET_64BIT == 0)
88052 + return 0;
88053 +
88054 + for (i = 0; i < argc; ++i) {
88055 + if (!strcmp(argv[i].key, "method")) {
88056 + if (!argv[i].value) {
88057 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88058 + continue;
88059 + }
88060 + if (!strcmp(argv[i].value, "bts")) {
88061 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
88062 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
88063 + } else if (!strcmp(argv[i].value, "or")) {
88064 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
88065 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
88066 + fix_register("r10", 1, 1);
88067 + } else
88068 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88069 + continue;
88070 + }
88071 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88072 + }
88073 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
88074 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
88075 +
88076 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
88077 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
88078 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
88079 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
88080 +
88081 + return 0;
88082 +}
88083 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
88084 new file mode 100644
88085 index 0000000..8b61031
88086 --- /dev/null
88087 +++ b/tools/gcc/stackleak_plugin.c
88088 @@ -0,0 +1,295 @@
88089 +/*
88090 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88091 + * Licensed under the GPL v2
88092 + *
88093 + * Note: the choice of the license means that the compilation process is
88094 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88095 + * but for the kernel it doesn't matter since it doesn't link against
88096 + * any of the gcc libraries
88097 + *
88098 + * gcc plugin to help implement various PaX features
88099 + *
88100 + * - track lowest stack pointer
88101 + *
88102 + * TODO:
88103 + * - initialize all local variables
88104 + *
88105 + * BUGS:
88106 + * - none known
88107 + */
88108 +#include "gcc-plugin.h"
88109 +#include "config.h"
88110 +#include "system.h"
88111 +#include "coretypes.h"
88112 +#include "tree.h"
88113 +#include "tree-pass.h"
88114 +#include "flags.h"
88115 +#include "intl.h"
88116 +#include "toplev.h"
88117 +#include "plugin.h"
88118 +//#include "expr.h" where are you...
88119 +#include "diagnostic.h"
88120 +#include "plugin-version.h"
88121 +#include "tm.h"
88122 +#include "function.h"
88123 +#include "basic-block.h"
88124 +#include "gimple.h"
88125 +#include "rtl.h"
88126 +#include "emit-rtl.h"
88127 +
88128 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88129 +
88130 +int plugin_is_GPL_compatible;
88131 +
88132 +static int track_frame_size = -1;
88133 +static const char track_function[] = "pax_track_stack";
88134 +static const char check_function[] = "pax_check_alloca";
88135 +static bool init_locals;
88136 +
88137 +static struct plugin_info stackleak_plugin_info = {
88138 + .version = "201111150100",
88139 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
88140 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
88141 +};
88142 +
88143 +static bool gate_stackleak_track_stack(void);
88144 +static unsigned int execute_stackleak_tree_instrument(void);
88145 +static unsigned int execute_stackleak_final(void);
88146 +
88147 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
88148 + .pass = {
88149 + .type = GIMPLE_PASS,
88150 + .name = "stackleak_tree_instrument",
88151 + .gate = gate_stackleak_track_stack,
88152 + .execute = execute_stackleak_tree_instrument,
88153 + .sub = NULL,
88154 + .next = NULL,
88155 + .static_pass_number = 0,
88156 + .tv_id = TV_NONE,
88157 + .properties_required = PROP_gimple_leh | PROP_cfg,
88158 + .properties_provided = 0,
88159 + .properties_destroyed = 0,
88160 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
88161 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
88162 + }
88163 +};
88164 +
88165 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
88166 + .pass = {
88167 + .type = RTL_PASS,
88168 + .name = "stackleak_final",
88169 + .gate = gate_stackleak_track_stack,
88170 + .execute = execute_stackleak_final,
88171 + .sub = NULL,
88172 + .next = NULL,
88173 + .static_pass_number = 0,
88174 + .tv_id = TV_NONE,
88175 + .properties_required = 0,
88176 + .properties_provided = 0,
88177 + .properties_destroyed = 0,
88178 + .todo_flags_start = 0,
88179 + .todo_flags_finish = TODO_dump_func
88180 + }
88181 +};
88182 +
88183 +static bool gate_stackleak_track_stack(void)
88184 +{
88185 + return track_frame_size >= 0;
88186 +}
88187 +
88188 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
88189 +{
88190 + gimple check_alloca;
88191 + tree fndecl, fntype, alloca_size;
88192 +
88193 + // insert call to void pax_check_alloca(unsigned long size)
88194 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
88195 + fndecl = build_fn_decl(check_function, fntype);
88196 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
88197 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
88198 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
88199 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
88200 +}
88201 +
88202 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
88203 +{
88204 + gimple track_stack;
88205 + tree fndecl, fntype;
88206 +
88207 + // insert call to void pax_track_stack(void)
88208 + fntype = build_function_type_list(void_type_node, NULL_TREE);
88209 + fndecl = build_fn_decl(track_function, fntype);
88210 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
88211 + track_stack = gimple_build_call(fndecl, 0);
88212 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
88213 +}
88214 +
88215 +#if BUILDING_GCC_VERSION == 4005
88216 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
88217 +{
88218 + tree fndecl;
88219 +
88220 + if (!is_gimple_call(stmt))
88221 + return false;
88222 + fndecl = gimple_call_fndecl(stmt);
88223 + if (!fndecl)
88224 + return false;
88225 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
88226 + return false;
88227 +// print_node(stderr, "pax", fndecl, 4);
88228 + return DECL_FUNCTION_CODE(fndecl) == code;
88229 +}
88230 +#endif
88231 +
88232 +static bool is_alloca(gimple stmt)
88233 +{
88234 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
88235 + return true;
88236 +
88237 +#if BUILDING_GCC_VERSION >= 4007
88238 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
88239 + return true;
88240 +#endif
88241 +
88242 + return false;
88243 +}
88244 +
88245 +static unsigned int execute_stackleak_tree_instrument(void)
88246 +{
88247 + basic_block bb, entry_bb;
88248 + bool prologue_instrumented = false;
88249 +
88250 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
88251 +
88252 + // 1. loop through BBs and GIMPLE statements
88253 + FOR_EACH_BB(bb) {
88254 + gimple_stmt_iterator gsi;
88255 +
88256 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88257 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
88258 + if (!is_alloca(gsi_stmt(gsi)))
88259 + continue;
88260 +
88261 + // 2. insert stack overflow check before each __builtin_alloca call
88262 + stackleak_check_alloca(&gsi);
88263 +
88264 + // 3. insert track call after each __builtin_alloca call
88265 + stackleak_add_instrumentation(&gsi);
88266 + if (bb == entry_bb)
88267 + prologue_instrumented = true;
88268 + }
88269 + }
88270 +
88271 + // 4. insert track call at the beginning
88272 + if (!prologue_instrumented) {
88273 + gimple_stmt_iterator gsi;
88274 +
88275 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88276 + if (dom_info_available_p(CDI_DOMINATORS))
88277 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
88278 + gsi = gsi_start_bb(bb);
88279 + stackleak_add_instrumentation(&gsi);
88280 + }
88281 +
88282 + return 0;
88283 +}
88284 +
88285 +static unsigned int execute_stackleak_final(void)
88286 +{
88287 + rtx insn;
88288 +
88289 + if (cfun->calls_alloca)
88290 + return 0;
88291 +
88292 + // keep calls only if function frame is big enough
88293 + if (get_frame_size() >= track_frame_size)
88294 + return 0;
88295 +
88296 + // 1. find pax_track_stack calls
88297 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88298 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
88299 + rtx body;
88300 +
88301 + if (!CALL_P(insn))
88302 + continue;
88303 + body = PATTERN(insn);
88304 + if (GET_CODE(body) != CALL)
88305 + continue;
88306 + body = XEXP(body, 0);
88307 + if (GET_CODE(body) != MEM)
88308 + continue;
88309 + body = XEXP(body, 0);
88310 + if (GET_CODE(body) != SYMBOL_REF)
88311 + continue;
88312 + if (strcmp(XSTR(body, 0), track_function))
88313 + continue;
88314 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88315 + // 2. delete call
88316 + insn = delete_insn_and_edges(insn);
88317 +#if BUILDING_GCC_VERSION >= 4007
88318 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
88319 + insn = delete_insn_and_edges(insn);
88320 +#endif
88321 + }
88322 +
88323 +// print_simple_rtl(stderr, get_insns());
88324 +// print_rtl(stderr, get_insns());
88325 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88326 +
88327 + return 0;
88328 +}
88329 +
88330 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88331 +{
88332 + const char * const plugin_name = plugin_info->base_name;
88333 + const int argc = plugin_info->argc;
88334 + const struct plugin_argument * const argv = plugin_info->argv;
88335 + int i;
88336 + struct register_pass_info stackleak_tree_instrument_pass_info = {
88337 + .pass = &stackleak_tree_instrument_pass.pass,
88338 +// .reference_pass_name = "tree_profile",
88339 + .reference_pass_name = "optimized",
88340 + .ref_pass_instance_number = 0,
88341 + .pos_op = PASS_POS_INSERT_AFTER
88342 + };
88343 + struct register_pass_info stackleak_final_pass_info = {
88344 + .pass = &stackleak_final_rtl_opt_pass.pass,
88345 + .reference_pass_name = "final",
88346 + .ref_pass_instance_number = 0,
88347 + .pos_op = PASS_POS_INSERT_BEFORE
88348 + };
88349 +
88350 + if (!plugin_default_version_check(version, &gcc_version)) {
88351 + error(G_("incompatible gcc/plugin versions"));
88352 + return 1;
88353 + }
88354 +
88355 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
88356 +
88357 + for (i = 0; i < argc; ++i) {
88358 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
88359 + if (!argv[i].value) {
88360 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88361 + continue;
88362 + }
88363 + track_frame_size = atoi(argv[i].value);
88364 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
88365 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88366 + continue;
88367 + }
88368 + if (!strcmp(argv[i].key, "initialize-locals")) {
88369 + if (argv[i].value) {
88370 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88371 + continue;
88372 + }
88373 + init_locals = true;
88374 + continue;
88375 + }
88376 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88377 + }
88378 +
88379 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
88380 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
88381 +
88382 + return 0;
88383 +}
88384 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
88385 index 83b3dde..835bee7 100644
88386 --- a/usr/gen_init_cpio.c
88387 +++ b/usr/gen_init_cpio.c
88388 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
88389 int retval;
88390 int rc = -1;
88391 int namesize;
88392 - int i;
88393 + unsigned int i;
88394
88395 mode |= S_IFREG;
88396
88397 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
88398 *env_var = *expanded = '\0';
88399 strncat(env_var, start + 2, end - start - 2);
88400 strncat(expanded, new_location, start - new_location);
88401 - strncat(expanded, getenv(env_var), PATH_MAX);
88402 - strncat(expanded, end + 1, PATH_MAX);
88403 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
88404 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
88405 strncpy(new_location, expanded, PATH_MAX);
88406 + new_location[PATH_MAX] = 0;
88407 } else
88408 break;
88409 }
88410 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
88411 index 4f3434f..159bc3e 100644
88412 --- a/virt/kvm/kvm_main.c
88413 +++ b/virt/kvm/kvm_main.c
88414 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
88415 if (kvm_rebooting)
88416 /* spin while reset goes on */
88417 while (true)
88418 - ;
88419 + cpu_relax();
88420 /* Fault while not rebooting. We want the trace. */
88421 BUG();
88422 }
88423 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
88424 kvm_arch_vcpu_put(vcpu);
88425 }
88426
88427 -int kvm_init(void *opaque, unsigned int vcpu_size,
88428 +int kvm_init(const void *opaque, unsigned int vcpu_size,
88429 struct module *module)
88430 {
88431 int r;
88432 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
88433 /* A kmem cache lets us meet the alignment requirements of fx_save. */
88434 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
88435 __alignof__(struct kvm_vcpu),
88436 - 0, NULL);
88437 + SLAB_USERCOPY, NULL);
88438 if (!kvm_vcpu_cache) {
88439 r = -ENOMEM;
88440 goto out_free_5;
88441 }
88442
88443 - kvm_chardev_ops.owner = module;
88444 - kvm_vm_fops.owner = module;
88445 - kvm_vcpu_fops.owner = module;
88446 + pax_open_kernel();
88447 + *(void **)&kvm_chardev_ops.owner = module;
88448 + *(void **)&kvm_vm_fops.owner = module;
88449 + *(void **)&kvm_vcpu_fops.owner = module;
88450 + pax_close_kernel();
88451
88452 r = misc_register(&kvm_dev);
88453 if (r) {